ocxl_hw.c

/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *	       Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/file.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <misc/ocxl.h>

#include <uapi/misc/cxl.h>

#include "backend.h"
#include "ocxl_hw.h"

/*
 * Pseudo-filesystem to allocate inodes.
 */

#define OCXLFLASH_FS_MAGIC	0x1697698f

static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;

static const struct dentry_operations ocxlflash_fs_dops = {
	.d_dname	= simple_dname,
};

/*
 * ocxlflash_fs_mount() - mount the pseudo-filesystem
 * @fs_type: File system type.
 * @flags: Flags for the filesystem.
 * @dev_name: Device name associated with the filesystem.
 * @data: Data pointer.
 *
 * Return: pointer to the directory entry structure
 */
static struct dentry *ocxlflash_fs_mount(struct file_system_type *fs_type,
					 int flags, const char *dev_name,
					 void *data)
{
	return mount_pseudo(fs_type, "ocxlflash:", NULL, &ocxlflash_fs_dops,
			    OCXLFLASH_FS_MAGIC);
}

static struct file_system_type ocxlflash_fs_type = {
	.name		= "ocxlflash",
	.owner		= THIS_MODULE,
	.mount		= ocxlflash_fs_mount,
	.kill_sb	= kill_anon_super,
};
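
/*
 * This private pseudo-filesystem exists so that every adapter context can
 * be handed a file backed by its own anonymous inode, much like
 * anon_inode_getfile() but without sharing a single inode (and hence a
 * single address_space) across all contexts. The mount is pinned on demand
 * by ocxlflash_getfile() and unpinned per context by
 * ocxlflash_release_mapping().
 */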

/*
 * ocxlflash_release_mapping() - release the memory mapping
 * @ctx: Context whose mapping is to be released.
 */
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
{
	if (ctx->mapping)
		simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
	ctx->mapping = NULL;
}

/*
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev: Generic device of the host.
 * @name: Name of the pseudo filesystem.
 * @fops: File operations.
 * @priv: Private data.
 * @flags: Flags for the file.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags)
{
	struct file *file;
	struct inode *inode;
	int rc;

	if (fops->owner && !try_module_get(fops->owner)) {
		dev_err(dev, "%s: Owner does not exist\n", __func__);
		rc = -ENOENT;
		goto err1;
	}

	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
			   &ocxlflash_fs_cnt);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
			__func__, rc);
		goto err2;
	}

	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: alloc_file failed rc=%d\n",
			__func__, rc);
		goto err4;
	}

	file->private_data = priv;
out:
	return file;
err4:
	iput(inode);
err3:
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
	module_put(fops->owner);
err1:
	file = ERR_PTR(rc);
	goto out;
}
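
/*
 * The label layering above mirrors the construction order: on success the
 * file consumes the inode reference, while each failure point unwinds only
 * what was set up before it (inode, pinned mount, module reference).
 */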

/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie: Adapter context for which the mapping needs to be done.
 *
 * Return: MMIO pointer of the mapped region
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return NULL;
	}
	mutex_unlock(&ctx->state_mutex);

	return ioremap(ctx->psn_phys, ctx->psn_size);
}

/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr: MMIO pointer to unmap.
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}

/**
 * ocxlflash_process_element() - get process element of the adapter context
 * @ctx_cookie: Adapter context associated with the process element.
 *
 * Return: process element of the adapter context
 */
static int ocxlflash_process_element(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return ctx->pe;
}

/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags: Flags.
 * @ctx: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @handler: Interrupt handler to register.
 * @cookie: Interrupt handler private data.
 * @name: Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
		       irq_handler_t handler, void *cookie, char *name)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	void __iomem *vtrig;
	u32 virq;
	int rc = 0;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		rc = -ENOENT;
		goto out;
	}

	irq = &ctx->irqs[num];
	virq = irq_create_mapping(NULL, irq->hwirq);
	if (unlikely(!virq)) {
		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	rc = request_irq(virq, handler, 0, name, cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
		goto err1;
	}

	vtrig = ioremap(irq->ptrig, PAGE_SIZE);
	if (unlikely(!vtrig)) {
		dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	irq->virq = virq;
	irq->vtrig = vtrig;
out:
	return rc;
err2:
	free_irq(virq, cookie);
err1:
	irq_dispose_mapping(virq);
	goto out;
}
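
/*
 * An AFU interrupt has two halves: a hardware irq (hwirq) that is mapped
 * into the Linux virq space and wired to @handler, and an MMIO trigger
 * page (ptrig) that the device writes to raise the interrupt. The kernel
 * mapping of the trigger page (vtrig) is kept so its address can later be
 * handed out as an object handle, see ocxlflash_get_irq_objhndl().
 */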

/**
 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
 * @ctx_cookie: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @handler: Interrupt handler to register.
 * @cookie: Interrupt handler private data.
 * @name: Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
				 irq_handler_t handler, void *cookie,
				 char *name)
{
	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}

/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags: Flags.
 * @ctx: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @cookie: Interrupt handler private data.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
			  void *cookie)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		return;
	}

	irq = &ctx->irqs[num];
	if (irq->vtrig)
		iounmap(irq->vtrig);

	if (irq_find_mapping(NULL, irq->hwirq)) {
		free_irq(irq->virq, cookie);
		irq_dispose_mapping(irq->virq);
	}

	memset(irq, 0, sizeof(*irq));
}

/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @cookie: Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	return afu_unmap_irq(0, ctx_cookie, num, cookie);
}

/**
 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
 * @ctx_cookie: Context associated with the interrupt.
 * @irq: Interrupt number.
 *
 * Return: effective address of the mapped region
 */
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	if (irq < 0 || irq >= ctx->num_irqs)
		return 0;

	return (__force u64)ctx->irqs[irq].vtrig;
}
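
/*
 * The handle returned above is the kernel effective address of the mapped
 * trigger page. It is presumably programmed into the AFU by the core
 * cxlflash code so the device knows where to write in order to raise that
 * interrupt; zero simply means the interrupt does not exist.
 */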

/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data: Private data provided at callback registration, the context.
 * @addr: Address that triggered the error.
 * @dsisr: Value of dsisr register.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
	struct ocxlflash_context *ctx = data;

	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
}

/**
 * start_context() - local routine to start a context
 * @ctx: Adapter context to be started.
 *
 * Assign the context specific MMIO space, add and enable the PE.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	int rc = 0;
	u32 pid;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
		rc = -EINVAL;
		goto out;
	}

	if (master) {
		ctx->psn_size = acfg->global_mmio_size;
		ctx->psn_phys = afu->gmmio_phys;
	} else {
		ctx->psn_size = acfg->pp_mmio_stride;
		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	}

	/* pid and mm not set for master contexts */
	if (master) {
		pid = 0;
		mm = NULL;
	} else {
		pid = current->mm->context.id;
		mm = current->mm;
	}

	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
			      ocxlflash_xsl_fault, ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	ctx->state = STARTED;
out:
	mutex_unlock(&ctx->state_mutex);
	return rc;
}
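
/*
 * Note on the ocxl_link_add_pe() call above: ctx->pe is used as the PASID,
 * the pid is the powerpc MMU context id of the task's mm (the value the
 * hardware matches as PIDR), and the two zeroes are the tidr and amr
 * arguments, which this driver does not use. Registering
 * ocxlflash_xsl_fault() is what allows translation faults on this context
 * to be surfaced to user space through afu_poll()/afu_read().
 */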

/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie: Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return start_context(ctx);
}

/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie: Adapter context to be stopped.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	int rc = 0;

	mutex_lock(&ctx->state_mutex);
	state = ctx->state;
	ctx->state = CLOSED;
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
		goto out;

	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
					 ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
			__func__, rc);
		/* If EBUSY, PE could be referenced in future by the AFU */
		if (rc == -EBUSY)
			goto out;
	}

	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}
out:
	return rc;
}

/**
 * ocxlflash_afu_reset() - reset the AFU
 * @ctx_cookie: Adapter context.
 *
 * Return: 0 always; an AFU reset is not yet supported by the OCXL
 * transport services, so success is returned until it is implemented.
 */
static int ocxlflash_afu_reset(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	/* Pending implementation from OCXL transport services */
	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);

	/* Silently return success until it is implemented */
	return 0;
}

/**
 * ocxlflash_set_master() - sets the context as master
 * @ctx_cookie: Adapter context to set as master.
 */
static void ocxlflash_set_master(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	ctx->master = true;
}

/**
 * ocxlflash_get_context() - obtains the context associated with the host
 * @pdev: PCI device associated with the host.
 * @afu_cookie: Hardware AFU associated with the host.
 *
 * Return: pointer to the host adapter context
 */
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	return afu->ocxl_ctx;
}

/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev: PCI device associated with the host.
 * @afu_cookie: Hardware AFU associated with the host.
 *
 * Return: adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	struct device *dev = afu->dev;
	struct ocxlflash_context *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		dev_err(dev, "%s: Context allocation failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	idr_preload(GFP_KERNEL);
	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
	idr_preload_end();
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
		goto err2;
	}

	spin_lock_init(&ctx->slock);
	init_waitqueue_head(&ctx->wq);
	mutex_init(&ctx->state_mutex);

	ctx->state = OPENED;
	ctx->pe = rc;
	ctx->master = false;
	ctx->mapping = NULL;
	ctx->hw_afu = afu;
	ctx->irq_bitmap = 0;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
out:
	return ctx;
err2:
	kfree(ctx);
err1:
	ctx = ERR_PTR(rc);
	goto out;
}
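
/*
 * The IDR above doubles as the PASID allocator: idr_alloc() returns the
 * first free id in [0, afu->max_pasid), which becomes the context's
 * process element (ctx->pe) and is later handed to ocxl_link_add_pe() as
 * the PASID.
 */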

/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie: Adapter context to be released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev;
	int rc = 0;

	if (!ctx)
		goto out;

	dev = ctx->hw_afu->dev;
	mutex_lock(&ctx->state_mutex);
	if (ctx->state >= STARTED) {
		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		rc = -EBUSY;
		goto out;
	}
	mutex_unlock(&ctx->state_mutex);

	idr_remove(&ctx->hw_afu->idr, ctx->pe);
	ocxlflash_release_mapping(ctx);
	kfree(ctx);
out:
	return rc;
}

/**
 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
 * @afu_cookie: Hardware AFU associated with the host.
 * @image: Whether to load the same image on PERST.
 */
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	afu->perst_same_image = image;
}

/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev: PCI device associated with the host.
 * @buf: Buffer to get the VPD data.
 * @count: Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
					  size_t count)
{
	return pci_read_vpd(pdev, 0, count, buf);
}

/**
 * free_afu_irqs() - internal service to free interrupts
 * @ctx: Adapter context.
 */
static void free_afu_irqs(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	int i;

	if (!ctx->irqs) {
		dev_err(dev, "%s: Interrupts not allocated\n", __func__);
		return;
	}

	/* Last valid entry sits at num_irqs - 1 */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);

	kfree(ctx->irqs);
	ctx->irqs = NULL;
}

/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx: Context associated with the request.
 * @num: Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irqs;
	u64 addr;
	int rc = 0;
	int hwirq;
	int i;

	if (ctx->irqs) {
		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
		rc = -EEXIST;
		goto out;
	}

	if (num > OCXL_MAX_IRQS) {
		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
		rc = -EINVAL;
		goto out;
	}

	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
	if (unlikely(!irqs)) {
		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num; i++) {
		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
		if (unlikely(rc)) {
			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
				__func__, rc);
			goto err;
		}

		irqs[i].hwirq = hwirq;
		irqs[i].ptrig = addr;
	}

	ctx->irqs = irqs;
	ctx->num_irqs = num;
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
	kfree(irqs);
	goto out;
}

/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie: Context associated with the request.
 * @num: Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return alloc_afu_irqs(ctx_cookie, num);
}

/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie: Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
	free_afu_irqs(ctx_cookie);
}

/**
 * ocxlflash_unconfig_afu() - unconfigure the AFU
 * @afu: AFU associated with the host.
 */
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
{
	if (afu->gmmio_virt) {
		iounmap(afu->gmmio_virt);
		afu->gmmio_virt = NULL;
	}
}

/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie: AFU to be freed.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	int pos;

	if (!afu)
		return;

	ocxlflash_release_context(afu->ocxl_ctx);
	idr_destroy(&afu->idr);

	/* Disable the AFU */
	pos = afu->acfg.dvsec_afu_control_pos;
	ocxl_config_set_afu_state(afu->pdev, pos, 0);

	ocxlflash_unconfig_afu(afu);
	kfree(afu);
}

/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev: PCI device associated with the host.
 * @afu: AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	u16 base, enabled, supported;
	int rc = 0;

	/* Read DVSEC config of the function */
	rc = ocxl_config_read_function(pdev, fcfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Check if function has AFUs defined, only 1 per function supported */
	if (fcfg->max_afu_index >= 0) {
		afu->is_present = true;
		if (fcfg->max_afu_index != 0)
			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
				 __func__, fcfg->max_afu_index);
	}

	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	afu->fn_actag_base = base;
	afu->fn_actag_enabled = enabled;

	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
		__func__, base, enabled);

	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
			__func__, rc);
		goto err;
	}
out:
	return rc;
err:
	ocxl_link_release(pdev, afu->link_token);
	goto out;
}
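
/*
 * Roughly, the two link-level resources set up above are: acTags, the tags
 * the AFU places on link commands so traffic can be associated with the
 * correct address context, carved out of the function's range and enabled
 * here; and the TL (transaction layer) template configuration that is
 * negotiated with the device via ocxl_config_set_TL().
 */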

/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev: PCI device associated with the host.
 * @afu: AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	ocxl_link_release(pdev, afu->link_token);
}

/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu: AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	phys_addr_t gmmio, ppmmio;
	int rc = 0;

	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
			__func__, rc);
		goto out;
	}
	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
	gmmio += acfg->global_mmio_offset;

	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
	ppmmio += acfg->pp_mmio_offset;

	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
	if (unlikely(!afu->gmmio_virt)) {
		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	afu->gmmio_phys = gmmio;
	afu->ppmmio_phys = ppmmio;
out:
	return rc;
err2:
	pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
	pci_release_region(pdev, acfg->global_mmio_bar);
	goto out;
}
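
/*
 * Only the global MMIO space is ioremap'd up front; the per-process space
 * is recorded by physical address alone, since each context maps just its
 * own slice (pe * pp_mmio_stride) later, via start_context() and
 * ocxlflash_psa_map().
 */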

/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev: PCI device associated with the host.
 * @afu: AFU associated with the host.
 *
 * Must be called _after_ host function configuration.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	int count;
	int base;
	int pos;
	int rc = 0;

	/* This HW AFU function does not have any AFUs defined */
	if (!afu->is_present)
		goto out;

	/* Read AFU config at index 0 */
	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Only one AFU per function is supported, so actag_base is same */
	base = afu->fn_actag_base;
	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
	pos = acfg->dvsec_afu_control_pos;

	ocxl_config_set_afu_actag(pdev, pos, base, count);
	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
	afu->afu_actag_base = base;
	afu->afu_actag_enabled = count;
	afu->max_pasid = 1 << acfg->pasid_supported_log;

	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

	rc = ocxlflash_map_mmio(afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Enable the AFU */
	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
	return rc;
}

/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev: PCI device associated with the host.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocxlflash_context *ctx;
	struct ocxl_hw_afu *afu;
	int rc;

	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
	if (unlikely(!afu)) {
		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
		goto out;
	}

	afu->pdev = pdev;
	afu->dev = dev;
	idr_init(&afu->idr);

	rc = ocxlflash_config_fn(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Function configuration failed rc=%d\n",
			__func__, rc);
		goto err1;
	}

	rc = ocxlflash_config_afu(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx = ocxlflash_dev_context_init(pdev, afu);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	afu->ocxl_ctx = ctx;
out:
	return afu;
err3:
	ocxlflash_unconfig_afu(afu);
err2:
	ocxlflash_unconfig_fn(pdev, afu);
err1:
	idr_destroy(&afu->idr);
	kfree(afu);
	afu = NULL;
	goto out;
}

/**
 * ctx_event_pending() - check for any event pending on the context
 * @ctx: Context to be checked.
 *
 * Return: true if there is an event pending, false if none pending
 */
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault)
		return true;

	return false;
}

/**
 * afu_poll() - poll the AFU for events on the context
 * @file: File associated with the adapter context.
 * @poll: Poll structure from the user.
 *
 * Return: poll mask
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	ulong lock_flags;
	int mask = 0;

	poll_wait(file, &ctx->wq, poll);

	spin_lock_irqsave(&ctx->slock, lock_flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->state == CLOSED)
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
		__func__, ctx->pe, mask);

	return mask;
}

/**
 * afu_read() - perform a read on the context for any event
 * @file: File associated with the adapter context.
 * @buf: Buffer to receive the data.
 * @count: Size of buffer (maximum bytes that can be read).
 * @off: Offset.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	ulong lock_flags;
	ssize_t esize;
	ssize_t rc;
	int bit;
	DEFINE_WAIT(event_wait);

	if (*off != 0) {
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
			__func__, *off);
		rc = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&ctx->slock, lock_flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
			break;

		if (file->f_flags & O_NONBLOCK) {
			dev_err(dev, "%s: File cannot be blocked on I/O\n",
				__func__);
			rc = -EAGAIN;
			goto err;
		}

		if (signal_pending(current)) {
			dev_err(dev, "%s: Signal pending on the process\n",
				__func__);
			rc = -ERESTARTSYS;
			goto err;
		}

		spin_unlock_irqrestore(&ctx->slock, lock_flags);
		schedule();
		spin_lock_irqsave(&ctx->slock, lock_flags);
	}

	finish_wait(&ctx->wq, &event_wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;

		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	}

	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	rc = event.header.size;
out:
	return rc;
err:
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	goto out;
}
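
/*
 * The event copied to user space reuses the cxl uapi layout: a struct
 * cxl_event_header followed by whichever union member applies, with
 * header.size covering only the bytes actually populated. Interrupt events
 * drain the irq bitmap one bit per read; pending_irq stays set until the
 * bitmap is empty, so consecutive reads deliver queued interrupts in bit
 * order.
 */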

/**
 * afu_release() - release and free the context
 * @inode: File inode pointer.
 * @file: File associated with the context.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_release(struct inode *inode, struct file *file)
{
	struct ocxlflash_context *ctx = file->private_data;
	int i;

	/* Unmap and free the interrupts associated with the context */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);

	return ocxlflash_release_context(ctx);
}

/**
 * ocxlflash_mmap_fault() - mmap fault handler
 * @vmf: VM fault associated with current fault.
 *
 * Return: VM fault status; VM_FAULT_NOPAGE on success, VM_FAULT_SIGBUS on error
 */
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxlflash_context *ctx = vma->vm_file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	u64 mmio_area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= ctx->psn_size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n",
			__func__, ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ctx->state_mutex);

	mmio_area = ctx->psn_phys;
	mmio_area += offset;

	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}

static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
};

/**
 * afu_mmap() - map the fault handler operations
 * @file: File associated with the context.
 * @vma: VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ocxlflash_context *ctx = file->private_data;

	if ((vma_pages(vma) + vma->vm_pgoff) >
	    (ctx->psn_size >> PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxlflash_vmops;
	return 0;
}
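
/*
 * The VMA is marked VM_IO | VM_PFNMAP with non-cached protection because
 * it maps device MMIO rather than RAM: no struct pages back the range, and
 * ocxlflash_mmap_fault() installs raw PFNs on first touch instead of the
 * mapping being prepopulated.
 */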

static const struct file_operations ocxl_afu_fops = {
	.owner		= THIS_MODULE,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.mmap		= afu_mmap,
};

#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
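
/*
 * PATCH_FOPS() fills in only the methods the caller left NULL. For
 * example, PATCH_FOPS(poll) expands to:
 *
 *	do { if (!fops->poll) fops->poll = ocxl_afu_fops.poll; } while (0);
 *
 * letting a caller of ocxlflash_get_fd() override individual file
 * operations while inheriting the defaults for the rest.
 */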

/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie: Adapter context.
 * @fops: File operations to be associated.
 * @fd: File descriptor to be returned to the caller.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	struct file *file;
	int flags, fdtmp;
	int rc = 0;
	char *name = NULL;

	/* Only allow one fd per context */
	if (ctx->mapping) {
		dev_err(dev, "%s: Context is already mapped to an fd\n",
			__func__);
		rc = -EEXIST;
		goto err1;
	}

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	fdtmp = rc;

	/* Patch the file ops that are not defined */
	if (fops) {
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(mmap);
	} else /* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;

	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx->mapping = file->f_mapping;
	*fd = fdtmp;
out:
	return file;
err2:
	put_unused_fd(fdtmp);
err1:
	file = ERR_PTR(rc);
	goto out;
}
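
/*
 * Note that ocxlflash_get_fd() only reserves the descriptor with
 * get_unused_fd_flags(); the caller is expected to complete its setup and
 * then bind the returned file to the descriptor with fd_install(), which
 * is why the fd and the file are returned separately.
 */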

/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file: File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
	return file->private_data;
}

/**
 * ocxlflash_afu_irq() - interrupt handler for user contexts
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the context.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
{
	struct ocxlflash_context *ctx = data;
	struct device *dev = ctx->hw_afu->dev;
	int i;

	dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
		__func__, ctx->pe, irq);

	for (i = 0; i < ctx->num_irqs; i++) {
		if (ctx->irqs[i].virq == irq)
			break;
	}

	if (unlikely(i >= ctx->num_irqs)) {
		dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
		goto out;
	}

	spin_lock(&ctx->slock);
	/* Record the 0-based interrupt index; afu_read() reports it as bit + 1 */
	set_bit(i, &ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
out:
	return IRQ_HANDLED;
}
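
/*
 * Bitmap convention shared with afu_read(): bit n of irq_bitmap stands for
 * the context's interrupt index n (the number passed to afu_map_irq()),
 * and user space sees the event as AFU interrupt n + 1.
 */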

/**
 * ocxlflash_start_work() - start a user context
 * @ctx_cookie: Context to be started.
 * @num_irqs: Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	char *name;
	int rc = 0;
	int i;

	rc = alloc_afu_irqs(ctx, num_irqs);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
		goto out;
	}

	for (i = 0; i < num_irqs; i++) {
		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
				 dev_name(dev), ctx->pe, i);
		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
		kfree(name);
		if (unlikely(rc < 0)) {
			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
				__func__, rc);
			goto err;
		}
	}

	rc = start_context(ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
		goto err;
	}
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);
	goto out;
}

/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file: File installed with adapter file descriptor.
 * @vma: VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	return afu_mmap(file, vma);
}

/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode: File inode pointer.
 * @file: File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}

/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= ocxlflash_psa_map,
	.psa_unmap		= ocxlflash_psa_unmap,
	.process_element	= ocxlflash_process_element,
	.map_afu_irq		= ocxlflash_map_afu_irq,
	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
	.start_context		= ocxlflash_start_context,
	.stop_context		= ocxlflash_stop_context,
	.afu_reset		= ocxlflash_afu_reset,
	.set_master		= ocxlflash_set_master,
	.get_context		= ocxlflash_get_context,
	.dev_context_init	= ocxlflash_dev_context_init,
	.release_context	= ocxlflash_release_context,
	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
	.free_afu_irqs		= ocxlflash_free_afu_irqs,
	.create_afu		= ocxlflash_create_afu,
	.destroy_afu		= ocxlflash_destroy_afu,
	.get_fd			= ocxlflash_get_fd,
	.fops_get_context	= ocxlflash_fops_get_context,
	.start_work		= ocxlflash_start_work,
	.fd_mmap		= ocxlflash_fd_mmap,
	.fd_release		= ocxlflash_fd_release,
};