dfl-afu-main.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Driver for FPGA Accelerated Function Unit (AFU)
  4. *
  5. * Copyright (C) 2017-2018 Intel Corporation, Inc.
  6. *
  7. * Authors:
  8. * Wu Hao <hao.wu@intel.com>
  9. * Xiao Guangrong <guangrong.xiao@linux.intel.com>
  10. * Joseph Grecco <joe.grecco@intel.com>
  11. * Enno Luebbers <enno.luebbers@intel.com>
  12. * Tim Whisonant <tim.whisonant@intel.com>
  13. * Ananda Ravuri <ananda.ravuri@intel.com>
  14. * Henry Mitchel <henry.mitchel@intel.com>
  15. */
  16. #include <linux/kernel.h>
  17. #include <linux/module.h>
  18. #include <linux/uaccess.h>
  19. #include <linux/fpga-dfl.h>
  20. #include "dfl-afu.h"
/**
 * port_enable - enable a port
 * @pdev: port platform device.
 *
 * Enable Port by clear the port soft reset bit, which is set by default.
 * The AFU is unable to respond to any MMIO access while in reset.
 * port_enable function should only be used after port_disable function.
 *
 * NOTE(review): all callers (port_reset, port_enable_set) hold pdata->lock,
 * which is what makes the disable_count bookkeeping safe — confirm any new
 * caller does the same.
 */
static void port_enable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	/* Unbalanced enable: there was no matching port_disable() call. */
	WARN_ON(!pdata->disable_count);

	/* Reset is refcounted; only the last enabler releases the port. */
	if (--pdata->disable_count != 0)
		return;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Clear port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);
}
#define RST_POLL_INVL 10 /* us */
#define RST_POLL_TIMEOUT 1000 /* us */

/**
 * port_disable - disable a port
 * @pdev: port platform device.
 *
 * Disable Port by setting the port soft reset bit, it puts the port into
 * reset.
 *
 * Return: 0 on success, -ETIMEDOUT if hardware never acks the soft reset.
 */
static int port_disable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	/* Refcounted: only the first disabler actually asserts the reset. */
	if (pdata->disable_count++ != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Set port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v |= PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW sets ack bit to 1 when all outstanding requests have been drained
	 * on this port and minimum soft reset pulse width has elapsed.
	 * Driver polls port_soft_reset_ack to determine if reset done by HW.
	 *
	 * NOTE(review): on timeout disable_count stays incremented, so the
	 * port remains accounted as disabled — confirm this is intended.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
			       v & PORT_CTRL_SFTRST_ACK,
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(&pdev->dev, "timeout, fail to reset device\n");
		return -ETIMEDOUT;
	}

	return 0;
}
  77. /*
  78. * This function resets the FPGA Port and its accelerator (AFU) by function
  79. * __port_disable and __port_enable (set port soft reset bit and then clear
  80. * it). Userspace can do Port reset at any time, e.g. during DMA or Partial
  81. * Reconfiguration. But it should never cause any system level issue, only
  82. * functional failure (e.g. DMA or PR operation failure) and be recoverable
  83. * from the failure.
  84. *
  85. * Note: the accelerator (AFU) is not accessible when its port is in reset
  86. * (disabled). Any attempts on MMIO access to AFU while in reset, will
  87. * result errors reported via port error reporting sub feature (if present).
  88. */
  89. static int __port_reset(struct platform_device *pdev)
  90. {
  91. int ret;
  92. ret = port_disable(pdev);
  93. if (!ret)
  94. port_enable(pdev);
  95. return ret;
  96. }
/* Locked wrapper around __port_reset(); serializes with other port ops. */
static int port_reset(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	mutex_lock(&pdata->lock);
	ret = __port_reset(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}
  106. static int port_get_id(struct platform_device *pdev)
  107. {
  108. void __iomem *base;
  109. base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
  110. return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
  111. }
  112. static ssize_t
  113. id_show(struct device *dev, struct device_attribute *attr, char *buf)
  114. {
  115. int id = port_get_id(to_platform_device(dev));
  116. return scnprintf(buf, PAGE_SIZE, "%d\n", id);
  117. }
  118. static DEVICE_ATTR_RO(id);
/* sysfs attributes published by the port header sub feature. */
static const struct attribute *port_hdr_attrs[] = {
	&dev_attr_id.attr,
	NULL,
};
/*
 * Port header sub feature init: put the port into a known state via a
 * reset, then publish its sysfs attributes.
 */
static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "PORT HDR Init.\n");

	/*
	 * NOTE(review): port_reset()'s return value is ignored — init
	 * proceeds even if the reset times out; confirm this is intended.
	 */
	port_reset(pdev);

	return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs);
}
/* Port header sub feature teardown: remove its sysfs attributes. */
static void port_hdr_uinit(struct platform_device *pdev,
			   struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "PORT HDR UInit.\n");

	sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs);
}
  136. static long
  137. port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
  138. unsigned int cmd, unsigned long arg)
  139. {
  140. long ret;
  141. switch (cmd) {
  142. case DFL_FPGA_PORT_RESET:
  143. if (!arg)
  144. ret = port_reset(pdev);
  145. else
  146. ret = -EINVAL;
  147. break;
  148. default:
  149. dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
  150. ret = -ENODEV;
  151. }
  152. return ret;
  153. }
/* Callbacks for the port header sub feature (PORT_FEATURE_ID_HEADER). */
static const struct dfl_feature_ops port_hdr_ops = {
	.init = port_hdr_init,
	.uinit = port_hdr_uinit,
	.ioctl = port_hdr_ioctl,
};
/* sysfs "afu_id" attribute: the 128-bit AFU GUID read from AFU MMIO. */
static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 guidl, guidh;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);

	mutex_lock(&pdata->lock);
	/* The AFU does not respond to MMIO while the port is in reset. */
	if (pdata->disable_count) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	guidl = readq(base + GUID_L);
	guidh = readq(base + GUID_H);
	mutex_unlock(&pdata->lock);

	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
static DEVICE_ATTR_RO(afu_id);
/* sysfs attributes published by the AFU sub feature. */
static const struct attribute *port_afu_attrs[] = {
	&dev_attr_afu_id.attr,
	NULL
};
/*
 * AFU sub feature init: register the AFU MMIO region (readable, writable
 * and mmap-able from userspace) and expose the afu_id sysfs attribute.
 */
static int port_afu_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];
	int ret;

	dev_dbg(&pdev->dev, "PORT AFU Init.\n");

	ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				  DFL_PORT_REGION_INDEX_AFU, resource_size(res),
				  res->start, DFL_PORT_REGION_READ |
				  DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP);
	if (ret)
		return ret;

	return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs);
}
/* AFU sub feature teardown: remove its sysfs attributes. */
static void port_afu_uinit(struct platform_device *pdev,
			   struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "PORT AFU UInit.\n");

	sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs);
}
/* Callbacks for the AFU sub feature (PORT_FEATURE_ID_AFU); no ioctl. */
static const struct dfl_feature_ops port_afu_ops = {
	.init = port_afu_init,
	.uinit = port_afu_uinit,
};
/* Sub feature drivers, matched by feature id; a NULL .ops terminates. */
static struct dfl_feature_driver port_feature_drvs[] = {
	{
		.id = PORT_FEATURE_ID_HEADER,
		.ops = &port_hdr_ops,
	},
	{
		.id = PORT_FEATURE_ID_AFU,
		.ops = &port_afu_ops,
	},
	{
		.ops = NULL,
	}
};
/*
 * Open the AFU character device: take a usage reference on the feature
 * device and stash the platform device for the other fops callbacks.
 */
static int afu_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata;
	int ret;

	pdata = dev_get_platdata(&fdev->dev);
	if (WARN_ON(!pdata))
		return -ENODEV;

	/* May fail if the device is already in (exclusive) use. */
	ret = dfl_feature_dev_use_begin(pdata);
	if (ret)
		return ret;

	dev_dbg(&fdev->dev, "Device File Open\n");
	filp->private_data = fdev;

	return 0;
}
/*
 * Release the AFU character device: reset the port, tear down all DMA
 * regions established by this user, then drop the usage reference.
 */
static int afu_release(struct inode *inode, struct file *filp)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;

	dev_dbg(&pdev->dev, "Device File Release\n");

	pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	/* Reset first so the hardware stops using the DMA mappings. */
	__port_reset(pdev);
	afu_dma_region_destroy(pdata);
	mutex_unlock(&pdata->lock);

	dfl_feature_dev_use_end(pdata);

	return 0;
}
/*
 * DFL_FPGA_CHECK_EXTENSION handler; always reports "not supported" (0)
 * since no extensions are defined yet.
 */
static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}
/*
 * DFL_FPGA_PORT_GET_INFO handler: report region/umsg counts to userspace.
 * Reads only the fields up to num_umsgs (minsz) from the user struct, so
 * older userspace with a shorter struct still works (argsz >= minsz).
 */
static long
afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_info info;
	struct dfl_afu *afu;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&pdata->lock);
	afu = dfl_fpga_pdata_get_private(pdata);
	info.flags = 0;
	info.num_regions = afu->num_regions;
	info.num_umsgs = afu->num_umsgs;
	mutex_unlock(&pdata->lock);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
/*
 * DFL_FPGA_PORT_GET_REGION_INFO handler: look up the MMIO region selected
 * by rinfo.index and return its flags, size and mmap offset. The padding
 * field must be zero (reserved for future use).
 */
static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
				      void __user *arg)
{
	struct dfl_fpga_port_region_info rinfo;
	struct dfl_afu_mmio_region region;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);

	if (copy_from_user(&rinfo, arg, minsz))
		return -EFAULT;

	if (rinfo.argsz < minsz || rinfo.padding)
		return -EINVAL;

	ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
	if (ret)
		return ret;

	rinfo.flags = region.flags;
	rinfo.size = region.size;
	rinfo.offset = region.offset;

	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
		return -EFAULT;

	return 0;
}
/*
 * DFL_FPGA_PORT_DMA_MAP handler: pin the user buffer and map it for
 * device DMA, returning the resulting IOVA to userspace. No flags are
 * defined, so map.flags must be zero.
 */
static long
afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_map map;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);

	if (copy_from_user(&map, arg, minsz))
		return -EFAULT;

	if (map.argsz < minsz || map.flags)
		return -EINVAL;

	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
	if (ret)
		return ret;

	/* Unwind the mapping if we cannot report the IOVA back to the user. */
	if (copy_to_user(arg, &map, sizeof(map))) {
		afu_dma_unmap_region(pdata, map.iova);
		return -EFAULT;
	}

	dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
		(unsigned long long)map.user_addr,
		(unsigned long long)map.length,
		(unsigned long long)map.iova);

	return 0;
}
/*
 * DFL_FPGA_PORT_DMA_UNMAP handler: tear down the DMA mapping identified
 * by unmap.iova. No flags are defined, so unmap.flags must be zero.
 */
static long
afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_unmap unmap;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);

	if (copy_from_user(&unmap, arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz || unmap.flags)
		return -EINVAL;

	return afu_dma_unmap_region(pdata, unmap.iova);
}
/*
 * Top-level ioctl dispatcher for the AFU device file. Framework-level
 * commands are handled inline; anything else is offered to each sub
 * feature's ioctl hook in turn.
 */
static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	pdata = dev_get_platdata(&pdev->dev);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return afu_ioctl_check_extension(pdata, arg);
	case DFL_FPGA_PORT_GET_INFO:
		return afu_ioctl_get_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_GET_REGION_INFO:
		return afu_ioctl_get_region_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_MAP:
		return afu_ioctl_dma_map(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_UNMAP:
		return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
	default:
		/*
		 * Let sub-feature's ioctl function to handle the cmd
		 * Sub-feature's ioctl returns -ENODEV when cmd is not
		 * handled in this sub feature, and returns 0 and other
		 * error code if cmd is handled.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f)
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
	}

	/* No handler claimed the command. */
	return -EINVAL;
}
  368. static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
  369. {
  370. struct platform_device *pdev = filp->private_data;
  371. struct dfl_feature_platform_data *pdata;
  372. u64 size = vma->vm_end - vma->vm_start;
  373. struct dfl_afu_mmio_region region;
  374. u64 offset;
  375. int ret;
  376. if (!(vma->vm_flags & VM_SHARED))
  377. return -EINVAL;
  378. pdata = dev_get_platdata(&pdev->dev);
  379. offset = vma->vm_pgoff << PAGE_SHIFT;
  380. ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
  381. if (ret)
  382. return ret;
  383. if (!(region.flags & DFL_PORT_REGION_MMAP))
  384. return -EINVAL;
  385. if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
  386. return -EPERM;
  387. if ((vma->vm_flags & VM_WRITE) &&
  388. !(region.flags & DFL_PORT_REGION_WRITE))
  389. return -EPERM;
  390. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  391. return remap_pfn_range(vma, vma->vm_start,
  392. (region.phys + (offset - region.offset)) >> PAGE_SHIFT,
  393. size, vma->vm_page_prot);
  394. }
/* File operations for the AFU character device node. */
static const struct file_operations afu_fops = {
	.owner = THIS_MODULE,
	.open = afu_open,
	.release = afu_release,
	.unlocked_ioctl = afu_ioctl,
	.mmap = afu_mmap,
};
/*
 * Allocate the per-port AFU state and initialize its MMIO and DMA region
 * bookkeeping. The allocation is devm-managed, so there is no matching
 * kfree in afu_dev_destroy().
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int afu_dev_init(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_afu *afu;

	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
	if (!afu)
		return -ENOMEM;

	afu->pdata = pdata;

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, afu);
	afu_mmio_region_init(pdata);
	afu_dma_region_init(pdata);
	mutex_unlock(&pdata->lock);

	return 0;
}
  417. static int afu_dev_destroy(struct platform_device *pdev)
  418. {
  419. struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
  420. struct dfl_afu *afu;
  421. mutex_lock(&pdata->lock);
  422. afu = dfl_fpga_pdata_get_private(pdata);
  423. afu_mmio_region_destroy(pdata);
  424. afu_dma_region_destroy(pdata);
  425. dfl_fpga_pdata_set_private(pdata, NULL);
  426. mutex_unlock(&pdata->lock);
  427. return 0;
  428. }
  429. static int port_enable_set(struct platform_device *pdev, bool enable)
  430. {
  431. struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
  432. int ret = 0;
  433. mutex_lock(&pdata->lock);
  434. if (enable)
  435. port_enable(pdev);
  436. else
  437. ret = port_disable(pdev);
  438. mutex_unlock(&pdata->lock);
  439. return ret;
  440. }
/* Port operations registered with the DFL framework for this driver. */
static struct dfl_fpga_port_ops afu_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = port_get_id,
	.enable_set = port_enable_set,
};
/*
 * Probe: allocate the AFU state, bring up all sub features, then register
 * the character device ops. Unwinds in reverse order on failure.
 */
static int afu_probe(struct platform_device *pdev)
{
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	ret = afu_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
	if (ret) {
		dfl_fpga_dev_feature_uinit(pdev);
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	afu_dev_destroy(pdev);
exit:
	return ret;
}
/* Remove: undo afu_probe() in reverse order. */
static int afu_remove(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "%s\n", __func__);

	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	afu_dev_destroy(pdev);

	return 0;
}
/* Binds to port platform devices enumerated by the DFL framework. */
static struct platform_driver afu_driver = {
	.driver = {
		.name = DFL_FPGA_FEATURE_DEV_PORT,
	},
	.probe = afu_probe,
	.remove = afu_remove,
};
/*
 * Module init: register the port ops with the DFL framework first, then
 * the platform driver; roll back the port ops registration on failure.
 */
static int __init afu_init(void)
{
	int ret;

	dfl_fpga_port_ops_add(&afu_port_ops);

	ret = platform_driver_register(&afu_driver);
	if (ret)
		dfl_fpga_port_ops_del(&afu_port_ops);

	return ret;
}
/* Module exit: unregister the driver, then drop the port ops. */
static void __exit afu_exit(void)
{
	platform_driver_unregister(&afu_driver);

	dfl_fpga_port_ops_del(&afu_port_ops);
}
module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
/* Auto-load when the DFL framework creates a "dfl-port" platform device. */
MODULE_ALIAS("platform:dfl-port");