pci_endpoint_test.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Host side test driver to test endpoint functionality
  4. *
  5. * Copyright (C) 2017 Texas Instruments
  6. * Author: Kishon Vijay Abraham I <kishon@ti.com>
  7. */
  8. #include <linux/crc32.h>
  9. #include <linux/cleanup.h>
  10. #include <linux/delay.h>
  11. #include <linux/fs.h>
  12. #include <linux/io.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/irq.h>
  15. #include <linux/miscdevice.h>
  16. #include <linux/module.h>
  17. #include <linux/mutex.h>
  18. #include <linux/random.h>
  19. #include <linux/slab.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/pci.h>
  22. #include <linux/pci_ids.h>
  23. #include <linux/pci_regs.h>
  24. #include <uapi/linux/pcitest.h>
#define DRV_MODULE_NAME				"pci-endpoint-test"

/* IRQ delivery modes, shared with the endpoint-side test function driver */
#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_INTX				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

/* Register map of the endpoint test function, accessed through the test BAR */
#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_INTX_IRQ			BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

/* DMA addresses and transfer parameters for READ/WRITE/COPY commands */
#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20
#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

/* Device IDs of controllers whose endpoint mode this driver can exercise */
#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_TI_J721S2			0xb013
#define PCI_DEVICE_ID_LS1088A			0x80c0
#define PCI_DEVICE_ID_IMX8			0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031

#define PCI_VENDOR_ID_ROCKCHIP			0x1d87
#define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588

/* Allocates the unique per-device id used in the misc device name */
static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
/* Index of a standard PCI BAR */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;	/* mapping of the test register BAR */
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;	/* completed by the IRQ handler */
	int		last_irq;	/* Linux IRQ number of the last interrupt */
	int		num_irqs;	/* vectors currently allocated */
	int		irq_type;	/* IRQ_TYPE_* currently in effect */
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno	test_reg_bar;	/* BAR holding the test registers */
	size_t		alignment;	/* DMA buffer alignment requirement */
	const char	*name;	/* unique name, also used when requesting IRQs */
};

/* Per-device quirks carried in the PCI device ID table's driver_data */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};
  110. static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
  111. u32 offset)
  112. {
  113. return readl(test->base + offset);
  114. }
  115. static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
  116. u32 offset, u32 value)
  117. {
  118. writel(value, test->base + offset);
  119. }
  120. static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
  121. {
  122. struct pci_endpoint_test *test = dev_id;
  123. u32 reg;
  124. reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
  125. if (reg & STATUS_IRQ_RAISED) {
  126. test->last_irq = irq;
  127. complete(&test->irq_raised);
  128. }
  129. return IRQ_HANDLED;
  130. }
  131. static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
  132. {
  133. struct pci_dev *pdev = test->pdev;
  134. pci_free_irq_vectors(pdev);
  135. test->irq_type = IRQ_TYPE_UNDEFINED;
  136. }
  137. static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
  138. int type)
  139. {
  140. int irq = -1;
  141. struct pci_dev *pdev = test->pdev;
  142. struct device *dev = &pdev->dev;
  143. bool res = true;
  144. switch (type) {
  145. case IRQ_TYPE_INTX:
  146. irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
  147. if (irq < 0)
  148. dev_err(dev, "Failed to get Legacy interrupt\n");
  149. break;
  150. case IRQ_TYPE_MSI:
  151. irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
  152. if (irq < 0)
  153. dev_err(dev, "Failed to get MSI interrupts\n");
  154. break;
  155. case IRQ_TYPE_MSIX:
  156. irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
  157. if (irq < 0)
  158. dev_err(dev, "Failed to get MSI-X interrupts\n");
  159. break;
  160. default:
  161. dev_err(dev, "Invalid IRQ type selected\n");
  162. }
  163. if (irq < 0) {
  164. irq = 0;
  165. res = false;
  166. }
  167. test->irq_type = type;
  168. test->num_irqs = irq;
  169. return res;
  170. }
  171. static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
  172. {
  173. int i;
  174. struct pci_dev *pdev = test->pdev;
  175. struct device *dev = &pdev->dev;
  176. for (i = 0; i < test->num_irqs; i++)
  177. devm_free_irq(dev, pci_irq_vector(pdev, i), test);
  178. test->num_irqs = 0;
  179. }
  180. static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
  181. {
  182. int i;
  183. int err;
  184. struct pci_dev *pdev = test->pdev;
  185. struct device *dev = &pdev->dev;
  186. for (i = 0; i < test->num_irqs; i++) {
  187. err = devm_request_irq(dev, pci_irq_vector(pdev, i),
  188. pci_endpoint_test_irqhandler,
  189. IRQF_SHARED, test->name, test);
  190. if (err)
  191. goto fail;
  192. }
  193. return true;
  194. fail:
  195. switch (irq_type) {
  196. case IRQ_TYPE_INTX:
  197. dev_err(dev, "Failed to request IRQ %d for Legacy\n",
  198. pci_irq_vector(pdev, i));
  199. break;
  200. case IRQ_TYPE_MSI:
  201. dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
  202. pci_irq_vector(pdev, i),
  203. i + 1);
  204. break;
  205. case IRQ_TYPE_MSIX:
  206. dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
  207. pci_irq_vector(pdev, i),
  208. i + 1);
  209. break;
  210. }
  211. return false;
  212. }
/* Per-BAR fill byte used by the BAR read/write test (indexed by BAR number) */
static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};
  221. static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
  222. enum pci_barno barno, int offset,
  223. void *write_buf, void *read_buf,
  224. int size)
  225. {
  226. memset(write_buf, bar_test_pattern[barno], size);
  227. memcpy_toio(test->bar[barno] + offset, write_buf, size);
  228. memcpy_fromio(read_buf, test->bar[barno] + offset, size);
  229. return memcmp(write_buf, read_buf, size);
  230. }
/*
 * pci_endpoint_test_bar() - read/write test of one mapped BAR
 *
 * Writes the per-BAR pattern through the BAR and reads it back, in chunks
 * of at most 1 MB.  For the test register BAR only the first 4 bytes are
 * exercised, so the endpoint's register state is not overwritten.
 * Returns true if every read-back matched.
 */
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j, bar_size, buf_size, iters, remain;
	/* __free(kfree): buffers are freed automatically on every return */
	void *write_buf __free(kfree) = NULL;
	void *read_buf __free(kfree) = NULL;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	bar_size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		bar_size = 0x4;

	/*
	 * Allocate a buffer of max size 1MB, and reuse that buffer while
	 * iterating over the whole BAR size (which might be much larger).
	 */
	buf_size = min(SZ_1M, bar_size);

	write_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!write_buf)
		return false;

	read_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!read_buf)
		return false;

	iters = bar_size / buf_size;
	for (j = 0; j < iters; j++)
		if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
						 write_buf, read_buf, buf_size))
			return false;

	remain = bar_size % buf_size;
	if (remain)
		if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * iters,
						 write_buf, read_buf, remain))
			return false;

	return true;
}
  266. static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
  267. {
  268. u32 val;
  269. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
  270. IRQ_TYPE_INTX);
  271. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
  272. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
  273. COMMAND_RAISE_INTX_IRQ);
  274. val = wait_for_completion_timeout(&test->irq_raised,
  275. msecs_to_jiffies(1000));
  276. if (!val)
  277. return false;
  278. return true;
  279. }
  280. static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
  281. u16 msi_num, bool msix)
  282. {
  283. u32 val;
  284. struct pci_dev *pdev = test->pdev;
  285. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
  286. msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
  287. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
  288. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
  289. msix ? COMMAND_RAISE_MSIX_IRQ :
  290. COMMAND_RAISE_MSI_IRQ);
  291. val = wait_for_completion_timeout(&test->irq_raised,
  292. msecs_to_jiffies(1000));
  293. if (!val)
  294. return false;
  295. return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
  296. }
  297. static int pci_endpoint_test_validate_xfer_params(struct device *dev,
  298. struct pci_endpoint_test_xfer_param *param, size_t alignment)
  299. {
  300. if (!param->size) {
  301. dev_dbg(dev, "Data size is zero\n");
  302. return -EINVAL;
  303. }
  304. if (param->size > SIZE_MAX - alignment) {
  305. dev_dbg(dev, "Maximum transfer data size exceeded\n");
  306. return -EINVAL;
  307. }
  308. return 0;
  309. }
/*
 * pci_endpoint_test_copy() - test an endpoint-driven copy between two host
 * buffers
 *
 * Allocates a randomized source buffer and a destination buffer, maps both
 * for DMA, programs their bus addresses and the size into the test
 * registers and issues COMMAND_COPY.  The endpoint copies source to
 * destination and raises an interrupt on completion; the test passes when
 * the CRC32 of the two buffers match.  @arg is a userspace pointer to
 * struct pci_endpoint_test_xfer_param.  Returns true on success.
 */
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate so an @alignment-aligned window of @size fits. */
	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	/* Align the bus address the endpoint will read from, if required. */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU inspects the destination buffer. */
	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}
/*
 * pci_endpoint_test_write() - test a host-to-endpoint transfer
 *
 * Fills a host buffer with random data, publishes its CRC32 through the
 * CHECKSUM register and issues COMMAND_READ — i.e. the endpoint reads the
 * buffer from host memory and verifies the checksum on its side.  The
 * test passes when the endpoint sets STATUS_READ_SUCCESS.  @arg is a
 * userspace pointer to struct pci_endpoint_test_xfer_param.  Returns true
 * on success.
 */
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate so an @alignment-aligned window of @size fits. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Align the bus address the endpoint will read from, if required. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
/*
 * pci_endpoint_test_read() - test an endpoint-to-host transfer
 *
 * Allocates a host buffer, programs its bus address and issues
 * COMMAND_WRITE — i.e. the endpoint writes data into host memory and
 * publishes its checksum.  The test passes when the CRC32 of the received
 * buffer matches the value read back from the CHECKSUM register.  @arg is
 * a userspace pointer to struct pci_endpoint_test_xfer_param.  Returns
 * true on success.
 */
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate so an @alignment-aligned window of @size fits. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Align the bus address the endpoint will write to, if required. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU computes the checksum of the received data. */
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
/* Tear down all test IRQs: handlers first, then the vectors themselves. */
static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}
/*
 * pci_endpoint_test_set_irq() - switch the device to another IRQ type
 * @req_irq_type: one of IRQ_TYPE_INTX, IRQ_TYPE_MSI or IRQ_TYPE_MSIX
 *
 * No-op if the requested type is already active.  Otherwise the current
 * handlers and vectors are torn down and re-allocated for the new type.
 * Returns true on success; on failure the device is left without IRQ
 * vectors (test->irq_type == IRQ_TYPE_UNDEFINED).
 */
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	/* Handlers must go before their vectors are freed. */
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}
  612. static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
  613. unsigned long arg)
  614. {
  615. int ret = -EINVAL;
  616. enum pci_barno bar;
  617. struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
  618. struct pci_dev *pdev = test->pdev;
  619. mutex_lock(&test->mutex);
  620. reinit_completion(&test->irq_raised);
  621. test->last_irq = -ENODATA;
  622. switch (cmd) {
  623. case PCITEST_BAR:
  624. bar = arg;
  625. if (bar > BAR_5)
  626. goto ret;
  627. if (is_am654_pci_dev(pdev) && bar == BAR_0)
  628. goto ret;
  629. ret = pci_endpoint_test_bar(test, bar);
  630. break;
  631. case PCITEST_INTX_IRQ:
  632. ret = pci_endpoint_test_intx_irq(test);
  633. break;
  634. case PCITEST_MSI:
  635. case PCITEST_MSIX:
  636. ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
  637. break;
  638. case PCITEST_WRITE:
  639. ret = pci_endpoint_test_write(test, arg);
  640. break;
  641. case PCITEST_READ:
  642. ret = pci_endpoint_test_read(test, arg);
  643. break;
  644. case PCITEST_COPY:
  645. ret = pci_endpoint_test_copy(test, arg);
  646. break;
  647. case PCITEST_SET_IRQTYPE:
  648. ret = pci_endpoint_test_set_irq(test, arg);
  649. break;
  650. case PCITEST_GET_IRQTYPE:
  651. ret = irq_type;
  652. break;
  653. case PCITEST_CLEAR_IRQ:
  654. ret = pci_endpoint_test_clear_irq(test);
  655. break;
  656. }
  657. ret:
  658. mutex_unlock(&test->mutex);
  659. return ret;
  660. }
/* Userspace interface of the misc device: everything goes through ioctls */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
/*
 * pci_endpoint_test_probe() - set up a newly bound endpoint test device
 *
 * Enables the device, maps every memory BAR, allocates IRQ vectors and
 * handlers, and registers a misc device named DRV_MODULE_NAME ".<id>"
 * through which userspace drives the tests.  Per-device parameters (test
 * register BAR, DMA alignment, IRQ type) come from the ID table's
 * driver_data when present; otherwise BAR0, no alignment and the module's
 * irq_type parameter are used.
 */
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_INTX;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	/* best effort: the return value is not checked here */
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	/* Map every memory BAR; only the test register BAR is mandatory. */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

	/* Unwind in strict reverse order of acquisition. */
err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}
/*
 * pci_endpoint_test_remove() - undo probe when the device is unbound
 *
 * The instance id is recovered from the misc device name
 * (DRV_MODULE_NAME ".<id>") so it can be returned to the IDA.
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/* Default quirks: test registers in BAR0, 4K DMA alignment, MSI IRQs */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

/* TI AM654: test registers live in BAR2, 64K DMA alignment */
static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

/* TI J721E family: default BAR0, 256-byte DMA alignment */
static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

/* Rockchip RK3588: default BAR0, 64K DMA alignment */
static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};
/*
 * If the controller's Vendor/Device ID are programmable, you may be able to
 * use one of the existing entries for testing instead of adding a new one.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	/* Entries without driver_data use the probe-time fallback values */
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");