// SPDX-License-Identifier: GPL-2.0
/*
 * Data Object Exchange
 *	PCIe r6.0, sec 6.30 DOE
 *
 * Copyright (C) 2021 Huawei
 *	Jonathan Cameron <Jonathan.Cameron@huawei.com>
 *
 * Copyright (C) 2022 Intel Corporation
 *	Ira Weiny <ira.weiny@intel.com>
 */

#define dev_fmt(fmt) "DOE: " fmt

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/workqueue.h>

#include "pci.h"

#define PCI_DOE_PROTOCOL_DISCOVERY 0

/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
#define PCI_DOE_POLL_INTERVAL	(PCI_DOE_TIMEOUT / 128)

#define PCI_DOE_FLAG_CANCEL	0
#define PCI_DOE_FLAG_DEAD	1

/* Max data object length is 2^18 dwords */
#define PCI_DOE_MAX_LENGTH	(1 << 18)

/**
 * struct pci_doe_mb - State for a single DOE mailbox
 *
 * This state is used to manage a single DOE mailbox capability. All fields
 * should be considered opaque to the consumers and the structure passed into
 * the helpers below after being created by pci_doe_create_mb().
 *
 * @pdev: PCI device this mailbox belongs to
 * @cap_offset: Capability offset
 * @prots: Array of protocols supported (encoded as long values)
 * @wq: Wait queue for work item
 * @work_queue: Queue of pci_doe_work items
 * @flags: Bit array of PCI_DOE_FLAG_* flags
 */
struct pci_doe_mb {
	struct pci_dev *pdev;
	u16 cap_offset;
	struct xarray prots;

	wait_queue_head_t wq;
	struct workqueue_struct *work_queue;
	unsigned long flags;
};

struct pci_doe_protocol {
	u16 vid;
	u8 type;
};

/**
 * struct pci_doe_task - represents a single query/response
 *
 * @prot: DOE Protocol
 * @request_pl: The request payload
 * @request_pl_sz: Size of the request payload (bytes)
 * @response_pl: The response payload
 * @response_pl_sz: Size of the response payload (bytes)
 * @rv: Return value. Length of received response or error (bytes)
 * @complete: Called when task is complete
 * @private: Private data for the consumer
 * @work: Used internally by the mailbox
 * @doe_mb: Used internally by the mailbox
 */
struct pci_doe_task {
	struct pci_doe_protocol prot;
	const __le32 *request_pl;
	size_t request_pl_sz;
	__le32 *response_pl;
	size_t response_pl_sz;
	int rv;
	void (*complete)(struct pci_doe_task *task);
	void *private;

	/* initialized by pci_doe_submit_task() */
	struct work_struct work;
	struct pci_doe_mb *doe_mb;
};

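/*
 * Note: pci_doe_wait() sleeps for up to @timeout jiffies. A return of 0
 * means the full interval elapsed; -EIO means the wait was cut short
 * because the mailbox is being cancelled (PCI_DOE_FLAG_CANCEL set).
 */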
static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
{
	if (wait_event_timeout(doe_mb->wq,
			       test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags),
			       timeout))
		return -EIO;
	return 0;
}

static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;

	pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val);
}

static int pci_doe_abort(struct pci_doe_mb *doe_mb)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	unsigned long timeout_jiffies;

	pci_dbg(pdev, "[%x] Issuing Abort\n", offset);

	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT);

	do {
		int rc;
		u32 val;

		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
		if (rc)
			return rc;
		pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);

		/* Abort success! */
		if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) &&
		    !FIELD_GET(PCI_DOE_STATUS_BUSY, val))
			return 0;

	} while (!time_after(jiffies, timeout_jiffies));

	/* Abort has timed out and the MB is dead */
	pci_err(pdev, "[%x] ABORT timed out\n", offset);

	return -EIO;
}

static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
			    struct pci_doe_task *task)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	size_t length, remainder;
	u32 val;
	int i;

	/*
	 * Check the DOE busy bit is not set. If it is set, this could indicate
	 * someone other than Linux (e.g. firmware) is using the mailbox. Note
	 * it is expected that firmware and OS will negotiate access rights via
	 * an as yet to be defined method.
	 */
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
		return -EBUSY;

	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
		return -EIO;

	/* Length is 2 DW of header + length of payload in DW */
	length = 2 + DIV_ROUND_UP(task->request_pl_sz, sizeof(__le32));
	if (length > PCI_DOE_MAX_LENGTH)
		return -EIO;
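	/* A Length field of 0 encodes the maximum data object size of 2^18 dwords */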
	if (length == PCI_DOE_MAX_LENGTH)
		length = 0;

	/* Write DOE Header */
	val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
	      FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
			       FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
					  length));

	/* Write payload */
	for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++)
		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
				       le32_to_cpu(task->request_pl[i]));

	/* Write last payload dword */
	remainder = task->request_pl_sz % sizeof(__le32);
	if (remainder) {
		val = 0;
		memcpy(&val, &task->request_pl[i], remainder);
		le32_to_cpus(&val);
		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
	}

	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);

	return 0;
}

static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb)
{
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	u32 val;

	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val))
		return true;
	return false;
}

static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
{
	size_t length, payload_length, remainder, received;
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	int i = 0;
	u32 val;

	/* Read the first dword to get the protocol */
	pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
	if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
	    (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
		dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
				    doe_mb->cap_offset, task->prot.vid, task->prot.type,
				    FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
				    FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
		return -EIO;
	}
	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);

	/* Read the second dword to get the length */
	pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
	pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);

	length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val);
	/* A value of 0x0 indicates max data object length */
	if (!length)
		length = PCI_DOE_MAX_LENGTH;
	if (length < 2)
		return -EIO;

	/* First 2 dwords have already been read */
	length -= 2;
	received = task->response_pl_sz;
	payload_length = DIV_ROUND_UP(task->response_pl_sz, sizeof(__le32));
	remainder = task->response_pl_sz % sizeof(__le32);

	/* remainder signifies number of data bytes in last payload dword */
	if (!remainder)
		remainder = sizeof(__le32);

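	/* The device may return fewer dwords than the buffer holds; truncate */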
	if (length < payload_length) {
		received = length * sizeof(__le32);
		payload_length = length;
		remainder = sizeof(__le32);
	}

	if (payload_length) {
		/* Read all payload dwords except the last */
		for (; i < payload_length - 1; i++) {
			pci_read_config_dword(pdev, offset + PCI_DOE_READ,
					      &val);
			task->response_pl[i] = cpu_to_le32(val);
			pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
		}

		/* Read last payload dword */
		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
		cpu_to_le32s(&val);
		memcpy(&task->response_pl[i], &val, remainder);
		/* Prior to the last ack, ensure Data Object Ready */
		if (!pci_doe_data_obj_ready(doe_mb))
			return -EIO;
		pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
		i++;
	}

	/* Flush excess length */
	for (; i < length; i++) {
		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
		pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0);
	}

	/* Final error check to pick up on any errors raised since Data Object Ready */
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
		return -EIO;

	return received;
}

static void signal_task_complete(struct pci_doe_task *task, int rv)
{
	task->rv = rv;
	destroy_work_on_stack(&task->work);
	task->complete(task);
}

static void signal_task_abort(struct pci_doe_task *task, int rv)
{
	struct pci_doe_mb *doe_mb = task->doe_mb;
	struct pci_dev *pdev = doe_mb->pdev;

	if (pci_doe_abort(doe_mb)) {
		/*
		 * If the device can't process an abort, set the mailbox dead:
		 * no more submissions.
		 */
		pci_err(pdev, "[%x] Abort failed marking mailbox dead\n",
			doe_mb->cap_offset);
		set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
	}
	signal_task_complete(task, rv);
}

static void doe_statemachine_work(struct work_struct *work)
{
	struct pci_doe_task *task = container_of(work, struct pci_doe_task,
						 work);
	struct pci_doe_mb *doe_mb = task->doe_mb;
	struct pci_dev *pdev = doe_mb->pdev;
	int offset = doe_mb->cap_offset;
	unsigned long timeout_jiffies;
	u32 val;
	int rc;

	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) {
		signal_task_complete(task, -EIO);
		return;
	}

	/* Send request */
	rc = pci_doe_send_req(doe_mb, task);
	if (rc) {
		/*
		 * The specification does not provide any guidance on how to
		 * resolve conflicting requests from other entities.
		 * Furthermore, it is likely that busy will not be detected
		 * most of the time. Flag any detection of status busy with an
		 * error.
		 */
		if (rc == -EBUSY)
			dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n",
					    offset);
		signal_task_abort(task, rc);
		return;
	}

	timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
	/* Poll for response */
retry_resp:
	pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) {
		signal_task_abort(task, -EIO);
		return;
	}

	if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) {
		if (time_after(jiffies, timeout_jiffies)) {
			signal_task_abort(task, -EIO);
			return;
		}
		rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL);
		if (rc) {
			signal_task_abort(task, rc);
			return;
		}
		goto retry_resp;
	}

	rc = pci_doe_recv_resp(doe_mb, task);
	if (rc < 0) {
		signal_task_abort(task, rc);
		return;
	}

	signal_task_complete(task, rc);
}

static void pci_doe_task_complete(struct pci_doe_task *task)
{
	complete(task->private);
}

static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u16 *vid,
			     u8 *protocol)
{
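	/*
	 * A DOE capability at version 2 or later expects Discovery requests
	 * with the Version field set to 2; older mailboxes expect 0.
	 */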
	u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
				    *index) |
			 FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER,
				    (capver >= 2) ? 2 : 0);
	__le32 request_pl_le = cpu_to_le32(request_pl);
	__le32 response_pl_le;
	u32 response_pl;
	int rc;

	rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY,
		     &request_pl_le, sizeof(request_pl_le),
		     &response_pl_le, sizeof(response_pl_le));
	if (rc < 0)
		return rc;

	if (rc != sizeof(response_pl_le))
		return -EIO;

	response_pl = le32_to_cpu(response_pl_le);
	*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
	*protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
			      response_pl);
	*index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
			   response_pl);

	return 0;
}

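/* Encode a (VID, protocol type) pair as a single xarray value */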
static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
{
	return xa_mk_value((vid << 8) | prot);
}

static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
{
	u8 index = 0;
	u8 xa_idx = 0;
	u32 hdr = 0;

	pci_read_config_dword(doe_mb->pdev, doe_mb->cap_offset, &hdr);

	do {
		int rc;
		u16 vid;
		u8 prot;

		rc = pci_doe_discovery(doe_mb, PCI_EXT_CAP_VER(hdr), &index,
				       &vid, &prot);
		if (rc)
			return rc;
		pci_dbg(doe_mb->pdev,
			"[%x] Found protocol %d vid: %x prot: %x\n",
			doe_mb->cap_offset, xa_idx, vid, prot);

		rc = xa_insert(&doe_mb->prots, xa_idx++,
			       pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
		if (rc)
			return rc;
	} while (index);

	return 0;
}

static void pci_doe_cancel_tasks(struct pci_doe_mb *doe_mb)
{
	/* Stop all pending work items from starting */
	set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);

	/* Cancel an in progress work item, if necessary */
	set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
	wake_up(&doe_mb->wq);
}

/**
 * pci_doe_create_mb() - Create a DOE mailbox object
 *
 * @pdev: PCI device to create the DOE mailbox for
 * @cap_offset: Offset of the DOE mailbox
 *
 * Create a single mailbox object to manage the mailbox protocol at the
 * cap_offset specified.
 *
 * RETURNS: created mailbox object on success
 *	    ERR_PTR(-errno) on failure
 */
static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
					    u16 cap_offset)
{
	struct pci_doe_mb *doe_mb;
	int rc;

	doe_mb = kzalloc(sizeof(*doe_mb), GFP_KERNEL);
	if (!doe_mb)
		return ERR_PTR(-ENOMEM);

	doe_mb->pdev = pdev;
	doe_mb->cap_offset = cap_offset;
	init_waitqueue_head(&doe_mb->wq);
	xa_init(&doe_mb->prots);

	doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
						     dev_bus_name(&pdev->dev),
						     pci_name(pdev),
						     doe_mb->cap_offset);
	if (!doe_mb->work_queue) {
		pci_err(pdev, "[%x] failed to allocate work queue\n",
			doe_mb->cap_offset);
		rc = -ENOMEM;
		goto err_free;
	}

	/* Reset the mailbox by issuing an abort */
	rc = pci_doe_abort(doe_mb);
	if (rc) {
		pci_err(pdev, "[%x] failed to reset mailbox with abort command: %d\n",
			doe_mb->cap_offset, rc);
		goto err_destroy_wq;
	}

	/*
	 * The state machine and the mailbox should be in sync now;
	 * use the mailbox to query protocols.
	 */
	rc = pci_doe_cache_protocols(doe_mb);
	if (rc) {
		pci_err(pdev, "[%x] failed to cache protocols: %d\n",
			doe_mb->cap_offset, rc);
		goto err_cancel;
	}

	return doe_mb;

err_cancel:
	pci_doe_cancel_tasks(doe_mb);
	xa_destroy(&doe_mb->prots);
err_destroy_wq:
	destroy_workqueue(doe_mb->work_queue);
err_free:
	kfree(doe_mb);

	return ERR_PTR(rc);
}

/**
 * pci_doe_destroy_mb() - Destroy a DOE mailbox object
 *
 * @doe_mb: DOE mailbox
 *
 * Destroy all internal data structures created for the DOE mailbox.
 */
static void pci_doe_destroy_mb(struct pci_doe_mb *doe_mb)
{
	pci_doe_cancel_tasks(doe_mb);
	xa_destroy(&doe_mb->prots);
	destroy_workqueue(doe_mb->work_queue);
	kfree(doe_mb);
}

/**
 * pci_doe_supports_prot() - Return if the DOE instance supports the given
 *			     protocol
 * @doe_mb: DOE mailbox capability to query
 * @vid: Protocol Vendor ID
 * @type: Protocol type
 *
 * RETURNS: True if the DOE mailbox supports the protocol specified
 */
static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
{
	unsigned long index;
	void *entry;

	/* The discovery protocol must always be supported */
	if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
		return true;

	xa_for_each(&doe_mb->prots, index, entry)
		if (entry == pci_doe_xa_prot_entry(vid, type))
			return true;

	return false;
}

/**
 * pci_doe_submit_task() - Submit a task to be processed by the state machine
 *
 * @doe_mb: DOE mailbox capability to submit to
 * @task: task to be queued
 *
 * Submit a DOE task (request/response) to the DOE mailbox to be processed.
 * Returns upon queueing the task object. If the queue is full this function
 * will sleep until there is room in the queue.
 *
 * task->complete will be called when the state machine is done processing this
 * task.
 *
 * @task must be allocated on the stack.
 *
 * Excess data will be discarded.
 *
 * RETURNS: 0 when task has been successfully queued, -ERRNO on error
 */
static int pci_doe_submit_task(struct pci_doe_mb *doe_mb,
			       struct pci_doe_task *task)
{
	if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
		return -EINVAL;

	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
		return -EIO;

	task->doe_mb = doe_mb;
	INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
	queue_work(doe_mb->work_queue, &task->work);
	return 0;
}

/**
 * pci_doe() - Perform Data Object Exchange
 *
 * @doe_mb: DOE Mailbox
 * @vendor: Vendor ID
 * @type: Data Object Type
 * @request: Request payload
 * @request_sz: Size of request payload (bytes)
 * @response: Response payload
 * @response_sz: Size of response payload (bytes)
 *
 * Submit @request to @doe_mb and store the @response.
 * The DOE exchange is performed synchronously and may therefore sleep.
 *
 * Payloads are treated as opaque byte streams which are transmitted verbatim,
 * without byte-swapping. If payloads contain little-endian register values,
 * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu().
 *
 * For convenience, arbitrary payload sizes are allowed even though PCIe r6.0
 * sec 6.30.1 specifies the Data Object Header 2 "Length" in dwords. The last
 * (partial) dword is copied with byte granularity and padded with zeroes if
 * necessary. Callers are thus relieved of using dword-sized bounce buffers.
 *
 * RETURNS: Length of received response or negative errno.
 * Received data in excess of @response_sz is discarded.
 * The length may be smaller than @response_sz and the caller
 * is responsible for checking that.
 */
int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
	    const void *request, size_t request_sz,
	    void *response, size_t response_sz)
{
	DECLARE_COMPLETION_ONSTACK(c);
	struct pci_doe_task task = {
		.prot.vid = vendor,
		.prot.type = type,
		.request_pl = request,
		.request_pl_sz = request_sz,
		.response_pl = response,
		.response_pl_sz = response_sz,
		.complete = pci_doe_task_complete,
		.private = &c,
	};
	int rc;

	rc = pci_doe_submit_task(doe_mb, &task);
	if (rc)
		return rc;

	wait_for_completion(&c);

	return task.rv;
}
EXPORT_SYMBOL_GPL(pci_doe);

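/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * caller performing a one-dword exchange. The vendor ID (0x1234) and data
 * object type (0x42) below are placeholders, not a real protocol.
 *
 *	__le32 req = cpu_to_le32(1);
 *	__le32 rsp;
 *	int rc;
 *
 *	rc = pci_doe(doe_mb, 0x1234, 0x42, &req, sizeof(req),
 *		     &rsp, sizeof(rsp));
 *	if (rc < 0)
 *		return rc;
 *	if (rc != sizeof(rsp))
 *		return -EIO;
 */
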
/**
 * pci_find_doe_mailbox() - Find Data Object Exchange mailbox
 *
 * @pdev: PCI device
 * @vendor: Vendor ID
 * @type: Data Object Type
 *
 * Find first DOE mailbox of a PCI device which supports the given protocol.
 *
 * RETURNS: Pointer to the DOE mailbox or NULL if none was found.
 */
struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
					u8 type)
{
	struct pci_doe_mb *doe_mb;
	unsigned long index;

	xa_for_each(&pdev->doe_mbs, index, doe_mb)
		if (pci_doe_supports_prot(doe_mb, vendor, type))
			return doe_mb;

	return NULL;
}
EXPORT_SYMBOL_GPL(pci_find_doe_mailbox);

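/*
 * Example (illustrative sketch): locating the first mailbox that speaks a
 * hypothetical protocol before exchanging data with it. The IDs are
 * placeholders, as above.
 *
 *	struct pci_doe_mb *doe_mb;
 *
 *	doe_mb = pci_find_doe_mailbox(pdev, 0x1234, 0x42);
 *	if (!doe_mb)
 *		return -ENODEV;
 *	rc = pci_doe(doe_mb, 0x1234, 0x42, &req, sizeof(req),
 *		     &rsp, sizeof(rsp));
 */
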
void pci_doe_init(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;
	u16 offset = 0;
	int rc;

	xa_init(&pdev->doe_mbs);

	while ((offset = pci_find_next_ext_capability(pdev, offset,
						      PCI_EXT_CAP_ID_DOE))) {
		doe_mb = pci_doe_create_mb(pdev, offset);
		if (IS_ERR(doe_mb)) {
			pci_err(pdev, "[%x] failed to create mailbox: %ld\n",
				offset, PTR_ERR(doe_mb));
			continue;
		}

		rc = xa_insert(&pdev->doe_mbs, offset, doe_mb, GFP_KERNEL);
		if (rc) {
			pci_err(pdev, "[%x] failed to insert mailbox: %d\n",
				offset, rc);
			pci_doe_destroy_mb(doe_mb);
		}
	}
}

void pci_doe_destroy(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;
	unsigned long index;

	xa_for_each(&pdev->doe_mbs, index, doe_mb)
		pci_doe_destroy_mb(doe_mb);

	xa_destroy(&pdev->doe_mbs);
}

void pci_doe_disconnected(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;
	unsigned long index;

	xa_for_each(&pdev->doe_mbs, index, doe_mb)
		pci_doe_cancel_tasks(doe_mb);
}