// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @timeout_msec: Default timeout for non-raw control messages
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 * @index: Domain number. This will be output with the trace record.
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	int timeout_msec;
	event_cb callback;
	void *callback_data;

	int index;
};

#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object so when you are done with it, call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}
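
/*
 * Request lifecycle, as implemented below: tb_cfg_request_alloc() returns
 * a request holding a single reference. tb_cfg_request() takes an extra
 * reference, queues the request and transmits it. When the matching reply
 * arrives (or the request is canceled), tb_cfg_request_work() runs the
 * completion callback, dequeues the request and drops that extra
 * reference. The caller drops its own reference with tb_cfg_request_put()
 * once it has consumed the result.
 */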

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}
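
/*
 * tb_cfg_request_find() below returns the first queued request whose
 * ->match callback accepts @pkg. The returned request carries an extra
 * reference that the caller must drop with tb_cfg_request_put().
 */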

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(iter);
		if (iter->match(iter, pkg)) {
			req = iter;
			break;
		}
		tb_cfg_request_put(iter);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return req;
}

/* utility functions */

static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
		 "wrong framesize (expected %#x, got %#x)\n",
		 len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
		 type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
		 pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
		 "header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
		 "wrong route (expected %llx, got %llx)",
		 route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
		 space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
		 offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
		 length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/* Port is not connected. This can happen during surprise
		 * removal. Do not warn. */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			    res->response_route, res->response_port);
		return;
	}
}

static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}

/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;

	trace_tb_tx(ctl->index, type, data, len);

	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}
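
/*
 * Note on the frame layout built above and checked in tb_ctl_rx_callback()
 * below: a control frame carries len / 4 payload dwords converted to big
 * endian, followed by one dword of (inverted) CRC32C over the payload, see
 * tb_crc(). frame.size therefore includes the 4-byte checksum.
 */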

/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	trace_tb_event(ctl->index, type, pkg->buffer, size);
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	/*
	 * We ignore failures during stop. All rx packets are referenced
	 * from ctl->rx_packets, so we do not lose them.
	 */
	tb_ring_rx(pkg->ctl->rx, &pkg->frame);
}

static int tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
	case TB_CFG_ERROR_DP_BW:
	case TB_CFG_ERROR_ROP_CMPLT:
	case TB_CFG_ERROR_POP_CMPLT:
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
	case TB_CFG_ERROR_LINK_RECOVERY:
	case TB_CFG_ERROR_ASYM_LINK:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what that request expects. This
	 * prevents packets such as replies coming after timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);

	trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);

	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request without waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in ms how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers, the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}
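
/*
 * A minimal caller sketch for the request machinery above (tb_cfg_reset()
 * below is the simplest real user in this file). The request/reply packet
 * types and the TB_CFG_PKG_* constant depend on the message being sent:
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *
 *	req->match = tb_cfg_match;
 *	req->copy = tb_cfg_copy;
 *	req->request = &request;
 *	req->request_size = sizeof(request);
 *	req->request_type = TB_CFG_PKG_...;
 *	req->response = &reply;
 *	req->response_size = sizeof(reply);
 *	req->response_type = TB_CFG_PKG_...;
 *
 *	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);
 *	tb_cfg_request_put(req);
 */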

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @index: Domain number
 * @timeout_msec: Default timeout used with non-raw control messages
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
			    event_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;

	ctl->nhi = nhi;
	ctl->index = index;
	ctl->timeout_msec = timeout_msec;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}

/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_notification() - Ack notification
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @error: Pointer to the notification package
 *
 * Call this in response to a non-plug notification to ack it. Returns
 * %0 on success or an error code on failure.
 */
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
			    const struct cfg_error_pkg *error)
{
	struct cfg_ack_pkg pkg = {
		.header = tb_cfg_make_header(route),
	};
	const char *name;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
		name = "link error";
		break;
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
		name = "HEC error";
		break;
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		name = "flow control error";
		break;
	case TB_CFG_ERROR_DP_BW:
		name = "DP_BW";
		break;
	case TB_CFG_ERROR_ROP_CMPLT:
		name = "router operation completion";
		break;
	case TB_CFG_ERROR_POP_CMPLT:
		name = "port operation completion";
		break;
	case TB_CFG_ERROR_PCIE_WAKE:
		name = "PCIe wake";
		break;
	case TB_CFG_ERROR_DP_CON_CHANGE:
		name = "DP connector change";
		break;
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		name = "DPTX discovery";
		break;
	case TB_CFG_ERROR_LINK_RECOVERY:
		name = "link recovery";
		break;
	case TB_CFG_ERROR_ASYM_LINK:
		name = "asymmetric link";
		break;
	default:
		name = "unknown";
		break;
	}

	tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
		   error->error, route);

	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
}

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this in response to a hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string of the router to reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}
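
/*
 * Illustrative example only: reading the first dword of router config
 * space with an assumed 100 ms timeout would look roughly like this
 * (tb_cfg_get_upstream_port() at the end of this file does a similar
 * single-dword read):
 *
 *	u32 val;
 *	struct tb_cfg_result res;
 *
 *	res = tb_cfg_read_raw(ctl, &val, route, 0, TB_CFG_SWITCH, 0, 1, 100);
 */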

/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}
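
/*
 * Result convention used above and below: res.err == 0 means success,
 * res.err == 1 means the router replied with a Thunderbolt error (the code
 * is in res.tb_error), and a negative value is a local errno such as
 * -ETIMEDOUT or -ENOMEM. tb_cfg_get_error() translates the Thunderbolt
 * error into an errno for tb_cfg_read()/tb_cfg_write().
 */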

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);
	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);
	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   ctl->timeout_msec);

	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}