ctl.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
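
/*
 * Number of RX buffers kept posted on the control channel ring, and how
 * many times a timed out config read/write request is retried.
 */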
#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @timeout_msec: Default timeout for non-raw control messages
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 * @index: Domain number. This will be output with the trace record.
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;
	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;
	int timeout_msec;
	event_cb callback;
	void *callback_data;
	int index;
};

#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object so when you are done with it, call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	if (!test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)) {
		mutex_unlock(&ctl->request_queue_lock);
		return;
	}

	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(iter);
		if (iter->match(iter, pkg)) {
			req = iter;
			break;
		}
		tb_cfg_request_put(iter);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return req;
}

/* utility functions */

static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
		 "wrong framesize (expected %#x, got %#x)\n",
		 len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
		 type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
		 pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
		 "header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
		 "wrong route (expected %llx, got %llx)",
		 route, tb_cfg_get_route(header)))
		return -EIO;

	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
		 space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
		 offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
		 length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/* Port is not connected. This can happen during surprise
		 * removal. Do not warn. */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			    res->response_route, res->response_port);
		return;
	}
}
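
/* CRC32c of the frame payload; it is stored as the last dword of the frame */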
static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}

/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;

	trace_tb_tx(ctl->index, type, data, len);

	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}

/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	trace_tb_event(ctl->index, type, pkg->buffer, size);
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
						* We ignore failures during stop.
						* All rx packets are referenced
						* from ctl->rx_packets, so we do
						* not lose them.
						*/
}
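
/*
 * True for error packets that carry an async notification rather than a
 * reply to one of our requests; these are handed to the event callback.
 */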
static int tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
	case TB_CFG_ERROR_DP_BW:
	case TB_CFG_ERROR_ROP_CMPLT:
	case TB_CFG_ERROR_POP_CMPLT:
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
	case TB_CFG_ERROR_LINK_RECOVERY:
	case TB_CFG_ERROR_ASYM_LINK:
		return true;

	default:
		return false;
	}
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and that the packet is what is expected. This
	 * prevents packets such as replies coming after timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);

	trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);

	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in ms how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If timeout
 * triggers the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @index: Domain number
 * @timeout_msec: Default timeout used with non-raw control messages
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
			    event_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;

	ctl->nhi = nhi;
	ctl->index = index;
	ctl->timeout_msec = timeout_msec;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}

/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_notification() - Ack notification
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @error: Pointer to the notification package
 *
 * Call this as response for non-plug notification to ack it. Returns
 * %0 on success or an error code on failure.
 */
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
			    const struct cfg_error_pkg *error)
{
	struct cfg_ack_pkg pkg = {
		.header = tb_cfg_make_header(route),
	};
	const char *name;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
		name = "link error";
		break;
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
		name = "HEC error";
		break;
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		name = "flow control error";
		break;
	case TB_CFG_ERROR_DP_BW:
		name = "DP_BW";
		break;
	case TB_CFG_ERROR_ROP_CMPLT:
		name = "router operation completion";
		break;
	case TB_CFG_ERROR_POP_CMPLT:
		name = "port operation completion";
		break;
	case TB_CFG_ERROR_PCIE_WAKE:
		name = "PCIe wake";
		break;
	case TB_CFG_ERROR_DP_CON_CHANGE:
		name = "DP connector change";
		break;
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		name = "DPTX discovery";
		break;
	case TB_CFG_ERROR_LINK_RECOVERY:
		name = "link recovery";
		break;
	case TB_CFG_ERROR_ASYM_LINK:
		name = "asymmetric link";
		break;
	default:
		name = "unknown";
		break;
	}

	tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
		   error->error, route);

	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
}

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this as response for hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
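
/*
 * Matches a received reply with an outstanding request by packet type,
 * route and, for read/write replies, the sequence number. Error packets
 * match any request.
 */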
static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Router string for the router to send reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read into
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}
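
		/*
		 * A new sequence number for each attempt lets tb_cfg_match()
		 * drop late replies to earlier tries.
		 */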
		request.addr.seq = retries++;
		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;
		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}
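
/*
 * tb_cfg_read() and tb_cfg_write() below behave like the raw variants but
 * use the default control channel timeout and translate Thunderbolt errors
 * into negative errnos.
 */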
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   ctl->timeout_msec);

	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;

	return res.response_port;
  997. }