  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * xhci-dbgtty.c - tty glue for xHCI debug capability
  4. *
  5. * Copyright (C) 2017 Intel Corporation
  6. *
  7. * Author: Lu Baolu <baolu.lu@linux.intel.com>
  8. */
  9. #include <linux/slab.h>
  10. #include <linux/tty.h>
  11. #include <linux/tty_flip.h>
  12. #include <linux/idr.h>
  13. #include "xhci.h"
  14. #include "xhci-dbgcap.h"
  15. static struct tty_driver *dbc_tty_driver;
  16. static struct idr dbc_tty_minors;
  17. static DEFINE_MUTEX(dbc_tty_minors_lock);
  18. static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
  19. {
  20. return dbc->priv;
  21. }
  22. static unsigned int
  23. dbc_kfifo_to_req(struct dbc_port *port, char *packet)
  24. {
  25. unsigned int len;
  26. len = kfifo_len(&port->port.xmit_fifo);
  27. if (len == 0)
  28. return 0;
  29. len = min(len, DBC_MAX_PACKET);
  30. if (port->tx_boundary)
  31. len = min(port->tx_boundary, len);
  32. len = kfifo_out(&port->port.xmit_fifo, packet, len);
  33. if (port->tx_boundary)
  34. port->tx_boundary -= len;
  35. return len;
  36. }
  37. static int dbc_start_tx(struct dbc_port *port)
  38. __releases(&port->port_lock)
  39. __acquires(&port->port_lock)
  40. {
  41. int len;
  42. struct dbc_request *req;
  43. int status = 0;
  44. bool do_tty_wake = false;
  45. struct list_head *pool = &port->write_pool;
  46. while (!list_empty(pool)) {
  47. req = list_entry(pool->next, struct dbc_request, list_pool);
  48. len = dbc_kfifo_to_req(port, req->buf);
  49. if (len == 0)
  50. break;
  51. do_tty_wake = true;
  52. req->length = len;
  53. list_del(&req->list_pool);
  54. spin_unlock(&port->port_lock);
  55. status = dbc_ep_queue(req);
  56. spin_lock(&port->port_lock);
  57. if (status) {
  58. list_add(&req->list_pool, pool);
  59. break;
  60. }
  61. }
  62. if (do_tty_wake && port->port.tty)
  63. tty_wakeup(port->port.tty);
  64. return status;
  65. }
  66. static void dbc_start_rx(struct dbc_port *port)
  67. __releases(&port->port_lock)
  68. __acquires(&port->port_lock)
  69. {
  70. struct dbc_request *req;
  71. int status;
  72. struct list_head *pool = &port->read_pool;
  73. while (!list_empty(pool)) {
  74. if (!port->port.tty)
  75. break;
  76. req = list_entry(pool->next, struct dbc_request, list_pool);
  77. list_del(&req->list_pool);
  78. req->length = DBC_MAX_PACKET;
  79. spin_unlock(&port->port_lock);
  80. status = dbc_ep_queue(req);
  81. spin_lock(&port->port_lock);
  82. if (status) {
  83. list_add(&req->list_pool, pool);
  84. break;
  85. }
  86. }
  87. }
  88. static void
  89. dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
  90. {
  91. unsigned long flags;
  92. struct dbc_port *port = dbc_to_port(dbc);
  93. spin_lock_irqsave(&port->port_lock, flags);
  94. list_add_tail(&req->list_pool, &port->read_queue);
  95. tasklet_schedule(&port->push);
  96. spin_unlock_irqrestore(&port->port_lock, flags);
  97. }
  98. static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
  99. {
  100. unsigned long flags;
  101. struct dbc_port *port = dbc_to_port(dbc);
  102. spin_lock_irqsave(&port->port_lock, flags);
  103. list_add(&req->list_pool, &port->write_pool);
  104. switch (req->status) {
  105. case 0:
  106. dbc_start_tx(port);
  107. break;
  108. case -ESHUTDOWN:
  109. break;
  110. default:
  111. dev_warn(dbc->dev, "unexpected write complete status %d\n",
  112. req->status);
  113. break;
  114. }
  115. spin_unlock_irqrestore(&port->port_lock, flags);
  116. }
  117. static void xhci_dbc_free_req(struct dbc_request *req)
  118. {
  119. kfree(req->buf);
  120. dbc_free_request(req);
  121. }
  122. static int
  123. xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
  124. struct list_head *head,
  125. void (*fn)(struct xhci_dbc *, struct dbc_request *))
  126. {
  127. int i;
  128. struct dbc_request *req;
  129. for (i = 0; i < DBC_QUEUE_SIZE; i++) {
  130. req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
  131. if (!req)
  132. break;
  133. req->length = DBC_MAX_PACKET;
  134. req->buf = kmalloc(req->length, GFP_KERNEL);
  135. if (!req->buf) {
  136. dbc_free_request(req);
  137. break;
  138. }
  139. req->complete = fn;
  140. list_add_tail(&req->list_pool, head);
  141. }
  142. return list_empty(head) ? -ENOMEM : 0;
  143. }
  144. static void
  145. xhci_dbc_free_requests(struct list_head *head)
  146. {
  147. struct dbc_request *req;
  148. while (!list_empty(head)) {
  149. req = list_entry(head->next, struct dbc_request, list_pool);
  150. list_del(&req->list_pool);
  151. xhci_dbc_free_req(req);
  152. }
  153. }
  154. static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
  155. {
  156. struct dbc_port *port;
  157. mutex_lock(&dbc_tty_minors_lock);
  158. port = idr_find(&dbc_tty_minors, tty->index);
  159. mutex_unlock(&dbc_tty_minors_lock);
  160. if (!port)
  161. return -ENXIO;
  162. tty->driver_data = port;
  163. return tty_port_install(&port->port, driver, tty);
  164. }
  165. static int dbc_tty_open(struct tty_struct *tty, struct file *file)
  166. {
  167. struct dbc_port *port = tty->driver_data;
  168. return tty_port_open(&port->port, tty, file);
  169. }
  170. static void dbc_tty_close(struct tty_struct *tty, struct file *file)
  171. {
  172. struct dbc_port *port = tty->driver_data;
  173. tty_port_close(&port->port, tty, file);
  174. }
  175. static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
  176. size_t count)
  177. {
  178. struct dbc_port *port = tty->driver_data;
  179. unsigned long flags;
  180. unsigned int written = 0;
  181. spin_lock_irqsave(&port->port_lock, flags);
  182. /*
  183. * Treat tty write as one usb transfer. Make sure the writes are turned
  184. * into TRB request having the same size boundaries as the tty writes.
  185. * Don't add data to kfifo before previous write is turned into TRBs
  186. */
  187. if (port->tx_boundary) {
  188. spin_unlock_irqrestore(&port->port_lock, flags);
  189. return 0;
  190. }
  191. if (count) {
  192. written = kfifo_in(&port->port.xmit_fifo, buf, count);
  193. if (written == count)
  194. port->tx_boundary = kfifo_len(&port->port.xmit_fifo);
  195. dbc_start_tx(port);
  196. }
  197. spin_unlock_irqrestore(&port->port_lock, flags);
  198. return written;
  199. }
  200. static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
  201. {
  202. struct dbc_port *port = tty->driver_data;
  203. unsigned long flags;
  204. int status;
  205. spin_lock_irqsave(&port->port_lock, flags);
  206. status = kfifo_put(&port->port.xmit_fifo, ch);
  207. spin_unlock_irqrestore(&port->port_lock, flags);
  208. return status;
  209. }
  210. static void dbc_tty_flush_chars(struct tty_struct *tty)
  211. {
  212. struct dbc_port *port = tty->driver_data;
  213. unsigned long flags;
  214. spin_lock_irqsave(&port->port_lock, flags);
  215. dbc_start_tx(port);
  216. spin_unlock_irqrestore(&port->port_lock, flags);
  217. }
  218. static unsigned int dbc_tty_write_room(struct tty_struct *tty)
  219. {
  220. struct dbc_port *port = tty->driver_data;
  221. unsigned long flags;
  222. unsigned int room;
  223. spin_lock_irqsave(&port->port_lock, flags);
  224. room = kfifo_avail(&port->port.xmit_fifo);
  225. if (port->tx_boundary)
  226. room = 0;
  227. spin_unlock_irqrestore(&port->port_lock, flags);
  228. return room;
  229. }
  230. static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
  231. {
  232. struct dbc_port *port = tty->driver_data;
  233. unsigned long flags;
  234. unsigned int chars;
  235. spin_lock_irqsave(&port->port_lock, flags);
  236. chars = kfifo_len(&port->port.xmit_fifo);
  237. spin_unlock_irqrestore(&port->port_lock, flags);
  238. return chars;
  239. }
  240. static void dbc_tty_unthrottle(struct tty_struct *tty)
  241. {
  242. struct dbc_port *port = tty->driver_data;
  243. unsigned long flags;
  244. spin_lock_irqsave(&port->port_lock, flags);
  245. tasklet_schedule(&port->push);
  246. spin_unlock_irqrestore(&port->port_lock, flags);
  247. }
  248. static const struct tty_operations dbc_tty_ops = {
  249. .install = dbc_tty_install,
  250. .open = dbc_tty_open,
  251. .close = dbc_tty_close,
  252. .write = dbc_tty_write,
  253. .put_char = dbc_tty_put_char,
  254. .flush_chars = dbc_tty_flush_chars,
  255. .write_room = dbc_tty_write_room,
  256. .chars_in_buffer = dbc_tty_chars_in_buffer,
  257. .unthrottle = dbc_tty_unthrottle,
  258. };
  259. static void dbc_rx_push(struct tasklet_struct *t)
  260. {
  261. struct dbc_request *req;
  262. struct tty_struct *tty;
  263. unsigned long flags;
  264. bool do_push = false;
  265. bool disconnect = false;
  266. struct dbc_port *port = from_tasklet(port, t, push);
  267. struct list_head *queue = &port->read_queue;
  268. spin_lock_irqsave(&port->port_lock, flags);
  269. tty = port->port.tty;
  270. while (!list_empty(queue)) {
  271. req = list_first_entry(queue, struct dbc_request, list_pool);
  272. if (tty && tty_throttled(tty))
  273. break;
  274. switch (req->status) {
  275. case 0:
  276. break;
  277. case -ESHUTDOWN:
  278. disconnect = true;
  279. break;
  280. default:
  281. pr_warn("ttyDBC0: unexpected RX status %d\n",
  282. req->status);
  283. break;
  284. }
  285. if (req->actual) {
  286. char *packet = req->buf;
  287. unsigned int n, size = req->actual;
  288. int count;
  289. n = port->n_read;
  290. if (n) {
  291. packet += n;
  292. size -= n;
  293. }
  294. count = tty_insert_flip_string(&port->port, packet,
  295. size);
  296. if (count)
  297. do_push = true;
  298. if (count != size) {
  299. port->n_read += count;
  300. break;
  301. }
  302. port->n_read = 0;
  303. }
  304. list_move_tail(&req->list_pool, &port->read_pool);
  305. }
  306. if (do_push)
  307. tty_flip_buffer_push(&port->port);
  308. if (!list_empty(queue) && tty) {
  309. if (!tty_throttled(tty)) {
  310. if (do_push)
  311. tasklet_schedule(&port->push);
  312. else
  313. pr_warn("ttyDBC0: RX not scheduled?\n");
  314. }
  315. }
  316. if (!disconnect)
  317. dbc_start_rx(port);
  318. spin_unlock_irqrestore(&port->port_lock, flags);
  319. }
  320. static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
  321. {
  322. unsigned long flags;
  323. struct dbc_port *port = container_of(_port, struct dbc_port, port);
  324. spin_lock_irqsave(&port->port_lock, flags);
  325. dbc_start_rx(port);
  326. spin_unlock_irqrestore(&port->port_lock, flags);
  327. return 0;
  328. }
  329. static const struct tty_port_operations dbc_port_ops = {
  330. .activate = dbc_port_activate,
  331. };
  332. static void
  333. xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
  334. {
  335. tty_port_init(&port->port);
  336. spin_lock_init(&port->port_lock);
  337. tasklet_setup(&port->push, dbc_rx_push);
  338. INIT_LIST_HEAD(&port->read_pool);
  339. INIT_LIST_HEAD(&port->read_queue);
  340. INIT_LIST_HEAD(&port->write_pool);
  341. port->port.ops = &dbc_port_ops;
  342. port->n_read = 0;
  343. }
  344. static void
  345. xhci_dbc_tty_exit_port(struct dbc_port *port)
  346. {
  347. tasklet_kill(&port->push);
  348. tty_port_destroy(&port->port);
  349. }
  350. static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
  351. {
  352. int ret;
  353. struct device *tty_dev;
  354. struct dbc_port *port = dbc_to_port(dbc);
  355. if (port->registered)
  356. return -EBUSY;
  357. xhci_dbc_tty_init_port(dbc, port);
  358. mutex_lock(&dbc_tty_minors_lock);
  359. port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
  360. mutex_unlock(&dbc_tty_minors_lock);
  361. if (port->minor < 0) {
  362. ret = port->minor;
  363. goto err_idr;
  364. }
  365. ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
  366. GFP_KERNEL);
  367. if (ret)
  368. goto err_exit_port;
  369. ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
  370. dbc_read_complete);
  371. if (ret)
  372. goto err_free_fifo;
  373. ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
  374. dbc_write_complete);
  375. if (ret)
  376. goto err_free_requests;
  377. tty_dev = tty_port_register_device(&port->port,
  378. dbc_tty_driver, port->minor, NULL);
  379. if (IS_ERR(tty_dev)) {
  380. ret = PTR_ERR(tty_dev);
  381. goto err_free_requests;
  382. }
  383. port->registered = true;
  384. return 0;
  385. err_free_requests:
  386. xhci_dbc_free_requests(&port->read_pool);
  387. xhci_dbc_free_requests(&port->write_pool);
  388. err_free_fifo:
  389. kfifo_free(&port->port.xmit_fifo);
  390. err_exit_port:
  391. idr_remove(&dbc_tty_minors, port->minor);
  392. err_idr:
  393. xhci_dbc_tty_exit_port(port);
  394. dev_err(dbc->dev, "can't register tty port, err %d\n", ret);
  395. return ret;
  396. }
  397. static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
  398. {
  399. struct dbc_port *port = dbc_to_port(dbc);
  400. if (!port->registered)
  401. return;
  402. tty_unregister_device(dbc_tty_driver, port->minor);
  403. xhci_dbc_tty_exit_port(port);
  404. port->registered = false;
  405. mutex_lock(&dbc_tty_minors_lock);
  406. idr_remove(&dbc_tty_minors, port->minor);
  407. mutex_unlock(&dbc_tty_minors_lock);
  408. kfifo_free(&port->port.xmit_fifo);
  409. xhci_dbc_free_requests(&port->read_pool);
  410. xhci_dbc_free_requests(&port->read_queue);
  411. xhci_dbc_free_requests(&port->write_pool);
  412. }
  413. static const struct dbc_driver dbc_driver = {
  414. .configure = xhci_dbc_tty_register_device,
  415. .disconnect = xhci_dbc_tty_unregister_device,
  416. };
  417. int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
  418. {
  419. struct xhci_dbc *dbc;
  420. struct dbc_port *port;
  421. int status;
  422. if (!dbc_tty_driver)
  423. return -ENODEV;
  424. port = kzalloc(sizeof(*port), GFP_KERNEL);
  425. if (!port)
  426. return -ENOMEM;
  427. dbc = xhci_alloc_dbc(dev, base, &dbc_driver);
  428. if (!dbc) {
  429. status = -ENOMEM;
  430. goto out2;
  431. }
  432. dbc->priv = port;
  433. /* get rid of xhci once this is a real driver binding to a device */
  434. xhci->dbc = dbc;
  435. return 0;
  436. out2:
  437. kfree(port);
  438. return status;
  439. }
  440. /*
  441. * undo what probe did, assume dbc is stopped already.
  442. * we also assume tty_unregister_device() is called before this
  443. */
  444. void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
  445. {
  446. struct dbc_port *port = dbc_to_port(dbc);
  447. xhci_dbc_remove(dbc);
  448. kfree(port);
  449. }
  450. int dbc_tty_init(void)
  451. {
  452. int ret;
  453. idr_init(&dbc_tty_minors);
  454. dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
  455. TTY_DRIVER_DYNAMIC_DEV);
  456. if (IS_ERR(dbc_tty_driver)) {
  457. idr_destroy(&dbc_tty_minors);
  458. return PTR_ERR(dbc_tty_driver);
  459. }
  460. dbc_tty_driver->driver_name = "dbc_serial";
  461. dbc_tty_driver->name = "ttyDBC";
  462. dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
  463. dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
  464. dbc_tty_driver->init_termios = tty_std_termios;
  465. dbc_tty_driver->init_termios.c_cflag =
  466. B9600 | CS8 | CREAD | HUPCL | CLOCAL;
  467. dbc_tty_driver->init_termios.c_ispeed = 9600;
  468. dbc_tty_driver->init_termios.c_ospeed = 9600;
  469. tty_set_operations(dbc_tty_driver, &dbc_tty_ops);
  470. ret = tty_register_driver(dbc_tty_driver);
  471. if (ret) {
  472. pr_err("Can't register dbc tty driver\n");
  473. tty_driver_kref_put(dbc_tty_driver);
  474. idr_destroy(&dbc_tty_minors);
  475. }
  476. return ret;
  477. }
  478. void dbc_tty_exit(void)
  479. {
  480. if (dbc_tty_driver) {
  481. tty_unregister_driver(dbc_tty_driver);
  482. tty_driver_kref_put(dbc_tty_driver);
  483. dbc_tty_driver = NULL;
  484. }
  485. idr_destroy(&dbc_tty_minors);
  486. }