hci_h5.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. *
  4. * Bluetooth HCI Three-wire UART driver
  5. *
  6. * Copyright (C) 2012 Intel Corporation
  7. */
  8. #include <linux/acpi.h>
  9. #include <linux/errno.h>
  10. #include <linux/gpio/consumer.h>
  11. #include <linux/kernel.h>
  12. #include <linux/mod_devicetable.h>
  13. #include <linux/of.h>
  14. #include <linux/pm_runtime.h>
  15. #include <linux/serdev.h>
  16. #include <linux/skbuff.h>
  17. #include <net/bluetooth/bluetooth.h>
  18. #include <net/bluetooth/hci_core.h>
  19. #include "btrtl.h"
  20. #include "hci_uart.h"
/* Idle time (ms) before the serdev is runtime-suspended */
#define SUSPEND_TIMEOUT_MS	6000

/* H5-private packet types (outside the normal HCI packet type space) */
#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

/* Sliding window size */
#define H5_TX_WIN_MAX		4

/* Retransmit unacked reliable packets after this long without an ack */
#define H5_ACK_TIMEOUT	msecs_to_jiffies(250)
/* Interval between sync/config link-establishment retries */
#define H5_SYNC_TIMEOUT	msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 * 4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)		/* 3-bit tx sequence number */
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)	/* 3-bit ack number */
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)	/* CRC-present flag */
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)	/* reliable-packet flag */
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))	/* 12-bit payload len */

/* SLIP framing bytes and their escape sequences */
#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd

/* H5 state flags */
enum {
	H5_RX_ESC,		/* SLIP escape mode */
	H5_TX_ACK_REQ,		/* Pending ack to send */
	H5_WAKEUP_DISABLE,	/* Device cannot wake host */
	H5_HW_FLOW_CONTROL,	/* Use HW flow control */
};

struct h5 {
	/* Must be the first member, hci_serdev.c expects this. */
	struct hci_uart serdev_hu;

	struct sk_buff_head unack;	/* Unack'ed packets queue */
	struct sk_buff_head rel;	/* Reliable packets queue */
	struct sk_buff_head unrel;	/* Unreliable packets queue */

	unsigned long flags;

	struct sk_buff *rx_skb;		/* Receive buffer */
	size_t rx_pending;		/* Expecting more bytes */
	u8 rx_ack;			/* Last ack number received */

	/* Current RX state handler; returns bytes consumed or -errno */
	int (*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list timer;	/* Retransmission timer */
	struct hci_uart *hu;		/* Parent HCI UART */

	u8 tx_seq;	/* Next seq number to send */
	u8 tx_ack;	/* Next ack number to send */
	u8 tx_win;	/* Sliding window size */

	/* Link-establishment state machine */
	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	/* Low-power handshake state */
	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;

	const struct h5_vnd *vnd;	/* Vendor hooks; may be NULL (ldisc path) */
	const char *id;			/* ACPI match id, used by Realtek setup */

	struct gpio_desc *enable_gpio;
	struct gpio_desc *device_wake_gpio;
};

enum h5_driver_info {
	H5_INFO_WAKEUP_DISABLE = BIT(0),
};

/* Per-vendor callbacks and data */
struct h5_vnd {
	int (*setup)(struct h5 *h5);
	void (*open)(struct h5 *h5);
	void (*close)(struct h5 *h5);
	int (*suspend)(struct h5 *h5);
	int (*resume)(struct h5 *h5);
	const struct acpi_gpio_mapping *acpi_gpio_map;
	int sizeof_priv;	/* Extra hci_dev private bytes to allocate */
};

/* Match-table payload: driver flags plus the vendor ops to use */
struct h5_device_data {
	uint32_t driver_info;
	struct h5_vnd *vnd;
};

static void h5_reset_rx(struct h5 *h5);
/*
 * Queue a link-control message (sync/config/sleep handshake) on the
 * unreliable queue; actual transmission happens from h5_dequeue().
 * Callers pass at most 3 bytes, matching the alloc_skb() size below.
 */
static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;

	skb_put_data(nskb, data, len);

	skb_queue_tail(&h5->unrel, nskb);
}
  110. static u8 h5_cfg_field(struct h5 *h5)
  111. {
  112. /* Sliding window size (first 3 bits) */
  113. return h5->tx_win & 0x07;
  114. }
/*
 * Retransmission/sync timer callback.
 *
 * While the link is coming up this periodically re-sends the sync or
 * config request. Once active, expiry means reliable packets went
 * unacked: move them from the unack queue back onto the rel queue
 * (rewinding tx_seq) so they are retransmitted.
 */
static void h5_timed_event(struct timer_list *t)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct h5 *h5 = from_timer(h5, t, timer);
	struct hci_uart *hu = h5->hu;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Requeue newest-first so the rel queue ends up in original order */
	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}
/*
 * The peer signalled (by re-syncing while we were active) that it has
 * reset: drop all queued traffic, restart link establishment state and
 * ask the HCI core to reset the device.
 */
static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	bt_dev_err(hu->hdev, "Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}
/*
 * Protocol open: obtain the h5 state (devm-allocated on the serdev path,
 * kzalloc'ed on the ldisc path), initialise queues and the retransmission
 * timer, then kick off link establishment with the first sync request.
 */
static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	if (hu->serdev) {
		/* Allocated in h5_serdev_probe() via devm */
		h5 = serdev_device_get_drvdata(hu->serdev);
	} else {
		h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
		if (!h5)
			return -ENOMEM;
	}

	hu->priv = h5;
	h5->hu = hu;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	timer_setup(&h5->timer, h5_timed_event, 0);

	h5->tx_win = H5_TX_WIN_MAX;

	if (h5->vnd && h5->vnd->open)
		h5->vnd->open(h5);

	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}
/* Protocol close: stop the timer, free all queued skbs and vendor state */
static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	del_timer_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	kfree_skb(h5->rx_skb);
	h5->rx_skb = NULL;

	if (h5->vnd && h5->vnd->close)
		h5->vnd->close(h5);

	/* devm owns h5 on the serdev path; only free what h5_open() kzalloc'ed */
	if (!hu->serdev)
		kfree(h5);

	return 0;
}
  205. static int h5_setup(struct hci_uart *hu)
  206. {
  207. struct h5 *h5 = hu->priv;
  208. if (h5->vnd && h5->vnd->setup)
  209. return h5->vnd->setup(h5);
  210. return 0;
  211. }
/*
 * Remove acknowledged packets from the unack queue.
 *
 * Walks seq numbers backwards from tx_seq to work out how many of the
 * oldest unacked packets rx_ack covers, frees that many from the front
 * of the queue, and stops the retransmission timer once it is empty.
 */
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	/* rx_ack not found within the window: the peer acked garbage */
	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		/* _irq variant: an irqsave spinlock is held here */
		dev_kfree_skb_irq(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}
/*
 * Handle a received link-control packet: drives the sync/config
 * handshake (H5_UNINITIALIZED -> H5_INITIALIZED -> H5_ACTIVE) and the
 * sleep/wakeup/woken low-power messages. Non-link packets are ignored.
 */
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		/* A sync request while active means the peer restarted */
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		/* Optional third byte carries the peer's sliding window size */
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	/* Falling through here means we queued a reply: go transmit it */
	hci_uart_tx_wakeup(hu);
}
/*
 * A full packet (header + payload [+ CRC]) has been received: update ack
 * bookkeeping, cull newly acknowledged packets, then either hand HCI
 * data up to the core or process link-control packets internally.
 */
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		/* Reliable packets must be acked back to the peer */
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		/* Ownership of rx_skb passes to the HCI core here */
		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}
/*
 * RX state: the final CRC byte has arrived. The CRC value itself is not
 * verified here; the packet is completed as-is.
 */
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}
/*
 * RX state: the last payload byte has arrived. Either wait for the two
 * CRC bytes (when the header announces a CRC) or complete the packet.
 */
static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
	}

	return 0;
}
/*
 * RX state: all four header bytes received. Validate the header checksum
 * and (for reliable packets) the sequence number, then set up payload
 * reception. Malformed packets silently restart frame hunting.
 */
static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	/* The four header bytes must sum to 0xff (mod 256) */
	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		bt_dev_err(hu->hdev, "Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)",
			   H5_HDR_SEQ(hdr), h5->tx_ack);
		/* Re-send our current ack so the peer retransmits */
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		bt_dev_err(hu->hdev, "Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}
/*
 * RX state: first byte after the opening delimiter. Extra delimiters are
 * consumed (return 1); any other byte allocates the receive skb and arms
 * collection of the 4-byte header. Returning 0 leaves the byte in place
 * so the newly installed rx_func processes it.
 */
static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	/* Worst case: header + maximal 12-bit payload + CRC */
	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		bt_dev_err(hu->hdev, "Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}
/*
 * RX state: hunting for a SLIP frame delimiter. Every byte is consumed
 * (return 1); a delimiter switches us to packet-start handling.
 */
static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}
/*
 * SLIP-decode one incoming byte into rx_skb, handling the two-byte
 * escape sequences for 0xc0 and 0xdb. An invalid escape aborts the
 * whole packet.
 */
static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		/* Remember the escape and wait for the following byte */
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	skb_put_data(h5->rx_skb, byte, 1);
	h5->rx_pending--;

	BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}
  422. static void h5_reset_rx(struct h5 *h5)
  423. {
  424. if (h5->rx_skb) {
  425. kfree_skb(h5->rx_skb);
  426. h5->rx_skb = NULL;
  427. }
  428. h5->rx_func = h5_rx_delimiter;
  429. h5->rx_pending = 0;
  430. clear_bit(H5_RX_ESC, &h5->flags);
  431. }
/*
 * Feed received serial data through the rx_func state machine. While
 * rx_pending is non-zero the bytes belong to the current packet and are
 * SLIP-decoded; a premature delimiter aborts that packet. Otherwise each
 * rx_func reports how many bytes it consumed (or a negative error).
 */
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				bt_dev_err(hu->hdev, "Too short H5 packet");
				/* Delimiter is re-examined by rx_func next loop */
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	/* Poke runtime PM so the port stays awake for a while after RX */
	if (hu->serdev) {
		pm_runtime_get(&hu->serdev->dev);
		pm_runtime_mark_last_busy(&hu->serdev->dev);
		pm_runtime_put_autosuspend(&hu->serdev->dev);
	}

	return 0;
}
/*
 * Queue an outgoing HCI packet: commands and ACL data go to the reliable
 * queue, SCO/ISO data to the unreliable one. Oversized packets and data
 * sent before the link is active are dropped (returning 0 so the caller
 * does not retry).
 */
static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	/* The H5 header length field is only 12 bits wide */
	if (skb->len > 0xfff) {
		bt_dev_err(hu->hdev, "Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		bt_dev_err(hu->hdev, "Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		bt_dev_err(hu->hdev, "Unknown packet type %u", hci_skb_pkt_type(skb));
		kfree_skb(skb);
		break;
	}

	/* Keep the port awake long enough to transmit */
	if (hu->serdev) {
		pm_runtime_get_sync(&hu->serdev->dev);
		pm_runtime_mark_last_busy(&hu->serdev->dev);
		pm_runtime_put_autosuspend(&hu->serdev->dev);
	}

	return 0;
}
  497. static void h5_slip_delim(struct sk_buff *skb)
  498. {
  499. const char delim = SLIP_DELIMITER;
  500. skb_put_data(skb, &delim, 1);
  501. }
  502. static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
  503. {
  504. const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
  505. const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };
  506. switch (c) {
  507. case SLIP_DELIMITER:
  508. skb_put_data(skb, &esc_delim, 2);
  509. break;
  510. case SLIP_ESC:
  511. skb_put_data(skb, &esc_esc, 2);
  512. break;
  513. default:
  514. skb_put_data(skb, &c, 1);
  515. }
  516. }
  517. static bool valid_packet_type(u8 type)
  518. {
  519. switch (type) {
  520. case HCI_ACLDATA_PKT:
  521. case HCI_COMMAND_PKT:
  522. case HCI_SCODATA_PKT:
  523. case HCI_ISODATA_PKT:
  524. case HCI_3WIRE_LINK_PKT:
  525. case HCI_3WIRE_ACK_PKT:
  526. return true;
  527. default:
  528. return false;
  529. }
  530. }
/*
 * Build a fully SLIP-framed H5 packet around @data: delimiter, 4-byte
 * header (seq/ack/flags/type/len + checksum), escaped payload, closing
 * delimiter. Every header carries tx_ack, so this also consumes any
 * pending ack request. Returns NULL on bad type or allocation failure.
 */
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		bt_dev_err(hu->hdev, "Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	/* Header checksum: the four bytes must sum to 0xff */
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}
/*
 * Hand the next frame to transmit to the hci_uart core. Priority order:
 * wake a sleeping peer first, then unreliable packets, then reliable
 * ones (bounded by the tx window), and finally a bare ack if one is due.
 * Returns NULL when there is nothing to send right now.
 */
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		/* Wakeup already in flight; wait for the peer's reply */
		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;

		BT_DBG("Sending wakeup request");

		/* Short timer so the wakeup is retried if the peer is silent */
		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		/* Framing failed: requeue and retry later */
		skb_queue_head(&h5->unrel, skb);
		bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Window full: wait for acks before sending more reliable data */
	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			/* Keep the original until the peer acks it */
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}
/* Nothing to flush: all queue handling happens in enqueue/dequeue */
static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);

	return 0;
}
/* Three-wire UART protocol operations registered with the hci_uart core */
static const struct hci_uart_proto h5p = {
	.id = HCI_UART_3WIRE,
	.name = "Three-wire (H5)",
	.open = h5_open,
	.close = h5_close,
	.setup = h5_setup,
	.recv = h5_recv,
	.enqueue = h5_enqueue,
	.dequeue = h5_dequeue,
	.flush = h5_flush,
};
/*
 * serdev probe: look up the vendor match data (ACPI or OF), claim the
 * optional enable/device-wake GPIOs and register the HCI UART device
 * with the vendor's extra private-data size.
 */
static int h5_serdev_probe(struct serdev_device *serdev)
{
	struct device *dev = &serdev->dev;
	struct h5 *h5;
	const struct h5_device_data *data;

	h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	h5->hu = &h5->serdev_hu;
	h5->serdev_hu.serdev = serdev;
	serdev_device_set_drvdata(serdev, h5);

	if (has_acpi_companion(dev)) {
		const struct acpi_device_id *match;

		match = acpi_match_device(dev->driver->acpi_match_table, dev);
		if (!match)
			return -ENODEV;

		data = (const struct h5_device_data *)match->driver_data;
		h5->vnd = data->vnd;
		/* ACPI HID, later used to pick the Realtek config */
		h5->id = (char *)match->id;

		if (h5->vnd->acpi_gpio_map)
			devm_acpi_dev_add_driver_gpios(dev,
						       h5->vnd->acpi_gpio_map);
	} else {
		data = of_device_get_match_data(dev);
		if (!data)
			return -ENODEV;

		h5->vnd = data->vnd;
	}

	if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
		set_bit(H5_WAKEUP_DISABLE, &h5->flags);

	h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(h5->enable_gpio))
		return PTR_ERR(h5->enable_gpio);

	h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
						       GPIOD_OUT_LOW);
	if (IS_ERR(h5->device_wake_gpio))
		return PTR_ERR(h5->device_wake_gpio);

	return hci_uart_register_device_priv(&h5->serdev_hu, &h5p,
					     h5->vnd->sizeof_priv);
}
/* serdev remove: unregister the HCI UART (h5 itself is devm-managed) */
static void h5_serdev_remove(struct serdev_device *serdev)
{
	struct h5 *h5 = serdev_device_get_drvdata(serdev);

	hci_uart_unregister_device(&h5->serdev_hu);
}
  683. static int __maybe_unused h5_serdev_suspend(struct device *dev)
  684. {
  685. struct h5 *h5 = dev_get_drvdata(dev);
  686. int ret = 0;
  687. if (h5->vnd && h5->vnd->suspend)
  688. ret = h5->vnd->suspend(h5);
  689. return ret;
  690. }
  691. static int __maybe_unused h5_serdev_resume(struct device *dev)
  692. {
  693. struct h5 *h5 = dev_get_drvdata(dev);
  694. int ret = 0;
  695. if (h5->vnd && h5->vnd->resume)
  696. ret = h5->vnd->resume(h5);
  697. return ret;
  698. }
  699. #ifdef CONFIG_BT_HCIUART_RTL
/*
 * Realtek vendor setup: identify the controller, switch the UART to the
 * speed/flow-control the firmware expects (vendor command 0xfc17 changes
 * the device side first), then download the firmware and apply quirks.
 */
static int h5_btrtl_setup(struct h5 *h5)
{
	struct btrtl_device_info *btrtl_dev;
	struct sk_buff *skb;
	__le32 baudrate_data;
	u32 device_baudrate;
	unsigned int controller_baudrate;
	bool flow_control;
	int err;

	btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
	if (IS_ERR(btrtl_dev))
		return PTR_ERR(btrtl_dev);

	err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
				      &controller_baudrate, &device_baudrate,
				      &flow_control);
	if (err)
		goto out_free;

	/* Vendor-specific "set baud rate" command, little-endian payload */
	baudrate_data = cpu_to_le32(device_baudrate);
	skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
			     &baudrate_data, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
		err = PTR_ERR(skb);
		goto out_free;
	} else {
		kfree_skb(skb);
	}

	/* Give the device some time to set up the new baudrate. */
	usleep_range(10000, 20000);

	serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
	serdev_device_set_flow_control(h5->hu->serdev, flow_control);

	if (flow_control)
		set_bit(H5_HW_FLOW_CONTROL, &h5->flags);

	err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
	/* Give the device some time before the hci-core sends it a reset */
	usleep_range(10000, 20000);
	if (err)
		goto out_free;

	btrtl_set_quirks(h5->hu->hdev, btrtl_dev);

out_free:
	btrtl_free(btrtl_dev);

	return err;
}
/* Power-on sequence and runtime-PM setup for Realtek controllers */
static void h5_btrtl_open(struct h5 *h5)
{
	/*
	 * Since h5_btrtl_resume() does a device_reprobe() the suspend handling
	 * done by the hci_suspend_notifier is not necessary; it actually causes
	 * delays and a bunch of errors to get logged, so disable it.
	 */
	if (test_bit(H5_WAKEUP_DISABLE, &h5->flags))
		set_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &h5->hu->flags);

	/* Devices always start with these fixed parameters */
	serdev_device_set_flow_control(h5->hu->serdev, false);
	serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
	serdev_device_set_baudrate(h5->hu->serdev, 115200);

	/* Runtime PM is only used when the device survives suspend */
	if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
		pm_runtime_set_active(&h5->hu->serdev->dev);
		pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
		pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
						 SUSPEND_TIMEOUT_MS);
		pm_runtime_enable(&h5->hu->serdev->dev);
	}

	/* The controller needs reset to startup */
	gpiod_set_value_cansleep(h5->enable_gpio, 0);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
	msleep(100);

	/* The controller needs up to 500ms to wakeup */
	gpiod_set_value_cansleep(h5->enable_gpio, 1);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
	msleep(500);
}
/* Stop runtime PM (when it was enabled) and power the controller down */
static void h5_btrtl_close(struct h5 *h5)
{
	if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags))
		pm_runtime_disable(&h5->hu->serdev->dev);

	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
	gpiod_set_value_cansleep(h5->enable_gpio, 0);
}
  779. /* Suspend/resume support. On many devices the RTL BT device loses power during
  780. * suspend/resume, causing it to lose its firmware and all state. So we simply
  781. * turn it off on suspend and reprobe on resume. This mirrors how RTL devices
  782. * are handled in the USB driver, where the BTUSB_WAKEUP_DISABLE is used which
  783. * also causes a reprobe on resume.
  784. */
static int h5_btrtl_suspend(struct h5 *h5)
{
	serdev_device_set_flow_control(h5->hu->serdev, false);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);

	/* Only cut power when resume will do a full reprobe anyway */
	if (test_bit(H5_WAKEUP_DISABLE, &h5->flags))
		gpiod_set_value_cansleep(h5->enable_gpio, 0);

	return 0;
}
/* Deferred-reprobe context: the device to reprobe plus its work item */
struct h5_btrtl_reprobe {
	struct device *dev;
	struct work_struct work;
};
/* Worker that reprobes the device after resume, then drops all refs */
static void h5_btrtl_reprobe_worker(struct work_struct *work)
{
	struct h5_btrtl_reprobe *reprobe =
		container_of(work, struct h5_btrtl_reprobe, work);
	int ret;

	ret = device_reprobe(reprobe->dev);
	if (ret && ret != -EPROBE_DEFER)
		dev_err(reprobe->dev, "Reprobe error %d\n", ret);

	/* Release the device and module references taken at resume */
	put_device(reprobe->dev);
	kfree(reprobe);
	module_put(THIS_MODULE);
}
/*
 * Resume: when the device lost power (wakeup-disable case) schedule a
 * full reprobe from a workqueue — device_reprobe() cannot be called
 * from PM context. Otherwise just reassert the wake GPIO and restore
 * hardware flow control.
 */
static int h5_btrtl_resume(struct h5 *h5)
{
	if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
		struct h5_btrtl_reprobe *reprobe;

		reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
		if (!reprobe)
			return -ENOMEM;

		/* Pin the module until the worker has run */
		__module_get(THIS_MODULE);

		INIT_WORK(&reprobe->work, h5_btrtl_reprobe_worker);
		reprobe->dev = get_device(&h5->hu->serdev->dev);
		queue_work(system_long_wq, &reprobe->work);
	} else {
		gpiod_set_value_cansleep(h5->device_wake_gpio, 1);

		if (test_bit(H5_HW_FLOW_CONTROL, &h5->flags))
			serdev_device_set_flow_control(h5->hu->serdev, true);
	}

	return 0;
}
/* ACPI GpioIo resource indices used by the Realtek ACPI tables */
static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
	{ "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
	{ "enable-gpios", &btrtl_enable_gpios, 1 },
	{ "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
	{},
};

/* Vendor hooks for Realtek H5 controllers */
static struct h5_vnd rtl_vnd = {
	.setup		= h5_btrtl_setup,
	.open		= h5_btrtl_open,
	.close		= h5_btrtl_close,
	.suspend	= h5_btrtl_suspend,
	.resume		= h5_btrtl_resume,
	.acpi_gpio_map	= acpi_btrtl_gpios,
	.sizeof_priv	= sizeof(struct btrealtek_data),
};

static const struct h5_device_data h5_data_rtl8822cs = {
	.vnd = &rtl_vnd,
};

/* RTL8723BS-class parts lose power over suspend: reprobe on resume */
static const struct h5_device_data h5_data_rtl8723bs = {
	.driver_info = H5_INFO_WAKEUP_DISABLE,
	.vnd = &rtl_vnd,
};
  852. #endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id h5_acpi_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
	{ "OBDA0623", (kernel_ulong_t)&h5_data_rtl8723bs },
	{ "OBDA8723", (kernel_ulong_t)&h5_data_rtl8723bs },
#endif
	{ },
};
MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
#endif

/* Same handlers serve both system sleep and runtime PM */
static const struct dev_pm_ops h5_serdev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume)
	SET_RUNTIME_PM_OPS(h5_serdev_suspend, h5_serdev_resume, NULL)
};

static const struct of_device_id rtl_bluetooth_of_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
	{ .compatible = "realtek,rtl8822cs-bt",
	  .data = (const void *)&h5_data_rtl8822cs },
	{ .compatible = "realtek,rtl8723bs-bt",
	  .data = (const void *)&h5_data_rtl8723bs },
	{ .compatible = "realtek,rtl8723cs-bt",
	  .data = (const void *)&h5_data_rtl8723bs },
	{ .compatible = "realtek,rtl8723ds-bt",
	  .data = (const void *)&h5_data_rtl8723bs },
#endif
	{ },
};
MODULE_DEVICE_TABLE(of, rtl_bluetooth_of_match);

static struct serdev_device_driver h5_serdev_driver = {
	.probe = h5_serdev_probe,
	.remove = h5_serdev_remove,
	.driver = {
		.name = "hci_uart_h5",
		.acpi_match_table = ACPI_PTR(h5_acpi_match),
		.pm = &h5_serdev_pm_ops,
		.of_match_table = rtl_bluetooth_of_match,
	},
};
/* Module init: register the serdev driver and the line-discipline proto */
int __init h5_init(void)
{
	/* NOTE(review): the serdev registration result is ignored here,
	 * presumably so the ldisc protocol still registers — confirm.
	 */
	serdev_device_driver_register(&h5_serdev_driver);
	return hci_uart_register_proto(&h5p);
}

/* Module exit: undo both registrations from h5_init() */
int __exit h5_deinit(void)
{
	serdev_device_driver_unregister(&h5_serdev_driver);
	return hci_uart_unregister_proto(&h5p);
}