hci_h5.c

/*
 *
 * Bluetooth HCI Three-wire UART driver
 *
 * Copyright (C) 2012 Intel Corporation
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btrtl.h"
#include "hci_uart.h"

#define HCI_3WIRE_ACK_PKT 0
#define HCI_3WIRE_LINK_PKT 15

/* Sliding window size */
#define H5_TX_WIN_MAX 4

#define H5_ACK_TIMEOUT msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 * 4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))
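
/*
 * Header layout decoded by the accessors above (the 4-byte header is
 * built in h5_prepare_pkt()):
 *   byte 0: seq (bits 0-2), ack (bits 3-5), data-integrity/CRC flag (bit 6),
 *           reliable-packet flag (bit 7)
 *   byte 1: packet type (bits 0-3), low nibble of the 12-bit length (bits 4-7)
 *   byte 2: upper 8 bits of the 12-bit length
 *   byte 3: header checksum, chosen so that all four bytes sum to 0xff
 */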

#define SLIP_DELIMITER 0xc0
#define SLIP_ESC 0xdb
#define SLIP_ESC_DELIM 0xdc
#define SLIP_ESC_ESC 0xdd
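
/*
 * SLIP framing used on the wire: each packet is wrapped in 0xc0 delimiters,
 * and any 0xc0 or 0xdb byte inside the packet is escaped as 0xdb 0xdc or
 * 0xdb 0xdd respectively (see h5_slip_one_byte() and h5_unslip_one_byte()).
 */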

/* H5 state flags */
enum {
        H5_RX_ESC,      /* SLIP escape mode */
        H5_TX_ACK_REQ,  /* Pending ack to send */
};

struct h5 {
        /* Must be the first member, hci_serdev.c expects this. */
        struct hci_uart serdev_hu;

        struct sk_buff_head unack;      /* Unack'ed packets queue */
        struct sk_buff_head rel;        /* Reliable packets queue */
        struct sk_buff_head unrel;      /* Unreliable packets queue */

        unsigned long flags;

        struct sk_buff *rx_skb;         /* Receive buffer */
        size_t rx_pending;              /* Expecting more bytes */
        u8 rx_ack;                      /* Last ack number received */

        int (*rx_func)(struct hci_uart *hu, u8 c);

        struct timer_list timer;        /* Retransmission timer */
        struct hci_uart *hu;            /* Parent HCI UART */

        u8 tx_seq;                      /* Next seq number to send */
        u8 tx_ack;                      /* Next ack number to send */
        u8 tx_win;                      /* Sliding window size */

        enum {
                H5_UNINITIALIZED,
                H5_INITIALIZED,
                H5_ACTIVE,
        } state;

        enum {
                H5_AWAKE,
                H5_SLEEPING,
                H5_WAKING_UP,
        } sleep;

        const struct h5_vnd *vnd;
        const char *id;

        struct gpio_desc *enable_gpio;
        struct gpio_desc *device_wake_gpio;
};

struct h5_vnd {
        int (*setup)(struct h5 *h5);
        void (*open)(struct h5 *h5);
        void (*close)(struct h5 *h5);
        const struct acpi_gpio_mapping *acpi_gpio_map;
};

static void h5_reset_rx(struct h5 *h5);
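
/* Queue a link control message; these are always sent as unreliable packets */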
static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
        struct h5 *h5 = hu->priv;
        struct sk_buff *nskb;

        nskb = alloc_skb(3, GFP_ATOMIC);
        if (!nskb)
                return;

        hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;

        skb_put_data(nskb, data, len);

        skb_queue_tail(&h5->unrel, nskb);
}

static u8 h5_cfg_field(struct h5 *h5)
{
        /* Sliding window size (first 3 bits) */
        return h5->tx_win & 0x07;
}
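
/*
 * Timer callback: while the link is not yet ACTIVE it re-sends the
 * sync/config requests; once active it records the sleep state and moves
 * any timed-out un-acked reliable packets back to the front of the
 * reliable queue for retransmission.
 */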
static void h5_timed_event(struct timer_list *t)
{
        const unsigned char sync_req[] = { 0x01, 0x7e };
        unsigned char conf_req[3] = { 0x03, 0xfc };
        struct h5 *h5 = from_timer(h5, t, timer);
        struct hci_uart *hu = h5->hu;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("%s", hu->hdev->name);

        if (h5->state == H5_UNINITIALIZED)
                h5_link_control(hu, sync_req, sizeof(sync_req));

        if (h5->state == H5_INITIALIZED) {
                conf_req[2] = h5_cfg_field(h5);
                h5_link_control(hu, conf_req, sizeof(conf_req));
        }

        if (h5->state != H5_ACTIVE) {
                mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
                goto wakeup;
        }

        if (h5->sleep != H5_AWAKE) {
                h5->sleep = H5_SLEEPING;
                goto wakeup;
        }

        BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

        spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

        while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
                h5->tx_seq = (h5->tx_seq - 1) & 0x07;
                skb_queue_head(&h5->rel, skb);
        }

        spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
        hci_uart_tx_wakeup(hu);
}

static void h5_peer_reset(struct hci_uart *hu)
{
        struct h5 *h5 = hu->priv;

        BT_ERR("Peer device has reset");

        h5->state = H5_UNINITIALIZED;

        del_timer(&h5->timer);

        skb_queue_purge(&h5->rel);
        skb_queue_purge(&h5->unrel);
        skb_queue_purge(&h5->unack);

        h5->tx_seq = 0;
        h5->tx_ack = 0;

        /* Send reset request to upper stack */
        hci_reset_dev(hu->hdev);
}

static int h5_open(struct hci_uart *hu)
{
        struct h5 *h5;
        const unsigned char sync[] = { 0x01, 0x7e };

        BT_DBG("hu %p", hu);

        if (hu->serdev) {
                h5 = serdev_device_get_drvdata(hu->serdev);
        } else {
                h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
                if (!h5)
                        return -ENOMEM;
        }

        hu->priv = h5;
        h5->hu = hu;

        skb_queue_head_init(&h5->unack);
        skb_queue_head_init(&h5->rel);
        skb_queue_head_init(&h5->unrel);

        h5_reset_rx(h5);

        timer_setup(&h5->timer, h5_timed_event, 0);

        h5->tx_win = H5_TX_WIN_MAX;

        if (h5->vnd && h5->vnd->open)
                h5->vnd->open(h5);

        set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

        /* Send initial sync request */
        h5_link_control(hu, sync, sizeof(sync));
        mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

        return 0;
}

static int h5_close(struct hci_uart *hu)
{
        struct h5 *h5 = hu->priv;

        del_timer_sync(&h5->timer);

        skb_queue_purge(&h5->unack);
        skb_queue_purge(&h5->rel);
        skb_queue_purge(&h5->unrel);

        kfree_skb(h5->rx_skb);
        h5->rx_skb = NULL;

        if (h5->vnd && h5->vnd->close)
                h5->vnd->close(h5);

        if (!hu->serdev)
                kfree(h5);

        return 0;
}

static int h5_setup(struct hci_uart *hu)
{
        struct h5 *h5 = hu->priv;

        if (h5->vnd && h5->vnd->setup)
                return h5->vnd->setup(h5);

        return 0;
}
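
/* Drop packets from the unack queue that the peer has now acknowledged */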
static void h5_pkt_cull(struct h5 *h5)
{
        struct sk_buff *skb, *tmp;
        unsigned long flags;
        int i, to_remove;
        u8 seq;

        spin_lock_irqsave(&h5->unack.lock, flags);

        to_remove = skb_queue_len(&h5->unack);
        if (to_remove == 0)
                goto unlock;

        seq = h5->tx_seq;

        while (to_remove > 0) {
                if (h5->rx_ack == seq)
                        break;

                to_remove--;
                seq = (seq - 1) & 0x07;
        }

        if (seq != h5->rx_ack)
                BT_ERR("Controller acked invalid packet");

        i = 0;
        skb_queue_walk_safe(&h5->unack, skb, tmp) {
                if (i++ >= to_remove)
                        break;

                __skb_unlink(skb, &h5->unack);
                kfree_skb(skb);
        }

        if (skb_queue_empty(&h5->unack))
                del_timer(&h5->timer);

unlock:
        spin_unlock_irqrestore(&h5->unack.lock, flags);
}
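
/*
 * Handle a received link control packet: the sync/config handshake that
 * brings the link to the ACTIVE state, plus the sleep/wakeup/woken
 * messages used for low-power signalling.
 */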
static void h5_handle_internal_rx(struct hci_uart *hu)
{
        struct h5 *h5 = hu->priv;
        const unsigned char sync_req[] = { 0x01, 0x7e };
        const unsigned char sync_rsp[] = { 0x02, 0x7d };
        unsigned char conf_req[3] = { 0x03, 0xfc };
        const unsigned char conf_rsp[] = { 0x04, 0x7b };
        const unsigned char wakeup_req[] = { 0x05, 0xfa };
        const unsigned char woken_req[] = { 0x06, 0xf9 };
        const unsigned char sleep_req[] = { 0x07, 0x78 };
        const unsigned char *hdr = h5->rx_skb->data;
        const unsigned char *data = &h5->rx_skb->data[4];

        BT_DBG("%s", hu->hdev->name);

        if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
                return;

        if (H5_HDR_LEN(hdr) < 2)
                return;

        conf_req[2] = h5_cfg_field(h5);

        if (memcmp(data, sync_req, 2) == 0) {
                if (h5->state == H5_ACTIVE)
                        h5_peer_reset(hu);
                h5_link_control(hu, sync_rsp, 2);
        } else if (memcmp(data, sync_rsp, 2) == 0) {
                if (h5->state == H5_ACTIVE)
                        h5_peer_reset(hu);
                h5->state = H5_INITIALIZED;
                h5_link_control(hu, conf_req, 3);
        } else if (memcmp(data, conf_req, 2) == 0) {
                h5_link_control(hu, conf_rsp, 2);
                h5_link_control(hu, conf_req, 3);
        } else if (memcmp(data, conf_rsp, 2) == 0) {
                if (H5_HDR_LEN(hdr) > 2)
                        h5->tx_win = (data[2] & 0x07);
                BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
                h5->state = H5_ACTIVE;
                hci_uart_init_ready(hu);
                return;
        } else if (memcmp(data, sleep_req, 2) == 0) {
                BT_DBG("Peer went to sleep");
                h5->sleep = H5_SLEEPING;
                return;
        } else if (memcmp(data, woken_req, 2) == 0) {
                BT_DBG("Peer woke up");
                h5->sleep = H5_AWAKE;
        } else if (memcmp(data, wakeup_req, 2) == 0) {
                BT_DBG("Peer requested wakeup");
                h5_link_control(hu, woken_req, 2);
                h5->sleep = H5_AWAKE;
        } else {
                BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
                return;
        }

        hci_uart_tx_wakeup(hu);
}
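
/*
 * A full packet has been received: ack it if it was reliable, cull newly
 * acknowledged packets from the unack queue, then hand HCI data to the
 * core or process link control messages internally.
 */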
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
        struct h5 *h5 = hu->priv;
        const unsigned char *hdr = h5->rx_skb->data;

        if (H5_HDR_RELIABLE(hdr)) {
                h5->tx_ack = (h5->tx_ack + 1) % 8;
                set_bit(H5_TX_ACK_REQ, &h5->flags);
                hci_uart_tx_wakeup(hu);
        }

        h5->rx_ack = H5_HDR_ACK(hdr);

        h5_pkt_cull(h5);

        switch (H5_HDR_PKT_TYPE(hdr)) {
        case HCI_EVENT_PKT:
        case HCI_ACLDATA_PKT:
        case HCI_SCODATA_PKT:
                hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

                /* Remove Three-wire header */
                skb_pull(h5->rx_skb, 4);

                hci_recv_frame(hu->hdev, h5->rx_skb);
                h5->rx_skb = NULL;

                break;

        default:
                h5_handle_internal_rx(hu);
                break;
        }

        h5_reset_rx(h5);
}
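
/*
 * Receive state machine: h5->rx_func steps through
 * h5_rx_delimiter -> h5_rx_pkt_start -> h5_rx_3wire_hdr -> h5_rx_payload
 * (-> h5_rx_crc when the data-integrity flag is set), one byte at a time.
 */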
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
        h5_complete_rx_pkt(hu);

        return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
        struct h5 *h5 = hu->priv;
        const unsigned char *hdr = h5->rx_skb->data;

        if (H5_HDR_CRC(hdr)) {
                h5->rx_func = h5_rx_crc;
                h5->rx_pending = 2;
        } else {
                h5_complete_rx_pkt(hu);
        }

        return 0;
}

static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
        struct h5 *h5 = hu->priv;
        const unsigned char *hdr = h5->rx_skb->data;

        BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
               hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
               H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
               H5_HDR_LEN(hdr));

        if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
                BT_ERR("Invalid header checksum");
                h5_reset_rx(h5);
                return 0;
        }

        if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
                BT_ERR("Out-of-order packet arrived (%u != %u)",
                       H5_HDR_SEQ(hdr), h5->tx_ack);
                h5_reset_rx(h5);
                return 0;
        }

        if (h5->state != H5_ACTIVE &&
            H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
                BT_ERR("Non-link packet received in non-active state");
                h5_reset_rx(h5);
                return 0;
        }

        h5->rx_func = h5_rx_payload;
        h5->rx_pending = H5_HDR_LEN(hdr);

        return 0;
}

static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
        struct h5 *h5 = hu->priv;

        if (c == SLIP_DELIMITER)
                return 1;

        h5->rx_func = h5_rx_3wire_hdr;
        h5->rx_pending = 4;

        h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
        if (!h5->rx_skb) {
                BT_ERR("Can't allocate mem for new packet");
                h5_reset_rx(h5);
                return -ENOMEM;
        }

        h5->rx_skb->dev = (void *)hu->hdev;

        return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
        struct h5 *h5 = hu->priv;

        if (c == SLIP_DELIMITER)
                h5->rx_func = h5_rx_pkt_start;

        return 1;
}
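
/* Decode one SLIP-escaped byte into the receive skb */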
static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
        const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
        const u8 *byte = &c;

        if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
                set_bit(H5_RX_ESC, &h5->flags);
                return;
        }

        if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
                switch (c) {
                case SLIP_ESC_DELIM:
                        byte = &delim;
                        break;
                case SLIP_ESC_ESC:
                        byte = &esc;
                        break;
                default:
                        BT_ERR("Invalid esc byte 0x%02hhx", c);
                        h5_reset_rx(h5);
                        return;
                }
        }

        skb_put_data(h5->rx_skb, byte, 1);
        h5->rx_pending--;

        BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}

static void h5_reset_rx(struct h5 *h5)
{
        if (h5->rx_skb) {
                kfree_skb(h5->rx_skb);
                h5->rx_skb = NULL;
        }

        h5->rx_func = h5_rx_delimiter;
        h5->rx_pending = 0;
        clear_bit(H5_RX_ESC, &h5->flags);
}

static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
        struct h5 *h5 = hu->priv;
        const unsigned char *ptr = data;

        BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
               count);

        while (count > 0) {
                int processed;

                if (h5->rx_pending > 0) {
                        if (*ptr == SLIP_DELIMITER) {
                                BT_ERR("Too short H5 packet");
                                h5_reset_rx(h5);
                                continue;
                        }

                        h5_unslip_one_byte(h5, *ptr);

                        ptr++; count--;
                        continue;
                }

                processed = h5->rx_func(hu, *ptr);
                if (processed < 0)
                        return processed;

                ptr += processed;
                count -= processed;
        }

        return 0;
}

static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
        struct h5 *h5 = hu->priv;

        if (skb->len > 0xfff) {
                BT_ERR("Packet too long (%u bytes)", skb->len);
                kfree_skb(skb);
                return 0;
        }

        if (h5->state != H5_ACTIVE) {
                BT_ERR("Ignoring HCI data in non-active state");
                kfree_skb(skb);
                return 0;
        }

        switch (hci_skb_pkt_type(skb)) {
        case HCI_ACLDATA_PKT:
        case HCI_COMMAND_PKT:
                skb_queue_tail(&h5->rel, skb);
                break;

        case HCI_SCODATA_PKT:
                skb_queue_tail(&h5->unrel, skb);
                break;

        default:
                BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
                kfree_skb(skb);
                break;
        }

        return 0;
}

static void h5_slip_delim(struct sk_buff *skb)
{
        const char delim = SLIP_DELIMITER;

        skb_put_data(skb, &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
        const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
        const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

        switch (c) {
        case SLIP_DELIMITER:
                skb_put_data(skb, &esc_delim, 2);
                break;
        case SLIP_ESC:
                skb_put_data(skb, &esc_esc, 2);
                break;
        default:
                skb_put_data(skb, &c, 1);
        }
}

static bool valid_packet_type(u8 type)
{
        switch (type) {
        case HCI_ACLDATA_PKT:
        case HCI_COMMAND_PKT:
        case HCI_SCODATA_PKT:
        case HCI_3WIRE_LINK_PKT:
        case HCI_3WIRE_ACK_PKT:
                return true;
        default:
                return false;
        }
}
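
/* Build a fully SLIP-framed packet, filling in the four-byte H5 header */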
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
                                      const u8 *data, size_t len)
{
        struct h5 *h5 = hu->priv;
        struct sk_buff *nskb;
        u8 hdr[4];
        int i;

        if (!valid_packet_type(pkt_type)) {
                BT_ERR("Unknown packet type %u", pkt_type);
                return NULL;
        }

        /*
         * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
         * (because bytes 0xc0 and 0xdb are escaped, worst case is when
         * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
         * delimiters at start and end).
         */
        nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
        if (!nskb)
                return NULL;

        hci_skb_pkt_type(nskb) = pkt_type;

        h5_slip_delim(nskb);

        hdr[0] = h5->tx_ack << 3;
        clear_bit(H5_TX_ACK_REQ, &h5->flags);

        /* Reliable packet? */
        if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
                hdr[0] |= 1 << 7;
                hdr[0] |= h5->tx_seq;
                h5->tx_seq = (h5->tx_seq + 1) % 8;
        }

        hdr[1] = pkt_type | ((len & 0x0f) << 4);
        hdr[2] = len >> 4;
        hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

        BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
               hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
               H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
               H5_HDR_LEN(hdr));

        for (i = 0; i < 4; i++)
                h5_slip_one_byte(nskb, hdr[i]);

        for (i = 0; i < len; i++)
                h5_slip_one_byte(nskb, data[i]);

        h5_slip_delim(nskb);

        return nskb;
}
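
/*
 * Pick the next packet to transmit: a wakeup request if the peer is
 * sleeping, otherwise unreliable packets first, then reliable packets as
 * long as the unack window has room, and finally a pure ack if one is
 * still owed to the peer.
 */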
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
        struct h5 *h5 = hu->priv;
        unsigned long flags;
        struct sk_buff *skb, *nskb;

        if (h5->sleep != H5_AWAKE) {
                const unsigned char wakeup_req[] = { 0x05, 0xfa };

                if (h5->sleep == H5_WAKING_UP)
                        return NULL;

                h5->sleep = H5_WAKING_UP;

                BT_DBG("Sending wakeup request");

                mod_timer(&h5->timer, jiffies + HZ / 100);
                return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
        }

        skb = skb_dequeue(&h5->unrel);
        if (skb) {
                nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
                                      skb->data, skb->len);
                if (nskb) {
                        kfree_skb(skb);
                        return nskb;
                }

                skb_queue_head(&h5->unrel, skb);
                BT_ERR("Could not dequeue pkt because alloc_skb failed");
        }

        spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

        if (h5->unack.qlen >= h5->tx_win)
                goto unlock;

        skb = skb_dequeue(&h5->rel);
        if (skb) {
                nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
                                      skb->data, skb->len);
                if (nskb) {
                        __skb_queue_tail(&h5->unack, skb);
                        mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
                        spin_unlock_irqrestore(&h5->unack.lock, flags);
                        return nskb;
                }

                skb_queue_head(&h5->rel, skb);
                BT_ERR("Could not dequeue pkt because alloc_skb failed");
        }

unlock:
        spin_unlock_irqrestore(&h5->unack.lock, flags);

        if (test_bit(H5_TX_ACK_REQ, &h5->flags))
                return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

        return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
        BT_DBG("hu %p", hu);
        return 0;
}

static const struct hci_uart_proto h5p = {
        .id = HCI_UART_3WIRE,
        .name = "Three-wire (H5)",
        .open = h5_open,
        .close = h5_close,
        .setup = h5_setup,
        .recv = h5_recv,
        .enqueue = h5_enqueue,
        .dequeue = h5_dequeue,
        .flush = h5_flush,
};

static int h5_serdev_probe(struct serdev_device *serdev)
{
        const struct acpi_device_id *match;
        struct device *dev = &serdev->dev;
        struct h5 *h5;

        h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
        if (!h5)
                return -ENOMEM;

        set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);

        h5->hu = &h5->serdev_hu;
        h5->serdev_hu.serdev = serdev;
        serdev_device_set_drvdata(serdev, h5);

        if (has_acpi_companion(dev)) {
                match = acpi_match_device(dev->driver->acpi_match_table, dev);
                if (!match)
                        return -ENODEV;

                h5->vnd = (const struct h5_vnd *)match->driver_data;
                h5->id = (char *)match->id;

                if (h5->vnd->acpi_gpio_map)
                        devm_acpi_dev_add_driver_gpios(dev,
                                        h5->vnd->acpi_gpio_map);
        }

        h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
        if (IS_ERR(h5->enable_gpio))
                return PTR_ERR(h5->enable_gpio);

        h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
                                                       GPIOD_OUT_LOW);
        if (IS_ERR(h5->device_wake_gpio))
                return PTR_ERR(h5->device_wake_gpio);

        return hci_uart_register_device(&h5->serdev_hu, &h5p);
}

static void h5_serdev_remove(struct serdev_device *serdev)
{
        struct h5 *h5 = serdev_device_get_drvdata(serdev);

        hci_uart_unregister_device(&h5->serdev_hu);
}

#ifdef CONFIG_BT_HCIUART_RTL
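/*
 * Realtek vendor setup: query the UART settings via btrtl_get_uart_settings(),
 * switch the controller to the new baud rate with the vendor-specific HCI
 * command 0xfc17, then download the firmware.
 */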
static int h5_btrtl_setup(struct h5 *h5)
{
        struct btrtl_device_info *btrtl_dev;
        struct sk_buff *skb;
        __le32 baudrate_data;
        u32 device_baudrate;
        unsigned int controller_baudrate;
        bool flow_control;
        int err;

        btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
        if (IS_ERR(btrtl_dev))
                return PTR_ERR(btrtl_dev);

        err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
                                      &controller_baudrate, &device_baudrate,
                                      &flow_control);
        if (err)
                goto out_free;

        baudrate_data = cpu_to_le32(device_baudrate);
        skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
                             &baudrate_data, HCI_INIT_TIMEOUT);
        if (IS_ERR(skb)) {
                rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
                err = PTR_ERR(skb);
                goto out_free;
        } else {
                kfree_skb(skb);
        }
        /* Give the device some time to set up the new baudrate. */
        usleep_range(10000, 20000);

        serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
        serdev_device_set_flow_control(h5->hu->serdev, flow_control);

        err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
        /* Give the device some time before the hci-core sends it a reset */
        usleep_range(10000, 20000);

        /* Enable controller to do both LE scan and BR/EDR inquiry
         * simultaneously.
         */
        set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks);

out_free:
        btrtl_free(btrtl_dev);

        return err;
}

static void h5_btrtl_open(struct h5 *h5)
{
        /* Devices always start with these fixed parameters */
        serdev_device_set_flow_control(h5->hu->serdev, false);
        serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
        serdev_device_set_baudrate(h5->hu->serdev, 115200);

        /* The controller needs up to 500ms to wakeup */
        gpiod_set_value_cansleep(h5->enable_gpio, 1);
        gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
        msleep(500);
}

static void h5_btrtl_close(struct h5 *h5)
{
        gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
        gpiod_set_value_cansleep(h5->enable_gpio, 0);
}

static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
        { "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
        { "enable-gpios", &btrtl_enable_gpios, 1 },
        { "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
        {},
};

static struct h5_vnd rtl_vnd = {
        .setup = h5_btrtl_setup,
        .open = h5_btrtl_open,
        .close = h5_btrtl_close,
        .acpi_gpio_map = acpi_btrtl_gpios,
};
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id h5_acpi_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
        { "OBDA8723", (kernel_ulong_t)&rtl_vnd },
#endif
        { },
};
MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
#endif

static struct serdev_device_driver h5_serdev_driver = {
        .probe = h5_serdev_probe,
        .remove = h5_serdev_remove,
        .driver = {
                .name = "hci_uart_h5",
                .acpi_match_table = ACPI_PTR(h5_acpi_match),
        },
};

int __init h5_init(void)
{
        serdev_device_driver_register(&h5_serdev_driver);
        return hci_uart_register_proto(&h5p);
}

int __exit h5_deinit(void)
{
        serdev_device_driver_unregister(&h5_serdev_driver);
        return hci_uart_unregister_proto(&h5p);
}