coredump.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Google Corporation
 */

#include <linux/devcoredump.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

enum hci_devcoredump_pkt_type {
	HCI_DEVCOREDUMP_PKT_INIT,
	HCI_DEVCOREDUMP_PKT_SKB,
	HCI_DEVCOREDUMP_PKT_PATTERN,
	HCI_DEVCOREDUMP_PKT_COMPLETE,
	HCI_DEVCOREDUMP_PKT_ABORT,
};

struct hci_devcoredump_skb_cb {
	u16 pkt_type;
};

struct hci_devcoredump_skb_pattern {
	u8 pattern;
	u32 len;
} __packed;

#define hci_dmp_cb(skb)	((struct hci_devcoredump_skb_cb *)((skb)->cb))

#define DBG_UNEXPECTED_STATE() \
	bt_dev_dbg(hdev, \
		   "Unexpected packet (%d) for state (%d). ", \
		   hci_dmp_cb(skb)->pkt_type, hdev->dump.state)

#define MAX_DEVCOREDUMP_HDR_SIZE	512	/* bytes */

static int hci_devcd_update_hdr_state(char *buf, size_t size, int state)
{
	int len = 0;

	if (!buf)
		return 0;

	len = scnprintf(buf, size, "Bluetooth devcoredump\nState: %d\n", state);

	return len + 1; /* scnprintf adds \0 at the end upon state rewrite */
}

/* Call with hci_dev_lock only. */
static int hci_devcd_update_state(struct hci_dev *hdev, int state)
{
	bt_dev_dbg(hdev, "Updating devcoredump state from %d to %d.",
		   hdev->dump.state, state);

	hdev->dump.state = state;

	return hci_devcd_update_hdr_state(hdev->dump.head,
					  hdev->dump.alloc_size, state);
}

static int hci_devcd_mkheader(struct hci_dev *hdev, struct sk_buff *skb)
{
	char dump_start[] = "--- Start dump ---\n";
	char hdr[80];
	int hdr_len;

	hdr_len = hci_devcd_update_hdr_state(hdr, sizeof(hdr),
					     HCI_DEVCOREDUMP_IDLE);
	skb_put_data(skb, hdr, hdr_len);

	if (hdev->dump.dmp_hdr)
		hdev->dump.dmp_hdr(hdev, skb);

	skb_put_data(skb, dump_start, strlen(dump_start));

	return skb->len;
}

/* Do not call with hci_dev_lock since this calls driver code. */
static void hci_devcd_notify(struct hci_dev *hdev, int state)
{
	if (hdev->dump.notify_change)
		hdev->dump.notify_change(hdev, state);
}

/* Call with hci_dev_lock only. */
void hci_devcd_reset(struct hci_dev *hdev)
{
	hdev->dump.head = NULL;
	hdev->dump.tail = NULL;
	hdev->dump.alloc_size = 0;

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);

	cancel_delayed_work(&hdev->dump.dump_timeout);
	skb_queue_purge(&hdev->dump.dump_q);
}

/* Call with hci_dev_lock only. */
static void hci_devcd_free(struct hci_dev *hdev)
{
	vfree(hdev->dump.head);

	hci_devcd_reset(hdev);
}

/* Call with hci_dev_lock only. */
static int hci_devcd_alloc(struct hci_dev *hdev, u32 size)
{
	hdev->dump.head = vmalloc(size);
	if (!hdev->dump.head)
		return -ENOMEM;

	hdev->dump.alloc_size = size;
	hdev->dump.tail = hdev->dump.head;
	hdev->dump.end = hdev->dump.head + size;

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE);

	return 0;
}

/* Call with hci_dev_lock only. */
static bool hci_devcd_copy(struct hci_dev *hdev, char *buf, u32 size)
{
	if (hdev->dump.tail + size > hdev->dump.end)
		return false;

	memcpy(hdev->dump.tail, buf, size);
	hdev->dump.tail += size;

	return true;
}

/* Call with hci_dev_lock only. */
static bool hci_devcd_memset(struct hci_dev *hdev, u8 pattern, u32 len)
{
	if (hdev->dump.tail + len > hdev->dump.end)
		return false;

	memset(hdev->dump.tail, pattern, len);
	hdev->dump.tail += len;

	return true;
}

/* Call with hci_dev_lock only. */
static int hci_devcd_prepare(struct hci_dev *hdev, u32 dump_size)
{
	struct sk_buff *skb;
	int dump_hdr_size;
	int err = 0;

	skb = alloc_skb(MAX_DEVCOREDUMP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dump_hdr_size = hci_devcd_mkheader(hdev, skb);

	if (hci_devcd_alloc(hdev, dump_hdr_size + dump_size)) {
		err = -ENOMEM;
		goto hdr_free;
	}

	/* Insert the device header */
	if (!hci_devcd_copy(hdev, skb->data, skb->len)) {
		bt_dev_err(hdev, "Failed to insert header");
		hci_devcd_free(hdev);

		err = -ENOMEM;
		goto hdr_free;
	}

hdr_free:
	kfree_skb(skb);

	return err;
}
static void hci_devcd_handle_pkt_init(struct hci_dev *hdev, struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_IDLE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (skb->len != sizeof(dump_size)) {
		bt_dev_dbg(hdev, "Invalid dump init pkt");
		return;
	}

	dump_size = get_unaligned_le32(skb_pull_data(skb, 4));
	if (!dump_size) {
		bt_dev_err(hdev, "Zero size dump init pkt");
		return;
	}

	if (hci_devcd_prepare(hdev, dump_size)) {
		bt_dev_err(hdev, "Failed to prepare for dump");
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ACTIVE);
	queue_delayed_work(hdev->workqueue, &hdev->dump.dump_timeout,
			   hdev->dump.timeout);
}

static void hci_devcd_handle_pkt_skb(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (!hci_devcd_copy(hdev, skb->data, skb->len))
		bt_dev_dbg(hdev, "Failed to insert skb");
}

static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_devcoredump_skb_pattern *pattern;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	if (skb->len != sizeof(*pattern)) {
		bt_dev_dbg(hdev, "Invalid pattern skb");
		return;
	}

	pattern = skb_pull_data(skb, sizeof(*pattern));

	if (!hci_devcd_memset(hdev, pattern->pattern, pattern->len))
		bt_dev_dbg(hdev, "Failed to set pattern");
}

static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_DONE);
	dump_size = hdev->dump.tail - hdev->dump.head;
	bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
}

static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	u32 dump_size;

	if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) {
		DBG_UNEXPECTED_STATE();
		return;
	}

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ABORT);
	dump_size = hdev->dump.tail - hdev->dump.head;
	bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	/* Emit a devcoredump with the available data */
	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);
}
/* Bluetooth devcoredump state machine.
 *
 * Devcoredump states:
 *
 * HCI_DEVCOREDUMP_IDLE: The default state.
 *
 * HCI_DEVCOREDUMP_ACTIVE: A devcoredump will be in this state once it has
 *	been initialized using hci_devcd_init(). Once active, the driver
 *	can append data using hci_devcd_append() or insert a pattern
 *	using hci_devcd_append_pattern().
 *
 * HCI_DEVCOREDUMP_DONE: Once the dump collection is complete, the driver
 *	can signal the completion using hci_devcd_complete(). A
 *	devcoredump is generated indicating the completion event and
 *	then the state machine is reset to the default state.
 *
 * HCI_DEVCOREDUMP_ABORT: The driver can cancel ongoing dump collection in
 *	case of any error using hci_devcd_abort(). A devcoredump is
 *	still generated with the available data indicating the abort
 *	event and then the state machine is reset to the default state.
 *
 * HCI_DEVCOREDUMP_TIMEOUT: A timeout timer for HCI_DEVCOREDUMP_TIMEOUT sec
 *	is started during devcoredump initialization. Once the timeout
 *	occurs, the driver is notified, a devcoredump is generated with
 *	the available data indicating the timeout event and then the
 *	state machine is reset to the default state.
 *
 * The driver must register using hci_devcd_register() before using the hci
 * devcoredump APIs.
 */
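
/* Illustrative sketch only (not part of the upstream file): one possible
 * driver-side walk through the states above, assuming the driver has already
 * registered via hci_devcd_register() and owns an skb ("chunk") holding part
 * of the dump. The helper name foo_report_dump() is hypothetical; the
 * hci_devcd_*() calls are the exported APIs defined later in this file.
 */
static int __maybe_unused foo_report_dump(struct hci_dev *hdev,
					  struct sk_buff *chunk, u32 dump_size)
{
	u32 used = chunk->len;
	int err;

	/* IDLE -> ACTIVE: allocates dump_size bytes (plus header) and arms
	 * the timeout timer.
	 */
	err = hci_devcd_init(hdev, dump_size);
	if (err)
		return err;

	/* Append collected data; the core takes ownership of the skb. */
	err = hci_devcd_append(hdev, chunk);
	if (err)
		goto abort;

	/* Fill any remainder with a known pattern instead of raw data. */
	if (used < dump_size) {
		err = hci_devcd_append_pattern(hdev, 0xff, dump_size - used);
		if (err)
			goto abort;
	}

	/* ACTIVE -> DONE: emits the devcoredump and resets the state machine. */
	return hci_devcd_complete(hdev);

abort:
	/* ACTIVE -> ABORT: still emits whatever was collected so far. */
	hci_devcd_abort(hdev);
	return err;
}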
void hci_devcd_rx(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx);
	struct sk_buff *skb;
	int start_state;

	while ((skb = skb_dequeue(&hdev->dump.dump_q))) {
		/* Return if timeout occurs. The timeout handler function
		 * hci_devcd_timeout() will report the available dump data.
		 */
		if (hdev->dump.state == HCI_DEVCOREDUMP_TIMEOUT) {
			kfree_skb(skb);
			return;
		}

		hci_dev_lock(hdev);
		start_state = hdev->dump.state;

		switch (hci_dmp_cb(skb)->pkt_type) {
		case HCI_DEVCOREDUMP_PKT_INIT:
			hci_devcd_handle_pkt_init(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_SKB:
			hci_devcd_handle_pkt_skb(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_PATTERN:
			hci_devcd_handle_pkt_pattern(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_COMPLETE:
			hci_devcd_handle_pkt_complete(hdev, skb);
			break;

		case HCI_DEVCOREDUMP_PKT_ABORT:
			hci_devcd_handle_pkt_abort(hdev, skb);
			break;

		default:
			bt_dev_dbg(hdev, "Unknown packet (%d) for state (%d). ",
				   hci_dmp_cb(skb)->pkt_type, hdev->dump.state);
			break;
		}

		hci_dev_unlock(hdev);
		kfree_skb(skb);

		/* Notify the driver about any state changes before resetting
		 * the state machine
		 */
		if (start_state != hdev->dump.state)
			hci_devcd_notify(hdev, hdev->dump.state);

		/* Reset the state machine if the devcoredump is complete */
		hci_dev_lock(hdev);
		if (hdev->dump.state == HCI_DEVCOREDUMP_DONE ||
		    hdev->dump.state == HCI_DEVCOREDUMP_ABORT)
			hci_devcd_reset(hdev);
		hci_dev_unlock(hdev);
	}
}
EXPORT_SYMBOL(hci_devcd_rx);
void hci_devcd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    dump.dump_timeout.work);
	u32 dump_size;

	hci_devcd_notify(hdev, HCI_DEVCOREDUMP_TIMEOUT);

	hci_dev_lock(hdev);

	cancel_work(&hdev->dump.dump_rx);

	hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_TIMEOUT);

	dump_size = hdev->dump.tail - hdev->dump.head;
	bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size,
		   hdev->dump.alloc_size);

	/* Emit a devcoredump with the available data */
	dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL);

	hci_devcd_reset(hdev);

	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(hci_devcd_timeout);

int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump,
		       dmp_hdr_t dmp_hdr, notify_change_t notify_change)
{
	/* Driver must implement coredump() and dmp_hdr() functions for
	 * bluetooth devcoredump. The coredump() should trigger a coredump
	 * event on the controller when the device's coredump sysfs entry is
	 * written to. The dmp_hdr() should create a dump header to identify
	 * the controller/fw/driver info.
	 */
	if (!coredump || !dmp_hdr)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->dump.coredump = coredump;
	hdev->dump.dmp_hdr = dmp_hdr;
	hdev->dump.notify_change = notify_change;
	hdev->dump.supported = true;
	hdev->dump.timeout = DEVCOREDUMP_TIMEOUT;
	hci_dev_unlock(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_register);
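
/* Illustrative sketch only (not part of the upstream file): minimal versions
 * of the two mandatory callbacks. The prototypes below are an assumption
 * modelled on how this file invokes them (dmp_hdr(hdev, skb)); the
 * authoritative coredump_t/dmp_hdr_t typedefs live in hci_core.h. The foo_*
 * names and the vendor string are made up. A driver would typically pass
 * these to hci_devcd_register(hdev, foo_coredump, foo_dmp_hdr, NULL) at
 * probe time.
 */
static void __maybe_unused foo_coredump(struct hci_dev *hdev)
{
	/* Ask the controller to start streaming its dump, e.g. via a vendor
	 * HCI command. Invoked when userspace writes to the device's
	 * coredump sysfs entry.
	 */
}

static void __maybe_unused foo_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
	static const char hdr[] = "Vendor: Foo\nFirmware: 1.2.3\n";

	/* Identify the controller/firmware/driver in the dump header. */
	skb_put_data(skb, hdr, strlen(hdr));
}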
static inline bool hci_devcd_enabled(struct hci_dev *hdev)
{
	return hdev->dump.supported;
}

int hci_devcd_init(struct hci_dev *hdev, u32 dump_size)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(sizeof(dump_size), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_INIT;
	put_unaligned_le32(dump_size, skb_put(skb, 4));

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_init);

int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!skb)
		return -ENOMEM;

	if (!hci_devcd_enabled(hdev)) {
		kfree_skb(skb);
		return -EOPNOTSUPP;
	}

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_SKB;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_append);

int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len)
{
	struct hci_devcoredump_skb_pattern p;
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(sizeof(p), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	p.pattern = pattern;
	p.len = len;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_PATTERN;
	skb_put_data(skb, &p, sizeof(p));

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_append_pattern);

int hci_devcd_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_COMPLETE;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_complete);

int hci_devcd_abort(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	if (!hci_devcd_enabled(hdev))
		return -EOPNOTSUPP;

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_ABORT;

	skb_queue_tail(&hdev->dump.dump_q, skb);
	queue_work(hdev->workqueue, &hdev->dump.dump_rx);

	return 0;
}
EXPORT_SYMBOL(hci_devcd_abort);