hid_bpf_dispatch.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * HID-BPF support for Linux
  4. *
  5. * Copyright (c) 2022-2024 Benjamin Tissoires
  6. */
  7. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8. #include <linux/bitops.h>
  9. #include <linux/btf.h>
  10. #include <linux/btf_ids.h>
  11. #include <linux/filter.h>
  12. #include <linux/hid.h>
  13. #include <linux/hid_bpf.h>
  14. #include <linux/init.h>
  15. #include <linux/kfifo.h>
  16. #include <linux/minmax.h>
  17. #include <linux/module.h>
  18. #include "hid_bpf_dispatch.h"
/*
 * Set by the HID core at load time (NULL until then); gives this module
 * access to bus/report helpers without a hard link-time dependency.
 * Checked for NULL before use — see hid_get_device().
 */
struct hid_ops *hid_ops;
EXPORT_SYMBOL(hid_ops);
/*
 * dispatch_hid_bpf_device_event - run every attached hid_device_event
 * BPF hook on an incoming report.
 *
 * Returns the original @data when no program is attached, an ERR_PTR on
 * failure, or the per-device BPF data buffer containing the (possibly
 * modified) report.  On success *@size is updated if a hook changed the
 * report size.
 */
u8 *
dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
			      u32 *size, int interrupt, u64 source, bool from_bpf)
{
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = hdev->bpf.allocated_data,
			.size = *size,
		},
		.data = hdev->bpf.device_data,
		.from_bpf = from_bpf,
	};
	struct hid_bpf_ops *e;
	int ret;

	if (type >= HID_REPORT_TYPES)
		return ERR_PTR(-EINVAL);

	/* no program has been attached yet */
	if (!hdev->bpf.device_data)
		return data;

	/*
	 * Work on a zeroed copy of the incoming report so hooks can grow it
	 * up to allocated_data bytes.
	 * NOTE(review): assumes *size <= hdev->bpf.allocated_data —
	 * allocated_data is sized from the max report length, but confirm no
	 * caller can hand in a larger transfer.
	 */
	memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
	memcpy(ctx_kern.data, data, *size);

	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		if (e->hid_device_event) {
			ret = e->hid_device_event(&ctx_kern.ctx, type, source);
			if (ret < 0) {
				/* a hook vetoed the event: abort dispatch */
				rcu_read_unlock();
				return ERR_PTR(ret);
			}
			if (ret)
				/* positive return: the hook changed the event size */
				ctx_kern.ctx.size = ret;
		}
	}
	rcu_read_unlock();

	/* validate the final size negotiated by the hooks */
	ret = ctx_kern.ctx.size;
	if (ret) {
		if (ret > ctx_kern.ctx.allocated_size)
			return ERR_PTR(-EINVAL);
		*size = ret;
	}

	return ctx_kern.data;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
/*
 * dispatch_hid_bpf_raw_requests - offer a raw HID request to every
 * attached hid_hw_request BPF hook.
 *
 * Returns 0 when no hook claimed the request (the caller should then
 * talk to the hardware itself), otherwise the first non-zero hook
 * return value (negative errno or a positive byte count).
 */
int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
				  unsigned char reportnum, u8 *buf,
				  u32 size, enum hid_report_type rtype,
				  enum hid_class_request reqtype,
				  u64 source, bool from_bpf)
{
	/* hooks operate directly on the caller's buffer, no bounce copy */
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = size,
			.size = size,
		},
		.data = buf,
		.from_bpf = from_bpf,
	};
	struct hid_bpf_ops *e;
	int ret, idx;

	if (rtype >= HID_REPORT_TYPES)
		return -EINVAL;

	/* SRCU read side — presumably so hooks may sleep; confirm */
	idx = srcu_read_lock(&hdev->bpf.srcu);
	list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
				 srcu_read_lock_held(&hdev->bpf.srcu)) {
		if (!e->hid_hw_request)
			continue;

		/* the first hook returning non-zero ends the walk */
		ret = e->hid_hw_request(&ctx_kern.ctx, reportnum, rtype, reqtype, source);
		if (ret)
			goto out;
	}
	ret = 0;

out:
	srcu_read_unlock(&hdev->bpf.srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_raw_requests);
/*
 * dispatch_hid_bpf_output_report - offer an output report to every
 * attached hid_hw_output_report BPF hook.
 *
 * Returns 0 when no hook claimed the report (the caller should then
 * send it to the hardware itself), otherwise the first non-zero hook
 * return value.
 */
int dispatch_hid_bpf_output_report(struct hid_device *hdev,
				   __u8 *buf, u32 size, u64 source,
				   bool from_bpf)
{
	/* hooks operate directly on the caller's buffer */
	struct hid_bpf_ctx_kern ctx_kern = {
		.ctx = {
			.hid = hdev,
			.allocated_size = size,
			.size = size,
		},
		.data = buf,
		.from_bpf = from_bpf,
	};
	struct hid_bpf_ops *e;
	int ret, idx;

	idx = srcu_read_lock(&hdev->bpf.srcu);
	list_for_each_entry_srcu(e, &hdev->bpf.prog_list, list,
				 srcu_read_lock_held(&hdev->bpf.srcu)) {
		if (!e->hid_hw_output_report)
			continue;

		/* the first hook returning non-zero ends the walk */
		ret = e->hid_hw_output_report(&ctx_kern.ctx, source);
		if (ret)
			goto out;
	}
	ret = 0;

out:
	srcu_read_unlock(&hdev->bpf.srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_output_report);
  129. u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size)
  130. {
  131. int ret;
  132. struct hid_bpf_ctx_kern ctx_kern = {
  133. .ctx = {
  134. .hid = hdev,
  135. .size = *size,
  136. .allocated_size = HID_MAX_DESCRIPTOR_SIZE,
  137. },
  138. };
  139. if (!hdev->bpf.rdesc_ops)
  140. goto ignore_bpf;
  141. ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
  142. if (!ctx_kern.data)
  143. goto ignore_bpf;
  144. memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));
  145. ret = hdev->bpf.rdesc_ops->hid_rdesc_fixup(&ctx_kern.ctx);
  146. if (ret < 0)
  147. goto ignore_bpf;
  148. if (ret) {
  149. if (ret > ctx_kern.ctx.allocated_size)
  150. goto ignore_bpf;
  151. *size = ret;
  152. }
  153. return krealloc(ctx_kern.data, *size, GFP_KERNEL);
  154. ignore_bpf:
  155. kfree(ctx_kern.data);
  156. return kmemdup(rdesc, *size, GFP_KERNEL);
  157. }
  158. EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
  159. static int device_match_id(struct device *dev, const void *id)
  160. {
  161. struct hid_device *hdev = to_hid_device(dev);
  162. return hdev->id == *(int *)id;
  163. }
/*
 * hid_get_device - find a HID device by its unique id.
 *
 * On success the device reference count has been raised by
 * bus_find_device(); drop it with hid_put_device().  Returns
 * ERR_PTR(-EINVAL) when the HID core is not available (hid_ops is
 * NULL) or when no device matches @hid_id.
 */
struct hid_device *hid_get_device(unsigned int hid_id)
{
	struct device *dev;

	if (!hid_ops)
		return ERR_PTR(-EINVAL);

	dev = bus_find_device(hid_ops->bus_type, NULL, &hid_id, device_match_id);
	if (!dev)
		return ERR_PTR(-EINVAL);

	return to_hid_device(dev);
}
/* Drop the device reference taken by hid_get_device(). */
void hid_put_device(struct hid_device *hid)
{
	put_device(&hid->dev);
}
  178. static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
  179. {
  180. u8 *alloc_data;
  181. unsigned int i, j, max_report_len = 0;
  182. size_t alloc_size = 0;
  183. /* compute the maximum report length for this device */
  184. for (i = 0; i < HID_REPORT_TYPES; i++) {
  185. struct hid_report_enum *report_enum = hdev->report_enum + i;
  186. for (j = 0; j < HID_MAX_IDS; j++) {
  187. struct hid_report *report = report_enum->report_id_hash[j];
  188. if (report)
  189. max_report_len = max(max_report_len, hid_report_len(report));
  190. }
  191. }
  192. /*
  193. * Give us a little bit of extra space and some predictability in the
  194. * buffer length we create. This way, we can tell users that they can
  195. * work on chunks of 64 bytes of memory without having the bpf verifier
  196. * scream at them.
  197. */
  198. alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;
  199. alloc_data = kzalloc(alloc_size, GFP_KERNEL);
  200. if (!alloc_data)
  201. return -ENOMEM;
  202. *data = alloc_data;
  203. *size = alloc_size;
  204. return 0;
  205. }
/*
 * Allocate hdev->bpf.device_data once; subsequent calls are no-ops.
 * Returns 0 on success (including the already-allocated case).
 */
int hid_bpf_allocate_event_data(struct hid_device *hdev)
{
	/* hdev->bpf.device_data is already allocated, abort */
	if (hdev->bpf.device_data)
		return 0;

	return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
}
/*
 * hid_bpf_reconnect - reprobe the device so new/removed BPF programs
 * take effect, at most once until the flag is cleared elsewhere.
 *
 * NOTE(review): test_and_set_bit() takes a bit *number* while
 * HID_STAT_REPROBED is a bit *mask*; ffs(mask) yields the 1-based bit
 * position, so this operates on the bit one above the mask's own bit.
 * That is harmless only if every user of this flag applies the same
 * ffs() convention — confirm against the other HID_STAT_REPROBED users.
 */
int hid_bpf_reconnect(struct hid_device *hdev)
{
	if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
		return device_reprobe(&hdev->dev);

	return 0;
}
  219. /* Disables missing prototype warnings */
  220. __bpf_kfunc_start_defs();
  221. /**
  222. * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
  223. *
  224. * @ctx: The HID-BPF context
  225. * @offset: The offset within the memory
  226. * @rdwr_buf_size: the const size of the buffer
  227. *
  228. * @returns %NULL on error, an %__u8 memory pointer on success
  229. */
  230. __bpf_kfunc __u8 *
  231. hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
  232. {
  233. struct hid_bpf_ctx_kern *ctx_kern;
  234. if (!ctx)
  235. return NULL;
  236. ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
  237. if (rdwr_buf_size + offset > ctx->allocated_size)
  238. return NULL;
  239. return ctx_kern->data + offset;
  240. }
/**
 * hid_bpf_allocate_context - Allocate a context to the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 *
 * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
 */
__bpf_kfunc struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
	struct hid_device *hdev;
	struct hid_bpf_ctx_kern *ctx_kern = NULL;

	hdev = hid_get_device(hid_id);
	if (IS_ERR(hdev))
		return NULL;

	ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
	if (!ctx_kern) {
		/* don't hold the device reference on failure */
		hid_put_device(hdev);
		return NULL;
	}

	/*
	 * The returned context keeps the reference taken by
	 * hid_get_device(); it is dropped in hid_bpf_release_context().
	 */
	ctx_kern->ctx.hid = hdev;

	return &ctx_kern->ctx;
}
/**
 * hid_bpf_release_context - Release the previously allocated context @ctx
 *
 * @ctx: the HID-BPF context to release
 *
 */
__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	struct hid_device *hid;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	/* grab the device pointer before freeing its container */
	hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */

	kfree(ctx_kern);

	/* get_device() is called by bus_find_device() */
	hid_put_device(hid);
}
/*
 * __hid_bpf_hw_check_params - shared argument validation for the
 * hw_request / hw_output_report / input_report kfuncs.
 *
 * Resolves the report addressed by @buf and clamps *@buf__sz down to
 * that report's actual length.  Returns 0 on success, -EINVAL on any
 * invalid argument.
 */
static int
__hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
			  enum hid_report_type rtype)
{
	struct hid_report_enum *report_enum;
	struct hid_report *report;
	struct hid_device *hdev;
	u32 report_len;

	/* check arguments */
	if (!ctx || !hid_ops || !buf)
		return -EINVAL;

	switch (rtype) {
	case HID_INPUT_REPORT:
	case HID_OUTPUT_REPORT:
	case HID_FEATURE_REPORT:
		break;
	default:
		return -EINVAL;
	}

	/* need at least one byte (the report number) */
	if (*buf__sz < 1)
		return -EINVAL;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	report_enum = hdev->report_enum + rtype;
	/* presumably resolves buf[0] as the report ID — confirm against
	 * hid_ops->hid_get_report's contract */
	report = hid_ops->hid_get_report(report_enum, buf);
	if (!report)
		return -EINVAL;

	report_len = hid_report_len(report);

	/* never transfer more than the report can hold */
	if (*buf__sz > report_len)
		*buf__sz = report_len;

	return 0;
}
/**
 * hid_bpf_hw_request - Communicate with a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
 *
 * @returns %0 on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
		   enum hid_report_type rtype, enum hid_class_request reqtype)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	struct hid_device *hdev;
	size_t size = buf__sz;
	u8 *dma_data;
	int ret;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	/* refuse re-entry from a context created by the dispatch paths */
	if (ctx_kern->from_bpf)
		return -EDEADLOCK;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype);
	if (ret)
		return ret;

	switch (reqtype) {
	case HID_REQ_GET_REPORT:
	case HID_REQ_GET_IDLE:
	case HID_REQ_GET_PROTOCOL:
	case HID_REQ_SET_REPORT:
	case HID_REQ_SET_IDLE:
	case HID_REQ_SET_PROTOCOL:
		break;
	default:
		return -EINVAL;
	}

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	/* bounce buffer: copy to kmalloc'd memory, presumably because @buf
	 * is BPF-provided memory unsuitable for the transport — confirm */
	dma_data = kmemdup(buf, size, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	/* dma_data[0] carries the report number; source = ctx so the
	 * dispatch path can recognize its own request */
	ret = hid_ops->hid_hw_raw_request(hdev,
					  dma_data[0],
					  dma_data,
					  size,
					  rtype,
					  reqtype,
					  (u64)(long)ctx,
					  true); /* prevent infinite recursions */

	/* positive return = bytes received (GET requests): copy them back */
	if (ret > 0)
		memcpy(buf, dma_data, ret);

	kfree(dma_data);
	return ret;
}
/**
 * hid_bpf_hw_output_report - Send an output report to a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	struct hid_device *hdev;
	size_t size = buf__sz;
	u8 *dma_data;
	int ret;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	/* refuse re-entry from a context created by the dispatch paths */
	if (ctx_kern->from_bpf)
		return -EDEADLOCK;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT);
	if (ret)
		return ret;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	/* bounce buffer — same rationale as in hid_bpf_hw_request() */
	dma_data = kmemdup(buf, size, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	/* final 'true' prevents infinite recursion through the BPF hooks */
	ret = hid_ops->hid_hw_output_report(hdev, dma_data, size, (u64)(long)ctx, true);

	kfree(dma_data);
	return ret;
}
/*
 * __hid_bpf_input_report - common path for injecting a report into the
 * kernel input stack on behalf of a BPF program.
 *
 * @lock_already_taken tells hid_input_report() whether the caller
 * already holds the driver input lock.  Returns 0 on success or a
 * negative error code.
 */
static int
__hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
		       size_t size, bool lock_already_taken)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	int ret;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	/* refuse re-entry from a context created by the dispatch paths */
	if (ctx_kern->from_bpf)
		return -EDEADLOCK;

	/* check arguments */
	ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
	if (ret)
		return ret;

	/* source = ctx and 'true' mark this as BPF-originated so the
	 * dispatch path does not re-run the hooks on it */
	return hid_ops->hid_input_report(ctx->hid, type, buf, size, 0, (u64)(long)ctx, true,
					 lock_already_taken);
}
/**
 * hid_bpf_try_input_report - Inject a HID report in the kernel from a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns %0 on success, a negative error code otherwise. This function will immediately
 * fail if the device is not available, thus can be safely used in IRQ context.
 */
__bpf_kfunc int
hid_bpf_try_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
			 const size_t buf__sz)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	bool from_hid_event_hook;

	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	/*
	 * A context whose data points at the per-device BPF buffer was built
	 * by dispatch_hid_bpf_device_event(); treat the input lock as
	 * already taken in that case (we are inside the event hook).
	 */
	from_hid_event_hook = ctx_kern->data && ctx_kern->data == ctx->hid->bpf.device_data;

	return __hid_bpf_input_report(ctx, type, buf, buf__sz, from_hid_event_hook);
}
/**
 * hid_bpf_input_report - Inject a HID report in the kernel from a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns %0 on success, a negative error code otherwise. This function will wait for the
 * device to be available before injecting the event, thus needs to be called in sleepable
 * context.
 */
__bpf_kfunc int
hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
		     const size_t buf__sz)
{
	int ret;

	/* may sleep; returns -EINTR if interrupted by a signal */
	ret = down_interruptible(&ctx->hid->driver_input_lock);
	if (ret)
		return ret;

	/* check arguments */
	ret = __hid_bpf_input_report(ctx, type, buf, buf__sz, true /* lock_already_taken */);

	up(&ctx->hid->driver_input_lock);

	return ret;
}
__bpf_kfunc_end_defs();
/*
 * The following set contains all functions we agree BPF programs
 * can use.
 */
BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_request, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_input_report, KF_SLEEPABLE)
/* no KF_SLEEPABLE: usable from the non-sleepable event hooks */
BTF_ID_FLAGS(func, hid_bpf_try_input_report)
BTF_KFUNCS_END(hid_bpf_kfunc_ids)

/* kfuncs exposed to struct_ops programs (registered in hid_bpf_init) */
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &hid_bpf_kfunc_ids,
};

/* for syscall HID-BPF */
BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report)
BTF_ID_FLAGS(func, hid_bpf_input_report)
BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)

/* kfuncs exposed to BPF_PROG_TYPE_SYSCALL programs */
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &hid_bpf_syscall_kfunc_ids,
};
  491. int hid_bpf_connect_device(struct hid_device *hdev)
  492. {
  493. bool need_to_allocate = false;
  494. struct hid_bpf_ops *e;
  495. rcu_read_lock();
  496. list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
  497. if (e->hid_device_event) {
  498. need_to_allocate = true;
  499. break;
  500. }
  501. }
  502. rcu_read_unlock();
  503. /* only allocate BPF data if there are programs attached */
  504. if (!need_to_allocate)
  505. return 0;
  506. return hid_bpf_allocate_event_data(hdev);
  507. }
  508. EXPORT_SYMBOL_GPL(hid_bpf_connect_device);
/*
 * Free the per-device BPF event buffer and reset its bookkeeping.
 * Safe to call when nothing was allocated: kfree(NULL) is a no-op.
 */
void hid_bpf_disconnect_device(struct hid_device *hdev)
{
	kfree(hdev->bpf.device_data);
	hdev->bpf.device_data = NULL;
	hdev->bpf.allocated_data = 0;
}
EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);
/*
 * hid_bpf_destroy_device - tear down the BPF state of a device that is
 * going away.  Detaches all programs, then waits for in-flight SRCU
 * readers (the dispatch paths) before destroying the SRCU struct.
 */
void hid_bpf_destroy_device(struct hid_device *hdev)
{
	if (!hdev)
		return;

	/* mark the device as destroyed in bpf so we don't reattach it */
	hdev->bpf.destroyed = true;

	__hid_bpf_ops_destroy_device(hdev);

	synchronize_srcu(&hdev->bpf.srcu);
	cleanup_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);
/*
 * hid_bpf_device_init - initialize the BPF bookkeeping of a freshly
 * created HID device (program list, its lock, and the SRCU domain).
 * Returns 0 on success or the init_srcu_struct() error.
 */
int hid_bpf_device_init(struct hid_device *hdev)
{
	INIT_LIST_HEAD(&hdev->bpf.prog_list);
	mutex_init(&hdev->bpf.prog_list_lock);

	return init_srcu_struct(&hdev->bpf.srcu);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);
  534. static int __init hid_bpf_init(void)
  535. {
  536. int err;
  537. /* Note: if we exit with an error any time here, we would entirely break HID, which
  538. * is probably not something we want. So we log an error and return success.
  539. *
  540. * This is not a big deal: nobody will be able to use the functionality.
  541. */
  542. err = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &hid_bpf_kfunc_set);
  543. if (err) {
  544. pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
  545. return 0;
  546. }
  547. err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
  548. if (err) {
  549. pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
  550. return 0;
  551. }
  552. return 0;
  553. }
  554. late_initcall(hid_bpf_init);
  555. MODULE_AUTHOR("Benjamin Tissoires");
  556. MODULE_LICENSE("GPL");