// SPDX-License-Identifier: GPL-2.0+
/*
 * f_hid.c -- USB HID function driver
 *
 * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hid.h>
#include <linux/idr.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/usb/func_utils.h>
#include <linux/usb/g_hid.h>
#include <uapi/linux/usb/g_hid.h>

#include "u_hid.h"

#define HIDG_MINORS	4

/*
 * Most operating systems seem to allow a 5000 ms timeout; we allow userspace
 * half that time to respond before we return an empty report.
 */
#define GET_REPORT_TIMEOUT_MS 2500

static int major, minors;

static const struct class hidg_class = {
	.name = "hidg",
};

static DEFINE_IDA(hidg_ida);
static DEFINE_MUTEX(hidg_ida_lock); /* protects access to hidg_ida */
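
/*
 * A report_entry caches the most recent report supplied by userspace for a
 * given report ID (via GADGET_HID_WRITE_GET_REPORT); entries are kept on
 * hidg->report_list and are used to answer GET_REPORT requests from the host.
 */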
struct report_entry {
	struct usb_hidg_report report_data;
	struct list_head node;
};

/*-------------------------------------------------------------------------*/
/* HID gadget struct */

struct f_hidg_req_list {
	struct usb_request *req;
	unsigned int pos;
	struct list_head list;
};

struct f_hidg {
	/* configuration */
	unsigned char bInterfaceSubClass;
	unsigned char bInterfaceProtocol;
	unsigned char protocol;
	unsigned char idle;
	unsigned short report_desc_length;
	char *report_desc;
	unsigned short report_length;
	/*
	 * use_out_ep - if true, the OUT Endpoint (interrupt out method)
	 *              will be used to receive reports from the host
	 *              using functions with the "intout" suffix.
	 *              Otherwise, the OUT Endpoint will not be configured
	 *              and the SETUP/SET_REPORT method ("ssreport" suffix)
	 *              will be used to receive reports.
	 */
	bool use_out_ep;

	/* recv report */
	spinlock_t read_spinlock;
	wait_queue_head_t read_queue;
	bool disabled;
	/* recv report - interrupt out only (use_out_ep == 1) */
	struct list_head completed_out_req;
	unsigned int qlen;
	/* recv report - setup set_report only (use_out_ep == 0) */
	char *set_report_buf;
	unsigned int set_report_length;

	/* send report */
	spinlock_t write_spinlock;
	bool write_pending;
	wait_queue_head_t write_queue;
	struct usb_request *req;

	/* get report */
	struct usb_request *get_req;
	struct usb_hidg_report get_report;
	bool get_report_returned;
	int get_report_req_report_id;
	int get_report_req_report_length;
	spinlock_t get_report_spinlock;
	wait_queue_head_t get_queue;	/* Waiting for userspace response */
	wait_queue_head_t get_id_queue;	/* Get ID came in */
	struct work_struct work;
	struct workqueue_struct *workqueue;
	struct list_head report_list;

	struct device dev;
	struct cdev cdev;
	struct usb_function func;

	struct usb_ep *in_ep;
	struct usb_ep *out_ep;
};

static inline struct f_hidg *func_to_hidg(struct usb_function *f)
{
	return container_of(f, struct f_hidg, func);
}

static void hidg_release(struct device *dev)
{
	struct f_hidg *hidg = container_of(dev, struct f_hidg, dev);

	kfree(hidg->report_desc);
	kfree(hidg->set_report_buf);
	kfree(hidg);
}

/*-------------------------------------------------------------------------*/
/* Static descriptors */

static struct usb_interface_descriptor hidg_interface_desc = {
	.bLength = sizeof hidg_interface_desc,
	.bDescriptorType = USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	.bAlternateSetting = 0,
	/* .bNumEndpoints = DYNAMIC (depends on use_out_ep) */
	.bInterfaceClass = USB_CLASS_HID,
	/* .bInterfaceSubClass = DYNAMIC */
	/* .bInterfaceProtocol = DYNAMIC */
	/* .iInterface = DYNAMIC */
};

static struct hid_descriptor hidg_desc = {
	.bLength = sizeof hidg_desc,
	.bDescriptorType = HID_DT_HID,
	.bcdHID = cpu_to_le16(0x0101),
	.bCountryCode = 0x00,
	.bNumDescriptors = 0x1,
	/* .rpt_desc.bDescriptorType = DYNAMIC */
	/* .rpt_desc.wDescriptorLength = DYNAMIC */
};

/* Super-Speed Support */

static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	/* .wMaxPacketSize = DYNAMIC */
	.bInterval = 4, /* FIXME: Add this field in the
			 * HID gadget configuration?
			 * (struct hidg_func_descriptor)
			 */
};

static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = {
	.bLength = sizeof(hidg_ss_in_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	/* .bMaxBurst = 0, */
	/* .bmAttributes = 0, */
	/* .wBytesPerInterval = DYNAMIC */
};

static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	/* .wMaxPacketSize = DYNAMIC */
	.bInterval = 4, /* FIXME: Add this field in the
			 * HID gadget configuration?
			 * (struct hidg_func_descriptor)
			 */
};

static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
	.bLength = sizeof(hidg_ss_out_comp_desc),
	.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
	/* .bMaxBurst = 0, */
	/* .bmAttributes = 0, */
	/* .wBytesPerInterval = DYNAMIC */
};

static struct usb_descriptor_header *hidg_ss_descriptors_intout[] = {
	(struct usb_descriptor_header *)&hidg_interface_desc,
	(struct usb_descriptor_header *)&hidg_desc,
	(struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
	(struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
	(struct usb_descriptor_header *)&hidg_ss_out_ep_desc,
	(struct usb_descriptor_header *)&hidg_ss_out_comp_desc,
	NULL,
};

static struct usb_descriptor_header *hidg_ss_descriptors_ssreport[] = {
	(struct usb_descriptor_header *)&hidg_interface_desc,
	(struct usb_descriptor_header *)&hidg_desc,
	(struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
	(struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
	NULL,
};

/* High-Speed Support */

static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	/* .wMaxPacketSize = DYNAMIC */
	.bInterval = 4, /* FIXME: Add this field in the
			 * HID gadget configuration?
			 * (struct hidg_func_descriptor)
			 */
};

static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	/* .wMaxPacketSize = DYNAMIC */
	.bInterval = 4, /* FIXME: Add this field in the
			 * HID gadget configuration?
			 * (struct hidg_func_descriptor)
			 */
};

static struct usb_descriptor_header *hidg_hs_descriptors_intout[] = {
	(struct usb_descriptor_header *)&hidg_interface_desc,
	(struct usb_descriptor_header *)&hidg_desc,
	(struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
	(struct usb_descriptor_header *)&hidg_hs_out_ep_desc,
	NULL,
};

static struct usb_descriptor_header *hidg_hs_descriptors_ssreport[] = {
	(struct usb_descriptor_header *)&hidg_interface_desc,
	(struct usb_descriptor_header *)&hidg_desc,
	(struct usb_descriptor_header *)&hidg_hs_in_ep_desc,
	NULL,
};

/* Full-Speed Support */

static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	/* .wMaxPacketSize = DYNAMIC */
	.bInterval = 10, /* FIXME: Add this field in the
			  * HID gadget configuration?
			  * (struct hidg_func_descriptor)
			  */
};

static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	/* .wMaxPacketSize = DYNAMIC */
	.bInterval = 10, /* FIXME: Add this field in the
			  * HID gadget configuration?
			  * (struct hidg_func_descriptor)
			  */
};

static struct usb_descriptor_header *hidg_fs_descriptors_intout[] = {
	(struct usb_descriptor_header *)&hidg_interface_desc,
	(struct usb_descriptor_header *)&hidg_desc,
	(struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
	(struct usb_descriptor_header *)&hidg_fs_out_ep_desc,
	NULL,
};

static struct usb_descriptor_header *hidg_fs_descriptors_ssreport[] = {
	(struct usb_descriptor_header *)&hidg_interface_desc,
	(struct usb_descriptor_header *)&hidg_desc,
	(struct usb_descriptor_header *)&hidg_fs_in_ep_desc,
	NULL,
};

/*-------------------------------------------------------------------------*/
/* Strings */

#define CT_FUNC_HID_IDX	0

static struct usb_string ct_func_string_defs[] = {
	[CT_FUNC_HID_IDX].s = "HID Interface",
	{}, /* end of list */
};

static struct usb_gadget_strings ct_func_string_table = {
	.language = 0x0409, /* en-US */
	.strings = ct_func_string_defs,
};

static struct usb_gadget_strings *ct_func_strings[] = {
	&ct_func_string_table,
	NULL,
};

/*-------------------------------------------------------------------------*/
/* Char Device */

static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer,
				  size_t count, loff_t *ptr)
{
	struct f_hidg *hidg = file->private_data;
	struct f_hidg_req_list *list;
	struct usb_request *req;
	unsigned long flags;
	int ret;

	if (!count)
		return 0;

	spin_lock_irqsave(&hidg->read_spinlock, flags);

#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req) || hidg->disabled)

	/* wait for at least one buffer to complete */
	while (!READ_COND_INTOUT) {
		spin_unlock_irqrestore(&hidg->read_spinlock, flags);
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(hidg->read_queue, READ_COND_INTOUT))
			return -ERESTARTSYS;

		spin_lock_irqsave(&hidg->read_spinlock, flags);
	}

	if (hidg->disabled) {
		spin_unlock_irqrestore(&hidg->read_spinlock, flags);
		return -ESHUTDOWN;
	}

	/* pick the first one */
	list = list_first_entry(&hidg->completed_out_req,
				struct f_hidg_req_list, list);

	/*
	 * Remove this from the list to protect it from being freed
	 * while the host disables our function
	 */
	list_del(&list->list);

	req = list->req;
	count = min_t(unsigned int, count, req->actual - list->pos);
	spin_unlock_irqrestore(&hidg->read_spinlock, flags);

	/* copy to user outside spinlock */
	count -= copy_to_user(buffer, req->buf + list->pos, count);
	list->pos += count;

	/*
	 * if this request is completely handled and transferred to
	 * userspace, remove its entry from the list and requeue it.
	 * Otherwise, we will revisit it upon the next call, taking
	 * into account its current read position.
	 */
	if (list->pos == req->actual) {
		kfree(list);

		req->length = hidg->report_length;
		ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL);
		if (ret < 0) {
			free_ep_req(hidg->out_ep, req);
			return ret;
		}
	} else {
		spin_lock_irqsave(&hidg->read_spinlock, flags);
		list_add(&list->list, &hidg->completed_out_req);
		spin_unlock_irqrestore(&hidg->read_spinlock, flags);

		wake_up(&hidg->read_queue);
	}

	return count;
}

#define READ_COND_SSREPORT (hidg->set_report_buf != NULL || hidg->disabled)

static ssize_t f_hidg_ssreport_read(struct file *file, char __user *buffer,
				    size_t count, loff_t *ptr)
{
	struct f_hidg *hidg = file->private_data;
	char *tmp_buf = NULL;
	unsigned long flags;

	if (!count)
		return 0;

	spin_lock_irqsave(&hidg->read_spinlock, flags);

	while (!READ_COND_SSREPORT) {
		spin_unlock_irqrestore(&hidg->read_spinlock, flags);
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(hidg->read_queue, READ_COND_SSREPORT))
			return -ERESTARTSYS;

		spin_lock_irqsave(&hidg->read_spinlock, flags);
	}

	count = min_t(unsigned int, count, hidg->set_report_length);
	tmp_buf = hidg->set_report_buf;
	hidg->set_report_buf = NULL;

	spin_unlock_irqrestore(&hidg->read_spinlock, flags);

	if (tmp_buf != NULL) {
		count -= copy_to_user(buffer, tmp_buf, count);
		kfree(tmp_buf);
	} else {
		count = -ENOMEM;
	}

	wake_up(&hidg->read_queue);

	return count;
}

static ssize_t f_hidg_read(struct file *file, char __user *buffer,
			   size_t count, loff_t *ptr)
{
	struct f_hidg *hidg = file->private_data;

	if (hidg->use_out_ep)
		return f_hidg_intout_read(file, buffer, count, ptr);
	else
		return f_hidg_ssreport_read(file, buffer, count, ptr);
}
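
/*
 * Illustrative userspace read (a sketch, not part of the driver): reports
 * sent by the host arrive on the function's character device, e.g.
 *
 *	int fd = open("/dev/hidg0", O_RDWR);
 *	char buf[64];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * read() blocks until a report is available unless O_NONBLOCK is set, in
 * which case it returns -EAGAIN while the queue is empty.
 */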

static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
	unsigned long flags;

	if (req->status != 0) {
		ERROR(hidg->func.config->cdev,
		      "End Point Request ERROR: %d\n", req->status);
	}

	spin_lock_irqsave(&hidg->write_spinlock, flags);
	hidg->write_pending = 0;
	spin_unlock_irqrestore(&hidg->write_spinlock, flags);
	wake_up(&hidg->write_queue);
}

static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *offp)
{
	struct f_hidg *hidg = file->private_data;
	struct usb_request *req;
	unsigned long flags;
	ssize_t status = -ENOMEM;

	spin_lock_irqsave(&hidg->write_spinlock, flags);

	if (!hidg->req) {
		spin_unlock_irqrestore(&hidg->write_spinlock, flags);
		return -ESHUTDOWN;
	}

#define WRITE_COND (!hidg->write_pending)
try_again:
	/* write queue */
	while (!WRITE_COND) {
		spin_unlock_irqrestore(&hidg->write_spinlock, flags);
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible_exclusive(
				hidg->write_queue, WRITE_COND))
			return -ERESTARTSYS;

		spin_lock_irqsave(&hidg->write_spinlock, flags);
	}

	hidg->write_pending = 1;
	req = hidg->req;
	count = min_t(unsigned, count, hidg->report_length);

	spin_unlock_irqrestore(&hidg->write_spinlock, flags);

	if (!req) {
		ERROR(hidg->func.config->cdev, "hidg->req is NULL\n");
		status = -ESHUTDOWN;
		goto release_write_pending;
	}

	status = copy_from_user(req->buf, buffer, count);
	if (status != 0) {
		ERROR(hidg->func.config->cdev,
		      "copy_from_user error\n");
		status = -EINVAL;
		goto release_write_pending;
	}

	spin_lock_irqsave(&hidg->write_spinlock, flags);

	/* when our function has been disabled by the host */
	if (!hidg->req) {
		free_ep_req(hidg->in_ep, req);
		/*
		 * TODO
		 * Should we fail with error here?
		 */
		goto try_again;
	}

	req->status = 0;
	req->zero = 0;
	req->length = count;
	req->complete = f_hidg_req_complete;
	req->context = hidg;

	spin_unlock_irqrestore(&hidg->write_spinlock, flags);

	if (!hidg->in_ep->enabled) {
		ERROR(hidg->func.config->cdev, "in_ep is disabled\n");
		status = -ESHUTDOWN;
		goto release_write_pending;
	}

	status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
	if (status < 0)
		goto release_write_pending;
	else
		status = count;

	return status;
release_write_pending:
	spin_lock_irqsave(&hidg->write_spinlock, flags);
	hidg->write_pending = 0;
	spin_unlock_irqrestore(&hidg->write_spinlock, flags);

	wake_up(&hidg->write_queue);

	return status;
}
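
/*
 * Illustrative userspace write (a sketch): an input report is sent to the
 * host by writing it to the same character device, e.g.
 *
 *	write(fd, report, report_length);
 *
 * The data is truncated to the configured report_length and queued on the
 * interrupt IN endpoint; only one report is in flight at a time, so further
 * writers wait (or get -EAGAIN with O_NONBLOCK) until the previous request
 * completes.
 */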

static struct report_entry *f_hidg_search_for_report(struct f_hidg *hidg, u8 report_id)
{
	struct list_head *ptr;
	struct report_entry *entry;

	list_for_each(ptr, &hidg->report_list) {
		entry = list_entry(ptr, struct report_entry, node);
		if (entry->report_data.report_id == report_id)
			return entry;
	}

	return NULL;
}

static void get_report_workqueue_handler(struct work_struct *work)
{
	struct f_hidg *hidg = container_of(work, struct f_hidg, work);
	struct usb_composite_dev *cdev = hidg->func.config->cdev;
	struct usb_request *req;
	struct report_entry *ptr;
	unsigned long flags;
	int status = 0;

	spin_lock_irqsave(&hidg->get_report_spinlock, flags);
	req = hidg->get_req;
	if (!req) {
		spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
		return;
	}

	req->zero = 0;
	req->length = min_t(unsigned int, min_t(unsigned int, hidg->get_report_req_report_length,
						hidg->report_length),
			    MAX_REPORT_LENGTH);

	/* Check if there is a report available for an immediate response */
	ptr = f_hidg_search_for_report(hidg, hidg->get_report_req_report_id);
	if (ptr && !ptr->report_data.userspace_req) {
		/* Report exists in the list and is to be used for the immediate response */
		req->buf = ptr->report_data.data;
		status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		hidg->get_report_returned = true;
		spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
	} else {
		/*
		 * Report does not exist in the list or should not be sent
		 * immediately, i.e. give userspace time to respond
		 */
		hidg->get_report_returned = false;
		spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
		wake_up(&hidg->get_id_queue);

#define GET_REPORT_COND (!hidg->get_report_returned)

		/* Wait until userspace has responded or timeout */
		status = wait_event_interruptible_timeout(hidg->get_queue, !GET_REPORT_COND,
					msecs_to_jiffies(GET_REPORT_TIMEOUT_MS));

		spin_lock_irqsave(&hidg->get_report_spinlock, flags);
		req = hidg->get_req;
		if (!req) {
			spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
			return;
		}

		if (status == 0 && !hidg->get_report_returned) {
			/* GET_REPORT request was not serviced by userspace within the timeout period */
			VDBG(cdev, "get_report : userspace timeout.\n");
			hidg->get_report_returned = true;
		}

		/* Search again for the report ID in the list and respond to the GET_REPORT request */
		ptr = f_hidg_search_for_report(hidg, hidg->get_report_req_report_id);
		if (ptr) {
			/*
			 * Either get an updated response just serviced by userspace
			 * or send the latest response in the list
			 */
			req->buf = ptr->report_data.data;
		} else {
			/* If there are no previously sent reports, send an empty report */
			req->buf = hidg->get_report.data;
			memset(req->buf, 0x0, req->length);
		}

		status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
	}

	if (status < 0)
		VDBG(cdev, "usb_ep_queue error on ep0 responding to GET_REPORT\n");
}

static int f_hidg_get_report_id(struct file *file, __u8 __user *buffer)
{
	struct f_hidg *hidg = file->private_data;
	int ret = 0;

	ret = put_user(hidg->get_report_req_report_id, buffer);

	return ret;
}

static int f_hidg_get_report(struct file *file, struct usb_hidg_report __user *buffer)
{
	struct f_hidg *hidg = file->private_data;
	struct usb_composite_dev *cdev = hidg->func.config->cdev;
	unsigned long flags;
	struct report_entry *entry;
	struct report_entry *ptr;
	__u8 report_id;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (copy_from_user(&entry->report_data, buffer,
			   sizeof(struct usb_hidg_report))) {
		ERROR(cdev, "copy_from_user error\n");
		kfree(entry);
		return -EINVAL;
	}

	report_id = entry->report_data.report_id;

	spin_lock_irqsave(&hidg->get_report_spinlock, flags);
	ptr = f_hidg_search_for_report(hidg, report_id);

	if (ptr) {
		/* Report already exists in list - update it */
		if (copy_from_user(&ptr->report_data, buffer,
				   sizeof(struct usb_hidg_report))) {
			spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
			ERROR(cdev, "copy_from_user error\n");
			kfree(entry);
			return -EINVAL;
		}
		kfree(entry);
	} else {
		/* Report does not exist in list - add it */
		list_add_tail(&entry->node, &hidg->report_list);
	}

	/* If there is no response pending then do nothing further */
	if (hidg->get_report_returned) {
		spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);
		return 0;
	}

	/* If this userspace response serves the current pending report */
	if (hidg->get_report_req_report_id == report_id) {
		hidg->get_report_returned = true;
		wake_up(&hidg->get_queue);
	}

	spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);

	return 0;
}

static long f_hidg_ioctl(struct file *file, unsigned int code, unsigned long arg)
{
	switch (code) {
	case GADGET_HID_READ_GET_REPORT_ID:
		return f_hidg_get_report_id(file, (__u8 __user *)arg);
	case GADGET_HID_WRITE_GET_REPORT:
		return f_hidg_get_report(file, (struct usb_hidg_report __user *)arg);
	default:
		return -ENOTTY;
	}
}
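
/*
 * Illustrative GET_REPORT servicing from userspace (a sketch; it assumes the
 * uapi definitions from <linux/usb/g_hid.h>): poll() reports EPOLLPRI while
 * a GET_REPORT request is waiting for an answer. A daemon could then do
 *
 *	__u8 id;
 *	struct usb_hidg_report rep = { 0 };
 *
 *	ioctl(fd, GADGET_HID_READ_GET_REPORT_ID, &id);
 *	rep.report_id = id;
 *	memcpy(rep.data, answer, answer_len);	// "answer" is hypothetical
 *	ioctl(fd, GADGET_HID_WRITE_GET_REPORT, &rep);
 *
 * The answer must arrive within GET_REPORT_TIMEOUT_MS; otherwise the driver
 * completes the control request with a previously cached or empty report.
 */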

static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
{
	struct f_hidg *hidg = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &hidg->read_queue, wait);
	poll_wait(file, &hidg->write_queue, wait);
	poll_wait(file, &hidg->get_queue, wait);
	poll_wait(file, &hidg->get_id_queue, wait);

	if (WRITE_COND)
		ret |= EPOLLOUT | EPOLLWRNORM;

	if (hidg->use_out_ep) {
		if (READ_COND_INTOUT)
			ret |= EPOLLIN | EPOLLRDNORM;
	} else {
		if (READ_COND_SSREPORT)
			ret |= EPOLLIN | EPOLLRDNORM;
	}

	if (GET_REPORT_COND)
		ret |= EPOLLPRI;

	return ret;
}

#undef WRITE_COND
#undef READ_COND_SSREPORT
#undef READ_COND_INTOUT
#undef GET_REPORT_COND

static int f_hidg_release(struct inode *inode, struct file *fd)
{
	fd->private_data = NULL;
	return 0;
}

static int f_hidg_open(struct inode *inode, struct file *fd)
{
	struct f_hidg *hidg =
		container_of(inode->i_cdev, struct f_hidg, cdev);

	fd->private_data = hidg;

	return 0;
}

/*-------------------------------------------------------------------------*/
/* usb_function */

static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep,
						    unsigned length)
{
	return alloc_ep_req(ep, length);
}

static void hidg_intout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_hidg *hidg = (struct f_hidg *) req->context;
	struct usb_composite_dev *cdev = hidg->func.config->cdev;
	struct f_hidg_req_list *req_list;
	unsigned long flags;

	switch (req->status) {
	case 0:
		req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC);
		if (!req_list) {
			ERROR(cdev, "Unable to allocate mem for req_list\n");
			goto free_req;
		}

		req_list->req = req;

		spin_lock_irqsave(&hidg->read_spinlock, flags);
		list_add_tail(&req_list->list, &hidg->completed_out_req);
		spin_unlock_irqrestore(&hidg->read_spinlock, flags);

		wake_up(&hidg->read_queue);
		break;
	default:
		ERROR(cdev, "Set report failed %d\n", req->status);
		fallthrough;
	case -ECONNABORTED:	/* hardware forced ep reset */
	case -ECONNRESET:	/* request dequeued */
	case -ESHUTDOWN:	/* disconnect from host */
free_req:
		free_ep_req(ep, req);
		return;
	}
}

static void hidg_ssreport_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_hidg *hidg = (struct f_hidg *)req->context;
	struct usb_composite_dev *cdev = hidg->func.config->cdev;
	char *new_buf = NULL;
	unsigned long flags;

	if (req->status != 0 || req->buf == NULL || req->actual == 0) {
		ERROR(cdev,
		      "%s FAILED: status=%d, buf=%p, actual=%d\n",
		      __func__, req->status, req->buf, req->actual);
		return;
	}

	spin_lock_irqsave(&hidg->read_spinlock, flags);

	new_buf = krealloc(hidg->set_report_buf, req->actual, GFP_ATOMIC);
	if (new_buf == NULL) {
		spin_unlock_irqrestore(&hidg->read_spinlock, flags);
		return;
	}
	hidg->set_report_buf = new_buf;
	hidg->set_report_length = req->actual;
	memcpy(hidg->set_report_buf, req->buf, req->actual);

	spin_unlock_irqrestore(&hidg->read_spinlock, flags);

	wake_up(&hidg->read_queue);
}

static void hidg_get_report_complete(struct usb_ep *ep, struct usb_request *req)
{
}
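
/*
 * hidg_setup() handles the HID class-specific control requests on ep0
 * (GET_REPORT, GET/SET_PROTOCOL, GET/SET_IDLE, SET_REPORT) as well as
 * GET_DESCRIPTOR for the HID and report descriptors. GET_REPORT is not
 * answered here directly; it is deferred to get_report_workqueue_handler()
 * so that userspace gets a chance to provide the report data.
 */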

static int hidg_setup(struct usb_function *f,
		      const struct usb_ctrlrequest *ctrl)
{
	struct f_hidg *hidg = func_to_hidg(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request *req = cdev->req;
	int status = 0;
	__u16 value, length;
	unsigned long flags;

	value = __le16_to_cpu(ctrl->wValue);
	length = __le16_to_cpu(ctrl->wLength);

	VDBG(cdev,
	     "%s ctrl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n",
	     __func__, ctrl->bRequestType, ctrl->bRequest, value);

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
		  | HID_REQ_GET_REPORT):
		VDBG(cdev, "get_report | wLength=%d\n", ctrl->wLength);

		/*
		 * Update the GET_REPORT ID so that an ioctl can be used to
		 * determine which report the GET_REPORT request was actually for.
		 */
		spin_lock_irqsave(&hidg->get_report_spinlock, flags);
		hidg->get_report_req_report_id = value & 0xff;
		hidg->get_report_req_report_length = length;
		spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);

		queue_work(hidg->workqueue, &hidg->work);

		return status;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
		  | HID_REQ_GET_PROTOCOL):
		VDBG(cdev, "get_protocol\n");
		length = min_t(unsigned int, length, 1);
		((u8 *) req->buf)[0] = hidg->protocol;
		goto respond;
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
		  | HID_REQ_GET_IDLE):
		VDBG(cdev, "get_idle\n");
		length = min_t(unsigned int, length, 1);
		((u8 *) req->buf)[0] = hidg->idle;
		goto respond;
		break;

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
		  | HID_REQ_SET_REPORT):
		VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
		if (hidg->use_out_ep)
			goto stall;
		req->complete = hidg_ssreport_complete;
		req->context = hidg;
		goto respond;
		break;

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
		  | HID_REQ_SET_PROTOCOL):
		VDBG(cdev, "set_protocol\n");
		if (value > HID_REPORT_PROTOCOL)
			goto stall;
		length = 0;
		/*
		 * We assume that programs implementing the Boot protocol
		 * are also compatible with the Report Protocol
		 */
		if (hidg->bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT) {
			hidg->protocol = value;
			goto respond;
		}
		goto stall;
		break;

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
		  | HID_REQ_SET_IDLE):
		VDBG(cdev, "set_idle\n");
		length = 0;
		hidg->idle = value >> 8;
		goto respond;
		break;

	case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
		  | USB_REQ_GET_DESCRIPTOR):
		switch (value >> 8) {
		case HID_DT_HID:
		{
			struct hid_descriptor hidg_desc_copy = hidg_desc;

			VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
			hidg_desc_copy.rpt_desc.bDescriptorType = HID_DT_REPORT;
			hidg_desc_copy.rpt_desc.wDescriptorLength =
				cpu_to_le16(hidg->report_desc_length);

			length = min_t(unsigned short, length,
				       hidg_desc_copy.bLength);
			memcpy(req->buf, &hidg_desc_copy, length);
			goto respond;
			break;
		}
		case HID_DT_REPORT:
			VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
			length = min_t(unsigned short, length,
				       hidg->report_desc_length);
			memcpy(req->buf, hidg->report_desc, length);
			goto respond;
			break;

		default:
			VDBG(cdev, "Unknown descriptor request 0x%x\n",
			     value >> 8);
			goto stall;
			break;
		}
		break;

	default:
		VDBG(cdev, "Unknown request 0x%x\n",
		     ctrl->bRequest);
		goto stall;
		break;
	}

stall:
	return -EOPNOTSUPP;

respond:
	req->zero = 0;
	req->length = length;
	status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
	if (status < 0)
		ERROR(cdev, "usb_ep_queue error on ep0 %d\n", value);
	return status;
}

static void hidg_disable(struct usb_function *f)
{
	struct f_hidg *hidg = func_to_hidg(f);
	struct f_hidg_req_list *list, *next;
	unsigned long flags;

	usb_ep_disable(hidg->in_ep);

	if (hidg->out_ep) {
		usb_ep_disable(hidg->out_ep);

		spin_lock_irqsave(&hidg->read_spinlock, flags);
		list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
			free_ep_req(hidg->out_ep, list->req);
			list_del(&list->list);
			kfree(list);
		}
		spin_unlock_irqrestore(&hidg->read_spinlock, flags);
	}

	spin_lock_irqsave(&hidg->get_report_spinlock, flags);
	if (!hidg->get_report_returned) {
		usb_ep_free_request(f->config->cdev->gadget->ep0, hidg->get_req);
		hidg->get_req = NULL;
		hidg->get_report_returned = true;
	}
	spin_unlock_irqrestore(&hidg->get_report_spinlock, flags);

	spin_lock_irqsave(&hidg->read_spinlock, flags);
	hidg->disabled = true;
	spin_unlock_irqrestore(&hidg->read_spinlock, flags);
	wake_up(&hidg->read_queue);

	spin_lock_irqsave(&hidg->write_spinlock, flags);
	if (!hidg->write_pending) {
		free_ep_req(hidg->in_ep, hidg->req);
		hidg->write_pending = 1;
	}

	hidg->req = NULL;
	spin_unlock_irqrestore(&hidg->write_spinlock, flags);
}
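
/*
 * hidg_set_alt() (re)configures the endpoints for the selected speed,
 * allocates the single IN request used by f_hidg_write() and, when the
 * interrupt OUT method is in use, pre-queues hidg->qlen OUT requests so
 * that host reports can be received as soon as the interface is active.
 */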

static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct usb_composite_dev *cdev = f->config->cdev;
	struct f_hidg *hidg = func_to_hidg(f);
	struct usb_request *req_in = NULL;
	unsigned long flags;
	int i, status = 0;

	VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt);

	if (hidg->in_ep != NULL) {
		/* restart endpoint */
		usb_ep_disable(hidg->in_ep);

		status = config_ep_by_speed(f->config->cdev->gadget, f,
					    hidg->in_ep);
		if (status) {
			ERROR(cdev, "config_ep_by_speed FAILED!\n");
			goto fail;
		}
		status = usb_ep_enable(hidg->in_ep);
		if (status < 0) {
			ERROR(cdev, "Enable IN endpoint FAILED!\n");
			goto fail;
		}
		hidg->in_ep->driver_data = hidg;

		req_in = hidg_alloc_ep_req(hidg->in_ep, hidg->report_length);
		if (!req_in) {
			status = -ENOMEM;
			goto disable_ep_in;
		}
	}

	if (hidg->use_out_ep && hidg->out_ep != NULL) {
		/* restart endpoint */
		usb_ep_disable(hidg->out_ep);

		status = config_ep_by_speed(f->config->cdev->gadget, f,
					    hidg->out_ep);
		if (status) {
			ERROR(cdev, "config_ep_by_speed FAILED!\n");
			goto free_req_in;
		}
		status = usb_ep_enable(hidg->out_ep);
		if (status < 0) {
			ERROR(cdev, "Enable OUT endpoint FAILED!\n");
			goto free_req_in;
		}
		hidg->out_ep->driver_data = hidg;

		/*
		 * allocate a bunch of read buffers and queue them all at once.
		 */
		for (i = 0; i < hidg->qlen && status == 0; i++) {
			struct usb_request *req =
					hidg_alloc_ep_req(hidg->out_ep,
							  hidg->report_length);
			if (req) {
				req->complete = hidg_intout_complete;
				req->context = hidg;
				status = usb_ep_queue(hidg->out_ep, req,
						      GFP_ATOMIC);
				if (status) {
					ERROR(cdev, "%s queue req --> %d\n",
					      hidg->out_ep->name, status);
					free_ep_req(hidg->out_ep, req);
				}
			} else {
				status = -ENOMEM;
				goto disable_out_ep;
			}
		}
	}

	spin_lock_irqsave(&hidg->read_spinlock, flags);
	hidg->disabled = false;
	spin_unlock_irqrestore(&hidg->read_spinlock, flags);

	if (hidg->in_ep != NULL) {
		spin_lock_irqsave(&hidg->write_spinlock, flags);
		hidg->req = req_in;
		hidg->write_pending = 0;
		spin_unlock_irqrestore(&hidg->write_spinlock, flags);

		wake_up(&hidg->write_queue);
	}
	return 0;
disable_out_ep:
	if (hidg->out_ep)
		usb_ep_disable(hidg->out_ep);
free_req_in:
	if (req_in)
		free_ep_req(hidg->in_ep, req_in);

disable_ep_in:
	if (hidg->in_ep)
		usb_ep_disable(hidg->in_ep);

fail:
	return status;
}

#ifdef CONFIG_COMPAT
static long f_hidg_compat_ioctl(struct file *file, unsigned int code,
				unsigned long value)
{
	return f_hidg_ioctl(file, code, value);
}
#endif

static const struct file_operations f_hidg_fops = {
	.owner = THIS_MODULE,
	.open = f_hidg_open,
	.release = f_hidg_release,
	.write = f_hidg_write,
	.read = f_hidg_read,
	.poll = f_hidg_poll,
	.unlocked_ioctl = f_hidg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = f_hidg_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_ep *ep;
	struct f_hidg *hidg = func_to_hidg(f);
	struct usb_string *us;
	int status;

	hidg->get_req = usb_ep_alloc_request(c->cdev->gadget->ep0, GFP_ATOMIC);
	if (!hidg->get_req)
		return -ENOMEM;

	hidg->get_req->zero = 0;
	hidg->get_req->complete = hidg_get_report_complete;
	hidg->get_req->context = hidg;
	hidg->get_report_returned = true;

	/* maybe allocate device-global string IDs, and patch descriptors */
	us = usb_gstrings_attach(c->cdev, ct_func_strings,
				 ARRAY_SIZE(ct_func_string_defs));
	if (IS_ERR(us))
		return PTR_ERR(us);
	hidg_interface_desc.iInterface = us[CT_FUNC_HID_IDX].id;

	/* allocate instance-specific interface IDs, and patch descriptors */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	hidg_interface_desc.bInterfaceNumber = status;

	/* allocate instance-specific endpoints */
	status = -ENODEV;
	ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc);
	if (!ep)
		goto fail;
	hidg->in_ep = ep;

	hidg->out_ep = NULL;
	if (hidg->use_out_ep) {
		ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc);
		if (!ep)
			goto fail;
		hidg->out_ep = ep;
	}

	/* used only if use_out_ep == 1 */
	hidg->set_report_buf = NULL;

	/* set descriptor dynamic values */
	hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
	hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
	hidg_interface_desc.bNumEndpoints = hidg->use_out_ep ? 2 : 1;
	hidg->protocol = HID_REPORT_PROTOCOL;
	hidg->idle = 1;
	hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
	hidg_ss_in_comp_desc.wBytesPerInterval =
		cpu_to_le16(hidg->report_length);
	hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
	hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
	hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
	hidg_ss_out_comp_desc.wBytesPerInterval =
		cpu_to_le16(hidg->report_length);
	hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
	hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
	/*
	 * We can use the hidg_desc struct here, but we should not rely on
	 * its content not changing after this function returns.
	 */
	hidg_desc.rpt_desc.bDescriptorType = HID_DT_REPORT;
	hidg_desc.rpt_desc.wDescriptorLength =
		cpu_to_le16(hidg->report_desc_length);

	hidg_hs_in_ep_desc.bEndpointAddress =
		hidg_fs_in_ep_desc.bEndpointAddress;
	hidg_hs_out_ep_desc.bEndpointAddress =
		hidg_fs_out_ep_desc.bEndpointAddress;

	hidg_ss_in_ep_desc.bEndpointAddress =
		hidg_fs_in_ep_desc.bEndpointAddress;
	hidg_ss_out_ep_desc.bEndpointAddress =
		hidg_fs_out_ep_desc.bEndpointAddress;

	if (hidg->use_out_ep)
		status = usb_assign_descriptors(f,
			hidg_fs_descriptors_intout,
			hidg_hs_descriptors_intout,
			hidg_ss_descriptors_intout,
			hidg_ss_descriptors_intout);
	else
		status = usb_assign_descriptors(f,
			hidg_fs_descriptors_ssreport,
			hidg_hs_descriptors_ssreport,
			hidg_ss_descriptors_ssreport,
			hidg_ss_descriptors_ssreport);

	if (status)
		goto fail;

	spin_lock_init(&hidg->write_spinlock);
	hidg->write_pending = 1;
	hidg->req = NULL;
	spin_lock_init(&hidg->read_spinlock);
	spin_lock_init(&hidg->get_report_spinlock);
	init_waitqueue_head(&hidg->write_queue);
	init_waitqueue_head(&hidg->read_queue);
	init_waitqueue_head(&hidg->get_queue);
	init_waitqueue_head(&hidg->get_id_queue);
	INIT_LIST_HEAD(&hidg->completed_out_req);
	INIT_LIST_HEAD(&hidg->report_list);

	INIT_WORK(&hidg->work, get_report_workqueue_handler);
	hidg->workqueue = alloc_workqueue("report_work",
					  WQ_FREEZABLE |
					  WQ_MEM_RECLAIM,
					  1);
	if (!hidg->workqueue) {
		status = -ENOMEM;
		goto fail_free_descs;
	}

	/* create char device */
	cdev_init(&hidg->cdev, &f_hidg_fops);
	status = cdev_device_add(&hidg->cdev, &hidg->dev);
	if (status)
		goto fail_free_all;

	return 0;
fail_free_all:
	destroy_workqueue(hidg->workqueue);
fail_free_descs:
	usb_free_all_descriptors(f);
fail:
	ERROR(f->config->cdev, "hidg_bind FAILED\n");
	if (hidg->req != NULL)
		free_ep_req(hidg->in_ep, hidg->req);

	usb_ep_free_request(c->cdev->gadget->ep0, hidg->get_req);
	hidg->get_req = NULL;

	return status;
}

static inline int hidg_get_minor(void)
{
	int ret;

	ret = ida_alloc(&hidg_ida, GFP_KERNEL);
	if (ret >= HIDG_MINORS) {
		ida_free(&hidg_ida, ret);
		ret = -ENODEV;
	}

	return ret;
}

static inline struct f_hid_opts *to_f_hid_opts(struct config_item *item)
{
	return container_of(to_config_group(item), struct f_hid_opts,
			    func_inst.group);
}

static void hid_attr_release(struct config_item *item)
{
	struct f_hid_opts *opts = to_f_hid_opts(item);

	usb_put_function_instance(&opts->func_inst);
}

static struct configfs_item_operations hidg_item_ops = {
	.release = hid_attr_release,
};

#define F_HID_OPT(name, prec, limit)					\
static ssize_t f_hid_opts_##name##_show(struct config_item *item, char *page)\
{									\
	struct f_hid_opts *opts = to_f_hid_opts(item);			\
	int result;							\
									\
	mutex_lock(&opts->lock);					\
	result = sprintf(page, "%d\n", opts->name);			\
	mutex_unlock(&opts->lock);					\
									\
	return result;							\
}									\
									\
static ssize_t f_hid_opts_##name##_store(struct config_item *item,	\
					 const char *page, size_t len)	\
{									\
	struct f_hid_opts *opts = to_f_hid_opts(item);			\
	int ret;							\
	u##prec num;							\
									\
	mutex_lock(&opts->lock);					\
	if (opts->refcnt) {						\
		ret = -EBUSY;						\
		goto end;						\
	}								\
									\
	ret = kstrtou##prec(page, 0, &num);				\
	if (ret)							\
		goto end;						\
									\
	if (num > limit) {						\
		ret = -EINVAL;						\
		goto end;						\
	}								\
	opts->name = num;						\
	ret = len;							\
									\
end:									\
	mutex_unlock(&opts->lock);					\
	return ret;							\
}									\
									\
CONFIGFS_ATTR(f_hid_opts_, name)

F_HID_OPT(subclass, 8, 255);
F_HID_OPT(protocol, 8, 255);
F_HID_OPT(no_out_endpoint, 8, 1);
F_HID_OPT(report_length, 16, 65535);

static ssize_t f_hid_opts_report_desc_show(struct config_item *item, char *page)
{
	struct f_hid_opts *opts = to_f_hid_opts(item);
	int result;

	mutex_lock(&opts->lock);
	result = opts->report_desc_length;
	memcpy(page, opts->report_desc, opts->report_desc_length);
	mutex_unlock(&opts->lock);

	return result;
}

static ssize_t f_hid_opts_report_desc_store(struct config_item *item,
					    const char *page, size_t len)
{
	struct f_hid_opts *opts = to_f_hid_opts(item);
	int ret = -EBUSY;
	char *d;

	mutex_lock(&opts->lock);

	if (opts->refcnt)
		goto end;
	if (len > PAGE_SIZE) {
		ret = -ENOSPC;
		goto end;
	}
	d = kmemdup(page, len, GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto end;
	}
	kfree(opts->report_desc);
	opts->report_desc = d;
	opts->report_desc_length = len;
	opts->report_desc_alloc = true;
	ret = len;
end:
	mutex_unlock(&opts->lock);
	return ret;
}

CONFIGFS_ATTR(f_hid_opts_, report_desc);

static ssize_t f_hid_opts_dev_show(struct config_item *item, char *page)
{
	struct f_hid_opts *opts = to_f_hid_opts(item);

	return sprintf(page, "%d:%d\n", major, opts->minor);
}

CONFIGFS_ATTR_RO(f_hid_opts_, dev);
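
/*
 * Typical configfs usage (a sketch; paths follow the usual gadget configfs
 * layout under /sys/kernel/config/usb_gadget, and the values depend on the
 * device being emulated):
 *
 *	mkdir functions/hid.usb0
 *	echo 1 > functions/hid.usb0/subclass		# boot interface subclass
 *	echo 1 > functions/hid.usb0/protocol		# keyboard
 *	echo 8 > functions/hid.usb0/report_length
 *	cat my_report.desc > functions/hid.usb0/report_desc
 *
 * The "dev" attribute exposes the major:minor of the matching /dev/hidgN
 * character device.
 */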

static struct configfs_attribute *hid_attrs[] = {
	&f_hid_opts_attr_subclass,
	&f_hid_opts_attr_protocol,
	&f_hid_opts_attr_no_out_endpoint,
	&f_hid_opts_attr_report_length,
	&f_hid_opts_attr_report_desc,
	&f_hid_opts_attr_dev,
	NULL,
};

static const struct config_item_type hid_func_type = {
	.ct_item_ops = &hidg_item_ops,
	.ct_attrs = hid_attrs,
	.ct_owner = THIS_MODULE,
};

static inline void hidg_put_minor(int minor)
{
	ida_free(&hidg_ida, minor);
}

static void hidg_free_inst(struct usb_function_instance *f)
{
	struct f_hid_opts *opts;

	opts = container_of(f, struct f_hid_opts, func_inst);

	mutex_lock(&hidg_ida_lock);

	hidg_put_minor(opts->minor);
	if (ida_is_empty(&hidg_ida))
		ghid_cleanup();

	mutex_unlock(&hidg_ida_lock);

	if (opts->report_desc_alloc)
		kfree(opts->report_desc);

	kfree(opts);
}

static struct usb_function_instance *hidg_alloc_inst(void)
{
	struct f_hid_opts *opts;
	struct usb_function_instance *ret;
	int status = 0;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);
	mutex_init(&opts->lock);
	opts->func_inst.free_func_inst = hidg_free_inst;
	ret = &opts->func_inst;

	mutex_lock(&hidg_ida_lock);

	if (ida_is_empty(&hidg_ida)) {
		status = ghid_setup(NULL, HIDG_MINORS);
		if (status) {
			ret = ERR_PTR(status);
			kfree(opts);
			goto unlock;
		}
	}

	opts->minor = hidg_get_minor();
	if (opts->minor < 0) {
		ret = ERR_PTR(opts->minor);
		kfree(opts);
		if (ida_is_empty(&hidg_ida))
			ghid_cleanup();
		goto unlock;
	}
	config_group_init_type_name(&opts->func_inst.group, "", &hid_func_type);

unlock:
	mutex_unlock(&hidg_ida_lock);
	return ret;
}

static void hidg_free(struct usb_function *f)
{
	struct f_hidg *hidg;
	struct f_hid_opts *opts;

	hidg = func_to_hidg(f);
	opts = container_of(f->fi, struct f_hid_opts, func_inst);

	put_device(&hidg->dev);

	mutex_lock(&opts->lock);
	--opts->refcnt;
	mutex_unlock(&opts->lock);
}

static void hidg_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_hidg *hidg = func_to_hidg(f);

	cdev_device_del(&hidg->cdev, &hidg->dev);

	destroy_workqueue(hidg->workqueue);
	usb_free_all_descriptors(f);
}

static struct usb_function *hidg_alloc(struct usb_function_instance *fi)
{
	struct f_hidg *hidg;
	struct f_hid_opts *opts;
	int ret;

	/* allocate and initialize one new instance */
	hidg = kzalloc(sizeof(*hidg), GFP_KERNEL);
	if (!hidg)
		return ERR_PTR(-ENOMEM);

	opts = container_of(fi, struct f_hid_opts, func_inst);

	mutex_lock(&opts->lock);

	device_initialize(&hidg->dev);
	hidg->dev.release = hidg_release;
	hidg->dev.class = &hidg_class;
	hidg->dev.devt = MKDEV(major, opts->minor);
	ret = dev_set_name(&hidg->dev, "hidg%d", opts->minor);
	if (ret)
		goto err_unlock;

	hidg->bInterfaceSubClass = opts->subclass;
	hidg->bInterfaceProtocol = opts->protocol;
	hidg->report_length = opts->report_length;
	hidg->report_desc_length = opts->report_desc_length;
	if (opts->report_desc) {
		hidg->report_desc = kmemdup(opts->report_desc,
					    opts->report_desc_length,
					    GFP_KERNEL);
		if (!hidg->report_desc) {
			ret = -ENOMEM;
			goto err_put_device;
		}
	}
	hidg->use_out_ep = !opts->no_out_endpoint;

	++opts->refcnt;
	mutex_unlock(&opts->lock);

	hidg->func.name = "hid";
	hidg->func.bind = hidg_bind;
	hidg->func.unbind = hidg_unbind;
	hidg->func.set_alt = hidg_set_alt;
	hidg->func.disable = hidg_disable;
	hidg->func.setup = hidg_setup;
	hidg->func.free_func = hidg_free;

	/* this could be made configurable at some point */
	hidg->qlen = 4;

	return &hidg->func;

err_put_device:
	put_device(&hidg->dev);
err_unlock:
	mutex_unlock(&opts->lock);
	return ERR_PTR(ret);
}

DECLARE_USB_FUNCTION_INIT(hid, hidg_alloc_inst, hidg_alloc);
MODULE_DESCRIPTION("USB HID function driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabien Chouteau");

int ghid_setup(struct usb_gadget *g, int count)
{
	int status;
	dev_t dev;

	status = class_register(&hidg_class);
	if (status)
		return status;

	status = alloc_chrdev_region(&dev, 0, count, "hidg");
	if (status) {
		class_unregister(&hidg_class);
		return status;
	}

	major = MAJOR(dev);
	minors = count;

	return 0;
}

void ghid_cleanup(void)
{
	if (major) {
		unregister_chrdev_region(MKDEV(major, 0), minors);
		major = minors = 0;
	}

	class_unregister(&hidg_class);
}