hid-core.c
/*
 * HID support for Linux
 *
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2006-2012 Jiri Kosina
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <linux/input.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/semaphore.h>

#include <linux/hid.h>
#include <linux/hiddev.h>
#include <linux/hid-debug.h>
#include <linux/hidraw.h>

#include "hid-ids.h"

/*
 * Version Information
 */
#define DRIVER_DESC "HID core driver"

int hid_debug = 0;
module_param_named(debug, hid_debug, int, 0600);
MODULE_PARM_DESC(debug, "toggle HID debugging messages");
EXPORT_SYMBOL_GPL(hid_debug);

static int hid_ignore_special_drivers = 0;
module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");

/*
 * Register a new report for a device.
 */
struct hid_report *hid_register_report(struct hid_device *device,
				       unsigned int type, unsigned int id,
				       unsigned int application)
{
	struct hid_report_enum *report_enum = device->report_enum + type;
	struct hid_report *report;

	if (id >= HID_MAX_IDS)
		return NULL;

	if (report_enum->report_id_hash[id])
		return report_enum->report_id_hash[id];

	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
	if (!report)
		return NULL;

	if (id != 0)
		report_enum->numbered = 1;

	report->id = id;
	report->type = type;
	report->size = 0;
	report->device = device;
	report->application = application;
	report_enum->report_id_hash[id] = report;

	list_add_tail(&report->list, &report_enum->report_list);

	return report;
}
EXPORT_SYMBOL_GPL(hid_register_report);

/*
 * Register a new field for this report.
 */
static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
{
	struct hid_field *field;

	if (report->maxfield == HID_MAX_FIELDS) {
		hid_err(report->device, "too many fields in report\n");
		return NULL;
	}

	field = kzalloc((sizeof(struct hid_field) +
			 usages * sizeof(struct hid_usage) +
			 usages * sizeof(unsigned)), GFP_KERNEL);
	if (!field)
		return NULL;

	field->index = report->maxfield++;
	report->field[field->index] = field;
	field->usage = (struct hid_usage *)(field + 1);
	field->value = (s32 *)(field->usage + usages);
	field->report = report;

	return field;
}

/*
 * Open a collection. The type/usage is pushed on the stack.
 */
static int open_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_collection *collection;
	unsigned usage;

	usage = parser->local.usage[0];

	if (parser->collection_stack_ptr == parser->collection_stack_size) {
		unsigned int *collection_stack;
		unsigned int new_size = parser->collection_stack_size +
					HID_COLLECTION_STACK_SIZE;

		collection_stack = krealloc(parser->collection_stack,
					    new_size * sizeof(unsigned int),
					    GFP_KERNEL);
		if (!collection_stack)
			return -ENOMEM;

		parser->collection_stack = collection_stack;
		parser->collection_stack_size = new_size;
	}

	if (parser->device->maxcollection == parser->device->collection_size) {
		collection = kmalloc(
				array3_size(sizeof(struct hid_collection),
					    parser->device->collection_size,
					    2),
				GFP_KERNEL);
		if (collection == NULL) {
			hid_err(parser->device, "failed to reallocate collection array\n");
			return -ENOMEM;
		}
		memcpy(collection, parser->device->collection,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		memset(collection + parser->device->collection_size, 0,
		       sizeof(struct hid_collection) *
		       parser->device->collection_size);
		kfree(parser->device->collection);
		parser->device->collection = collection;
		parser->device->collection_size *= 2;
	}

	parser->collection_stack[parser->collection_stack_ptr++] =
		parser->device->maxcollection;

	collection = parser->device->collection +
		parser->device->maxcollection++;
	collection->type = type;
	collection->usage = usage;
	collection->level = parser->collection_stack_ptr - 1;

	if (type == HID_COLLECTION_APPLICATION)
		parser->device->maxapplication++;

	return 0;
}

/*
 * Close a collection.
 */
static int close_collection(struct hid_parser *parser)
{
	if (!parser->collection_stack_ptr) {
		hid_err(parser->device, "collection stack underflow\n");
		return -EINVAL;
	}
	parser->collection_stack_ptr--;
	return 0;
}

/*
 * Climb up the stack, search for the specified collection type
 * and return the usage.
 */
static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_collection *collection = parser->device->collection;
	int n;

	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
		unsigned index = parser->collection_stack[n];
		if (collection[index].type == type)
			return collection[index].usage;
	}
	return 0; /* we know nothing about this usage type */
}

/*
 * Concatenate usage which defines 16 bits or less with the
 * currently defined usage page to form a 32 bit usage
 */
static void complete_usage(struct hid_parser *parser, unsigned int index)
{
	parser->local.usage[index] &= 0xFFFF;
	parser->local.usage[index] |=
		(parser->global.usage_page & 0xFFFF) << 16;
}
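
/*
 * Illustrative sketch (not part of the original hid-core.c): how a 16-bit
 * Usage id and the current Usage Page combine into the 32-bit values kept
 * in parser->local.usage[], mirroring the arithmetic in complete_usage()
 * above.  example_compose_usage() is a hypothetical helper, added here only
 * for illustration.
 */
static inline u32 example_compose_usage(u16 usage_page, u16 usage_id)
{
	/* e.g. page 0x0001 (Generic Desktop) + id 0x0030 (X) = 0x00010030 */
	return ((u32)usage_page << 16) | usage_id;
}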

/*
 * Add a usage to the temporary parser table.
 */
static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
{
	if (parser->local.usage_index >= HID_MAX_USAGES) {
		hid_err(parser->device, "usage index exceeded\n");
		return -1;
	}
	parser->local.usage[parser->local.usage_index] = usage;

	/*
	 * If Usage item only includes usage id, concatenate it with
	 * currently defined usage page
	 */
	if (size <= 2)
		complete_usage(parser, parser->local.usage_index);

	parser->local.usage_size[parser->local.usage_index] = size;
	parser->local.collection_index[parser->local.usage_index] =
		parser->collection_stack_ptr ?
		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
	parser->local.usage_index++;
	return 0;
}

/*
 * Register a new field for this report.
 */
static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
{
	struct hid_report *report;
	struct hid_field *field;
	unsigned int usages;
	unsigned int offset;
	unsigned int i;
	unsigned int application;

	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);

	report = hid_register_report(parser->device, report_type,
				     parser->global.report_id, application);
	if (!report) {
		hid_err(parser->device, "hid_register_report failed\n");
		return -1;
	}

	/* Handle both signed and unsigned cases properly */
	if ((parser->global.logical_minimum < 0 &&
	     parser->global.logical_maximum <
	     parser->global.logical_minimum) ||
	    (parser->global.logical_minimum >= 0 &&
	     (__u32)parser->global.logical_maximum <
	     (__u32)parser->global.logical_minimum)) {
		dbg_hid("logical range invalid 0x%x 0x%x\n",
			parser->global.logical_minimum,
			parser->global.logical_maximum);
		return -1;
	}

	offset = report->size;
	report->size += parser->global.report_size * parser->global.report_count;

	/* Total size check: Allow for possible report index byte */
	if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
		hid_err(parser->device, "report is too long\n");
		return -1;
	}

	if (!parser->local.usage_index) /* Ignore padding fields */
		return 0;

	usages = max_t(unsigned, parser->local.usage_index,
		       parser->global.report_count);

	field = hid_register_field(report, usages);
	if (!field)
		return 0;

	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
	field->application = application;

	for (i = 0; i < usages; i++) {
		unsigned j = i;
		/* Duplicate the last usage we parsed if we have excess values */
		if (i >= parser->local.usage_index)
			j = parser->local.usage_index - 1;
		field->usage[i].hid = parser->local.usage[j];
		field->usage[i].collection_index =
			parser->local.collection_index[j];
		field->usage[i].usage_index = i;
	}

	field->maxusage = usages;
	field->flags = flags;
	field->report_offset = offset;
	field->report_type = report_type;
	field->report_size = parser->global.report_size;
	field->report_count = parser->global.report_count;
	field->logical_minimum = parser->global.logical_minimum;
	field->logical_maximum = parser->global.logical_maximum;
	field->physical_minimum = parser->global.physical_minimum;
	field->physical_maximum = parser->global.physical_maximum;
	field->unit_exponent = parser->global.unit_exponent;
	field->unit = parser->global.unit;

	return 0;
}

/*
 * Read data value from item.
 */
static u32 item_udata(struct hid_item *item)
{
	switch (item->size) {
	case 1: return item->data.u8;
	case 2: return item->data.u16;
	case 4: return item->data.u32;
	}
	return 0;
}

static s32 item_sdata(struct hid_item *item)
{
	switch (item->size) {
	case 1: return item->data.s8;
	case 2: return item->data.s16;
	case 4: return item->data.s32;
	}
	return 0;
}

/*
 * Process a global item.
 */
static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
{
	__s32 raw_value;

	switch (item->tag) {
	case HID_GLOBAL_ITEM_TAG_PUSH:

		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
			hid_err(parser->device, "global environment stack overflow\n");
			return -1;
		}

		memcpy(parser->global_stack + parser->global_stack_ptr++,
		       &parser->global, sizeof(struct hid_global));
		return 0;

	case HID_GLOBAL_ITEM_TAG_POP:

		if (!parser->global_stack_ptr) {
			hid_err(parser->device, "global environment stack underflow\n");
			return -1;
		}

		memcpy(&parser->global, parser->global_stack +
		       --parser->global_stack_ptr, sizeof(struct hid_global));
		return 0;

	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
		parser->global.usage_page = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
		parser->global.logical_minimum = item_sdata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
		if (parser->global.logical_minimum < 0)
			parser->global.logical_maximum = item_sdata(item);
		else
			parser->global.logical_maximum = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
		parser->global.physical_minimum = item_sdata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
		if (parser->global.physical_minimum < 0)
			parser->global.physical_maximum = item_sdata(item);
		else
			parser->global.physical_maximum = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
		/* Many devices provide unit exponent as a two's complement
		 * nibble due to the common misunderstanding of HID
		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
		 * both this and the standard encoding. */
		raw_value = item_sdata(item);
		if (!(raw_value & 0xfffffff0))
			parser->global.unit_exponent = hid_snto32(raw_value, 4);
		else
			parser->global.unit_exponent = raw_value;
		return 0;

	case HID_GLOBAL_ITEM_TAG_UNIT:
		parser->global.unit = item_udata(item);
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
		parser->global.report_size = item_udata(item);
		if (parser->global.report_size > 128) {
			hid_err(parser->device, "invalid report_size %d\n",
				parser->global.report_size);
			return -1;
		}
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
		parser->global.report_count = item_udata(item);
		if (parser->global.report_count > HID_MAX_USAGES) {
			hid_err(parser->device, "invalid report_count %d\n",
				parser->global.report_count);
			return -1;
		}
		return 0;

	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
		parser->global.report_id = item_udata(item);
		if (parser->global.report_id == 0 ||
		    parser->global.report_id >= HID_MAX_IDS) {
			hid_err(parser->device, "report_id %u is invalid\n",
				parser->global.report_id);
			return -1;
		}
		return 0;

	default:
		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
		return -1;
	}
}
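
/*
 * Illustrative sketch (not part of the original file): the Unit Exponent
 * quirk handled in hid_parser_global() above.  Under the common
 * (non-standard) encoding a device sends a bare two's complement nibble,
 * so a raw value of 0x0F means 10^-1; hid_snto32(0x0F, 4) yields -1.  A
 * value with bits set outside the low nibble is taken as already sign
 * extended.  example_decode_unit_exponent() is a hypothetical helper.
 */
static inline s32 example_decode_unit_exponent(s32 raw_value)
{
	if (!(raw_value & 0xfffffff0))
		return hid_snto32(raw_value, 4);	/* e.g. 0x0F -> -1 */
	return raw_value;				/* already 32-bit */
}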

/*
 * Process a local item.
 */
static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	unsigned n;
	__u32 count;

	data = item_udata(item);

	switch (item->tag) {
	case HID_LOCAL_ITEM_TAG_DELIMITER:

		if (data) {
			/*
			 * We treat items before the first delimiter
			 * as global to all usage sets (branch 0).
			 * For the moment we process only these global
			 * items and the first delimiter set.
			 */
			if (parser->local.delimiter_depth != 0) {
				hid_err(parser->device, "nested delimiters\n");
				return -1;
			}
			parser->local.delimiter_depth++;
			parser->local.delimiter_branch++;
		} else {
			if (parser->local.delimiter_depth < 1) {
				hid_err(parser->device, "bogus close delimiter\n");
				return -1;
			}
			parser->local.delimiter_depth--;
		}
		return 0;

	case HID_LOCAL_ITEM_TAG_USAGE:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		return hid_add_usage(parser, data, item->size);

	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		parser->local.usage_minimum = data;
		return 0;

	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:

		if (parser->local.delimiter_branch > 1) {
			dbg_hid("alternative usage ignored\n");
			return 0;
		}

		count = data - parser->local.usage_minimum;
		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
			/*
			 * We do not warn if the name is not set, we are
			 * actually pre-scanning the device.
			 */
			if (dev_name(&parser->device->dev))
				hid_warn(parser->device,
					 "ignoring exceeding usage max\n");
			data = HID_MAX_USAGES - parser->local.usage_index +
				parser->local.usage_minimum - 1;
			if (data <= 0) {
				hid_err(parser->device,
					"no more usage index available\n");
				return -1;
			}
		}

		for (n = parser->local.usage_minimum; n <= data; n++)
			if (hid_add_usage(parser, n, item->size)) {
				dbg_hid("hid_add_usage failed\n");
				return -1;
			}
		return 0;

	default:

		dbg_hid("unknown local item tag 0x%x\n", item->tag);
		return 0;
	}
	return 0;
}

/*
 * Concatenate Usage Pages into Usages where relevant:
 * As per specification, 6.2.2.8: "When the parser encounters a main item it
 * concatenates the last declared Usage Page with a Usage to form a complete
 * usage value."
 */
static void hid_concatenate_last_usage_page(struct hid_parser *parser)
{
	int i;
	unsigned int usage_page;
	unsigned int current_page;

	if (!parser->local.usage_index)
		return;

	usage_page = parser->global.usage_page;

	/*
	 * Concatenate usage page again only if last declared Usage Page
	 * has not been already used in previous usages concatenation
	 */
	for (i = parser->local.usage_index - 1; i >= 0; i--) {
		if (parser->local.usage_size[i] > 2)
			/* Ignore extended usages */
			continue;

		current_page = parser->local.usage[i] >> 16;
		if (current_page == usage_page)
			break;

		complete_usage(parser, i);
	}
}

/*
 * Process a main item.
 */
static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int ret;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		ret = open_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		ret = close_collection(parser);
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
		break;
	default:
		hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
		ret = 0;
	}

	memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */

	return ret;
}

/*
 * Process a reserved item.
 */
static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
{
	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
	return 0;
}

/*
 * Free a report and all registered fields. The field->usage and
 * field->value tables are allocated behind the field, so we need
 * only to free(field) itself.
 */
static void hid_free_report(struct hid_report *report)
{
	unsigned n;

	for (n = 0; n < report->maxfield; n++)
		kfree(report->field[n]);
	kfree(report);
}

/*
 * Close report. This function returns the device
 * state to the point prior to hid_open_report().
 */
static void hid_close_report(struct hid_device *device)
{
	unsigned i, j;

	for (i = 0; i < HID_REPORT_TYPES; i++) {
		struct hid_report_enum *report_enum = device->report_enum + i;

		for (j = 0; j < HID_MAX_IDS; j++) {
			struct hid_report *report = report_enum->report_id_hash[j];
			if (report)
				hid_free_report(report);
		}
		memset(report_enum, 0, sizeof(*report_enum));
		INIT_LIST_HEAD(&report_enum->report_list);
	}

	kfree(device->rdesc);
	device->rdesc = NULL;
	device->rsize = 0;

	kfree(device->collection);
	device->collection = NULL;
	device->collection_size = 0;
	device->maxcollection = 0;
	device->maxapplication = 0;

	device->status &= ~HID_STAT_PARSED;
}

/*
 * Free a device structure, all reports, and all fields.
 */
static void hid_device_release(struct device *dev)
{
	struct hid_device *hid = to_hid_device(dev);

	hid_close_report(hid);
	kfree(hid->dev_rdesc);
	kfree(hid);
}

/*
 * Fetch a report description item from the data stream. We support long
 * items, though they are not used yet.
 */
static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
{
	u8 b;

	if ((end - start) <= 0)
		return NULL;

	b = *start++;

	item->type = (b >> 2) & 3;
	item->tag = (b >> 4) & 15;

	if (item->tag == HID_ITEM_TAG_LONG) {

		item->format = HID_ITEM_FORMAT_LONG;

		if ((end - start) < 2)
			return NULL;

		item->size = *start++;
		item->tag = *start++;

		if ((end - start) < item->size)
			return NULL;

		item->data.longdata = start;
		start += item->size;
		return start;
	}

	item->format = HID_ITEM_FORMAT_SHORT;
	item->size = b & 3;

	switch (item->size) {
	case 0:
		return start;

	case 1:
		if ((end - start) < 1)
			return NULL;
		item->data.u8 = *start++;
		return start;

	case 2:
		if ((end - start) < 2)
			return NULL;
		item->data.u16 = get_unaligned_le16(start);
		start = (__u8 *)((__le16 *)start + 1);
		return start;

	case 3:
		item->size++;
		if ((end - start) < 4)
			return NULL;
		item->data.u32 = get_unaligned_le32(start);
		start = (__u8 *)((__le32 *)start + 1);
		return start;
	}

	return NULL;
}
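
/*
 * Illustrative sketch (not part of the original file): the layout of a
 * short item prefix byte as decoded by fetch_item() above.  For example
 * the byte 0x05 ("Usage Page" with one data byte) splits into size 1,
 * type 1 (global) and tag 0.  The struct and helper below are hypothetical
 * and exist only to spell out the bit fields.
 */
struct example_short_item_prefix {
	u8 size;	/* bits 0-1: number of data bytes (3 encodes 4) */
	u8 type;	/* bits 2-3: main, global, local or reserved */
	u8 tag;		/* bits 4-7: item tag within that type */
};

static inline struct example_short_item_prefix example_decode_prefix(u8 b)
{
	struct example_short_item_prefix p = {
		.size = b & 3,
		.type = (b >> 2) & 3,
		.tag = (b >> 4) & 15,
	};

	return p;
}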

static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
{
	struct hid_device *hid = parser->device;

	if (usage == HID_DG_CONTACTID)
		hid->group = HID_GROUP_MULTITOUCH;
}

static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
{
	if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
	    parser->global.report_size == 8)
		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;

	if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
	    parser->global.report_size == 8)
		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}

static void hid_scan_collection(struct hid_parser *parser, unsigned type)
{
	struct hid_device *hid = parser->device;
	int i;

	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
	    type == HID_COLLECTION_PHYSICAL)
		hid->group = HID_GROUP_SENSOR_HUB;

	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
	    hid->group == HID_GROUP_MULTITOUCH)
		hid->group = HID_GROUP_GENERIC;

	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
		for (i = 0; i < parser->local.usage_index; i++)
			if (parser->local.usage[i] == HID_GD_POINTER)
				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;

	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
}

static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
{
	__u32 data;
	int i;

	hid_concatenate_last_usage_page(parser);

	data = item_udata(item);

	switch (item->tag) {
	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
		hid_scan_collection(parser, data & 0xff);
		break;
	case HID_MAIN_ITEM_TAG_END_COLLECTION:
		break;
	case HID_MAIN_ITEM_TAG_INPUT:
		/* ignore constant inputs, they will be ignored by hid-input */
		if (data & HID_MAIN_ITEM_CONSTANT)
			break;
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_input_usage(parser, parser->local.usage[i]);
		break;
	case HID_MAIN_ITEM_TAG_OUTPUT:
		break;
	case HID_MAIN_ITEM_TAG_FEATURE:
		for (i = 0; i < parser->local.usage_index; i++)
			hid_scan_feature_usage(parser, parser->local.usage[i]);
		break;
	}

	/* Reset the local parser environment */
	memset(&parser->local, 0, sizeof(parser->local));

	return 0;
}

/*
 * Scan a report descriptor before the device is added to the bus.
 * Sets device groups and other properties that determine what driver
 * to load.
 */
static int hid_scan_report(struct hid_device *hid)
{
	struct hid_parser *parser;
	struct hid_item item;
	__u8 *start = hid->dev_rdesc;
	__u8 *end = start + hid->dev_rsize;
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_scan_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	parser = vzalloc(sizeof(struct hid_parser));
	if (!parser)
		return -ENOMEM;

	parser->device = hid;
	hid->group = HID_GROUP_GENERIC;

	/*
	 * The parsing is simpler than the one in hid_open_report() as we should
	 * be robust against hid errors. Those errors will be raised by
	 * hid_open_report() anyway.
	 */
	while ((start = fetch_item(start, end, &item)) != NULL)
		dispatch_type[item.type](parser, &item);

	/*
	 * Handle special flags set during scanning.
	 */
	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
	    (hid->group == HID_GROUP_MULTITOUCH))
		hid->group = HID_GROUP_MULTITOUCH_WIN_8;

	/*
	 * Vendor specific handlings
	 */
	switch (hid->vendor) {
	case USB_VENDOR_ID_WACOM:
		hid->group = HID_GROUP_WACOM;
		break;
	case USB_VENDOR_ID_SYNAPTICS:
		if (hid->group == HID_GROUP_GENERIC)
			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
				/*
				 * hid-rmi should take care of them,
				 * not hid-generic
				 */
				hid->group = HID_GROUP_RMI;
		break;
	}

	kfree(parser->collection_stack);
	vfree(parser);
	return 0;
}

/**
 * hid_parse_report - parse device report
 *
 * @hid: hid device
 * @start: report start
 * @size: report size
 *
 * Allocate the device report as read by the bus driver. This function should
 * only be called from parse() in ll drivers.
 */
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
{
	hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
	if (!hid->dev_rdesc)
		return -ENOMEM;
	hid->dev_rsize = size;
	return 0;
}
EXPORT_SYMBOL_GPL(hid_parse_report);
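
/*
 * Illustrative sketch (not part of the original file): a transport ("ll")
 * driver's ->parse() callback typically reads the raw descriptor from the
 * hardware and hands it to hid_parse_report(), which is the only place the
 * function above is meant to be called from.  example_ll_parse() and
 * example_fetch_descriptor() are hypothetical names.
 */
static int example_ll_parse(struct hid_device *hid)
{
	__u8 *rdesc;
	unsigned int rsize;
	int ret;

	ret = example_fetch_descriptor(hid, &rdesc, &rsize); /* hypothetical */
	if (ret)
		return ret;

	ret = hid_parse_report(hid, rdesc, rsize);
	kfree(rdesc);
	return ret;
}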

static const char * const hid_report_names[] = {
	"HID_INPUT_REPORT",
	"HID_OUTPUT_REPORT",
	"HID_FEATURE_REPORT",
};

/**
 * hid_validate_values - validate existing device report's value indexes
 *
 * @hid: hid device
 * @type: which report type to examine
 * @id: which report ID to examine (0 for first)
 * @field_index: which report field to examine
 * @report_counts: expected number of values
 *
 * Validate the number of values in a given field of a given report, after
 * parsing.
 */
struct hid_report *hid_validate_values(struct hid_device *hid,
				       unsigned int type, unsigned int id,
				       unsigned int field_index,
				       unsigned int report_counts)
{
	struct hid_report *report;

	if (type > HID_FEATURE_REPORT) {
		hid_err(hid, "invalid HID report type %u\n", type);
		return NULL;
	}

	if (id >= HID_MAX_IDS) {
		hid_err(hid, "invalid HID report id %u\n", id);
		return NULL;
	}

	/*
	 * Explicitly not using hid_get_report() here since it depends on
	 * ->numbered being checked, which may not always be the case when
	 * drivers go to access report values.
	 */
	if (id == 0) {
		/*
		 * Validating on id 0 means we should examine the first
		 * report in the list.
		 */
		report = list_entry(
				hid->report_enum[type].report_list.next,
				struct hid_report, list);
	} else {
		report = hid->report_enum[type].report_id_hash[id];
	}
	if (!report) {
		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
		return NULL;
	}
	if (report->maxfield <= field_index) {
		hid_err(hid, "not enough fields in %s %u\n",
			hid_report_names[type], id);
		return NULL;
	}
	if (report->field[field_index]->report_count < report_counts) {
		hid_err(hid, "not enough values in %s %u field %u\n",
			hid_report_names[type], id, field_index);
		return NULL;
	}
	return report;
}
EXPORT_SYMBOL_GPL(hid_validate_values);
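
/*
 * Illustrative sketch (not part of the original file): a force-feedback
 * style driver might use hid_validate_values() to check, after parsing,
 * that the first field of the first output report carries at least seven
 * values before caching the report.  example_get_ff_report() is a
 * hypothetical helper.
 */
static struct hid_report *example_get_ff_report(struct hid_device *hid)
{
	return hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
}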

/**
 * hid_open_report - open a driver-specific device report
 *
 * @device: hid device
 *
 * Parse a report description into a hid_device structure. Reports are
 * enumerated, fields are attached to these reports.
 * 0 returned on success, otherwise nonzero error value.
 *
 * This function (or the equivalent hid_parse() macro) should only be
 * called from probe() in drivers, before starting the device.
 */
int hid_open_report(struct hid_device *device)
{
	struct hid_parser *parser;
	struct hid_item item;
	unsigned int size;
	__u8 *start;
	__u8 *buf;
	__u8 *end;
	__u8 *next;
	int ret;
	static int (*dispatch_type[])(struct hid_parser *parser,
				      struct hid_item *item) = {
		hid_parser_main,
		hid_parser_global,
		hid_parser_local,
		hid_parser_reserved
	};

	if (WARN_ON(device->status & HID_STAT_PARSED))
		return -EBUSY;

	start = device->dev_rdesc;
	if (WARN_ON(!start))
		return -ENODEV;
	size = device->dev_rsize;

	buf = kmemdup(start, size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	if (device->driver->report_fixup)
		start = device->driver->report_fixup(device, buf, &size);
	else
		start = buf;

	start = kmemdup(start, size, GFP_KERNEL);
	kfree(buf);
	if (start == NULL)
		return -ENOMEM;

	device->rdesc = start;
	device->rsize = size;

	parser = vzalloc(sizeof(struct hid_parser));
	if (!parser) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	parser->device = device;

	end = start + size;

	device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
				     sizeof(struct hid_collection), GFP_KERNEL);
	if (!device->collection) {
		ret = -ENOMEM;
		goto err;
	}
	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;

	ret = -EINVAL;
	while ((next = fetch_item(start, end, &item)) != NULL) {
		start = next;

		if (item.format != HID_ITEM_FORMAT_SHORT) {
			hid_err(device, "unexpected long global item\n");
			goto err;
		}

		if (dispatch_type[item.type](parser, &item)) {
			hid_err(device, "item %u %u %u %u parsing failed\n",
				item.format, (unsigned)item.size,
				(unsigned)item.type, (unsigned)item.tag);
			goto err;
		}

		if (start == end) {
			if (parser->collection_stack_ptr) {
				hid_err(device, "unbalanced collection at end of report description\n");
				goto err;
			}
			if (parser->local.delimiter_depth) {
				hid_err(device, "unbalanced delimiter at end of report description\n");
				goto err;
			}
			kfree(parser->collection_stack);
			vfree(parser);
			device->status |= HID_STAT_PARSED;
			return 0;
		}
	}

	hid_err(device, "item fetching failed at offset %u/%u\n",
		size - (unsigned int)(end - start), size);
err:
	kfree(parser->collection_stack);
alloc_err:
	vfree(parser);
	hid_close_report(device);
	return ret;
}
EXPORT_SYMBOL_GPL(hid_open_report);
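
/*
 * Illustrative sketch (not part of the original file): the usual probe()
 * sequence in a hid driver, where hid_parse() (a wrapper around
 * hid_open_report() above) is called before starting the device.
 * example_probe() is a hypothetical driver callback.
 */
static int example_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}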

/*
 * Convert a signed n-bit integer to signed 32-bit integer. Common
 * cases are done through the compiler, the screwed things have to be
 * done by hand.
 */
static s32 snto32(__u32 value, unsigned n)
{
	if (!value || !n)
		return 0;

	switch (n) {
	case 8: return ((__s8)value);
	case 16: return ((__s16)value);
	case 32: return ((__s32)value);
	}
	return value & (1 << (n - 1)) ? value | (~0U << n) : value;
}

s32 hid_snto32(__u32 value, unsigned n)
{
	return snto32(value, n);
}
EXPORT_SYMBOL_GPL(hid_snto32);

/*
 * Convert a signed 32-bit integer to a signed n-bit integer.
 */
static u32 s32ton(__s32 value, unsigned n)
{
	s32 a = value >> (n - 1);
	if (a && a != -1)
		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
	return value & ((1 << n) - 1);
}
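
/*
 * Illustrative sketch (not part of the original file): round-tripping a
 * value through the two helpers above for a 12-bit field.  The raw value
 * 0xFFF sign-extends to -1 via snto32()/hid_snto32(), and s32ton() packs
 * -1 back into 0xFFF (out-of-range values are clamped to the field's
 * limits instead).  example_roundtrip_12bit() is a hypothetical helper.
 */
static inline bool example_roundtrip_12bit(void)
{
	s32 extended = hid_snto32(0xFFF, 12);	/* -1 */
	u32 packed = s32ton(extended, 12);	/* 0xFFF again */

	return extended == -1 && packed == 0xFFF;
}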

/*
 * Extract/implement a data field from/to a little endian report (bit array).
 *
 * Code sort-of follows HID spec:
 * http://www.usb.org/developers/hidpage/HID1_11.pdf
 *
 * While the USB HID spec allows unlimited length bit fields in "report
 * descriptors", most devices never use more than 16 bits.
 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
 */
static u32 __extract(u8 *report, unsigned offset, int n)
{
	unsigned int idx = offset / 8;
	unsigned int bit_nr = 0;
	unsigned int bit_shift = offset % 8;
	int bits_to_copy = 8 - bit_shift;
	u32 value = 0;
	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;

	while (n > 0) {
		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
		n -= bits_to_copy;
		bit_nr += bits_to_copy;
		bits_to_copy = 8;
		bit_shift = 0;
		idx++;
	}

	return value & mask;
}

u32 hid_field_extract(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n)
{
	if (n > 32) {
		hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
			 n, current->comm);
		n = 32;
	}

	return __extract(report, offset, n);
}
EXPORT_SYMBOL_GPL(hid_field_extract);
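
/*
 * Illustrative sketch (not part of the original file): extracting a field
 * that is neither byte aligned nor byte sized.  With report[] = { 0xAB,
 * 0xCD }, the 8 bits starting at bit offset 4 are the high nibble of 0xAB
 * (low bits of the field) followed by the low nibble of 0xCD (high bits),
 * so hid_field_extract() returns 0xDA.  example_extract_nibbles() is a
 * hypothetical helper.
 */
static inline u32 example_extract_nibbles(const struct hid_device *hid)
{
	u8 report[2] = { 0xAB, 0xCD };

	return hid_field_extract(hid, report, 4, 8);	/* 0xDA */
}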

/*
 * "implement" : set bits in a little endian bit stream.
 * Same concepts as "extract" (see comments above).
 * The data mangled in the bit stream remains in little endian
 * order the whole time. It makes more sense to talk about
 * endianness of register values by considering a register
 * a "cached" copy of the little endian bit stream.
 */
static void __implement(u8 *report, unsigned offset, int n, u32 value)
{
	unsigned int idx = offset / 8;
	unsigned int bit_shift = offset % 8;
	int bits_to_set = 8 - bit_shift;

	while (n - bits_to_set >= 0) {
		report[idx] &= ~(0xff << bit_shift);
		report[idx] |= value << bit_shift;
		value >>= bits_to_set;
		n -= bits_to_set;
		bits_to_set = 8;
		bit_shift = 0;
		idx++;
	}

	/* last nibble */
	if (n) {
		u8 bit_mask = ((1U << n) - 1);
		report[idx] &= ~(bit_mask << bit_shift);
		report[idx] |= value << bit_shift;
	}
}

static void implement(const struct hid_device *hid, u8 *report,
		      unsigned offset, unsigned n, u32 value)
{
	if (unlikely(n > 32)) {
		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
			 __func__, n, current->comm);
		n = 32;
	} else if (n < 32) {
		u32 m = (1U << n) - 1;

		if (unlikely(value > m)) {
			hid_warn(hid,
				 "%s() called with too large value %d (n: %d)! (%s)\n",
				 __func__, value, n, current->comm);
			WARN_ON(1);
			value &= m;
		}
	}

	__implement(report, offset, n, value);
}

/*
 * Search an array for a value.
 */
static int search(__s32 *array, __s32 value, unsigned n)
{
	while (n--) {
		if (*array++ == value)
			return 0;
	}
	return -1;
}

/**
 * hid_match_report - check if driver's raw_event should be called
 *
 * @hid: hid device
 * @report_type: type to match against
 *
 * compare hid->driver->report_table->report_type to report->type
 */
static int hid_match_report(struct hid_device *hid, struct hid_report *report)
{
	const struct hid_report_id *id = hid->driver->report_table;

	if (!id) /* NULL means all */
		return 1;

	for (; id->report_type != HID_TERMINATOR; id++)
		if (id->report_type == HID_ANY_ID ||
		    id->report_type == report->type)
			return 1;
	return 0;
}

/**
 * hid_match_usage - check if driver's event should be called
 *
 * @hid: hid device
 * @usage: usage to match against
 *
 * compare hid->driver->usage_table->usage_{type,code} to
 * usage->usage_{type,code}
 */
static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
{
	const struct hid_usage_id *id = hid->driver->usage_table;

	if (!id) /* NULL means all */
		return 1;

	for (; id->usage_type != HID_ANY_ID - 1; id++)
		if ((id->usage_hid == HID_ANY_ID ||
		     id->usage_hid == usage->hid) &&
		    (id->usage_type == HID_ANY_ID ||
		     id->usage_type == usage->type) &&
		    (id->usage_code == HID_ANY_ID ||
		     id->usage_code == usage->code))
			return 1;
	return 0;
}

static void hid_process_event(struct hid_device *hid, struct hid_field *field,
			      struct hid_usage *usage, __s32 value, int interrupt)
{
	struct hid_driver *hdrv = hid->driver;
	int ret;

	if (!list_empty(&hid->debug_list))
		hid_dump_input(hid, usage, value);

	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
		ret = hdrv->event(hid, field, usage, value);
		if (ret != 0) {
			if (ret < 0)
				hid_err(hid, "%s's event failed with %d\n",
					hdrv->name, ret);
			return;
		}
	}

	if (hid->claimed & HID_CLAIMED_INPUT)
		hidinput_hid_event(hid, field, usage, value);
	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
		hid->hiddev_hid_event(hid, field, usage, value);
}

/*
 * Analyse a received field, and fetch the data from it. The field
 * content is stored for next report processing (we do differential
 * reporting to the layer).
 */
static void hid_input_field(struct hid_device *hid, struct hid_field *field,
			    __u8 *data, int interrupt)
{
	unsigned n;
	unsigned count = field->report_count;
	unsigned offset = field->report_offset;
	unsigned size = field->report_size;
	__s32 min = field->logical_minimum;
	__s32 max = field->logical_maximum;
	__s32 *value;

	value = kmalloc_array(count, sizeof(__s32), GFP_ATOMIC);
	if (!value)
		return;

	for (n = 0; n < count; n++) {

		value[n] = min < 0 ?
			snto32(hid_field_extract(hid, data, offset + n * size,
			       size), size) :
			hid_field_extract(hid, data, offset + n * size, size);

		/* Ignore report if ErrorRollOver */
		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
		    value[n] >= min && value[n] <= max &&
		    value[n] - min < field->maxusage &&
		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
			goto exit;
	}

	for (n = 0; n < count; n++) {

		if (HID_MAIN_ITEM_VARIABLE & field->flags) {
			hid_process_event(hid, field, &field->usage[n], value[n], interrupt);
			continue;
		}

		if (field->value[n] >= min && field->value[n] <= max
			&& field->value[n] - min < field->maxusage
			&& field->usage[field->value[n] - min].hid
			&& search(value, field->value[n], count))
				hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);

		if (value[n] >= min && value[n] <= max
			&& value[n] - min < field->maxusage
			&& field->usage[value[n] - min].hid
			&& search(field->value, value[n], count))
				hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
	}

	memcpy(field->value, value, count * sizeof(__s32));
exit:
	kfree(value);
}

/*
 * Output the field into the report.
 */
static void hid_output_field(const struct hid_device *hid,
			     struct hid_field *field, __u8 *data)
{
	unsigned count = field->report_count;
	unsigned offset = field->report_offset;
	unsigned size = field->report_size;
	unsigned n;

	for (n = 0; n < count; n++) {
		if (field->logical_minimum < 0) /* signed values */
			implement(hid, data, offset + n * size, size,
				  s32ton(field->value[n], size));
		else /* unsigned values */
			implement(hid, data, offset + n * size, size,
				  field->value[n]);
	}
}

/*
 * Compute the size of a report.
 */
static size_t hid_compute_report_size(struct hid_report *report)
{
	if (report->size)
		return ((report->size - 1) >> 3) + 1;

	return 0;
}

/*
 * Create a report. 'data' has to be allocated using
 * hid_alloc_report_buf() so that it has proper size.
 */
void hid_output_report(struct hid_report *report, __u8 *data)
{
	unsigned n;

	if (report->id > 0)
		*data++ = report->id;

	memset(data, 0, hid_compute_report_size(report));
	for (n = 0; n < report->maxfield; n++)
		hid_output_field(report->device, report->field[n], data);
}
EXPORT_SYMBOL_GPL(hid_output_report);

/*
 * Allocator for buffer that is going to be passed to hid_output_report()
 */
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
{
	/*
	 * 7 extra bytes are necessary to achieve proper functionality
	 * of implement() working on 8 byte chunks
	 */
	u32 len = hid_report_len(report) + 7;

	return kmalloc(len, flags);
}
EXPORT_SYMBOL_GPL(hid_alloc_report_buf);

/*
 * Set a field value. The report this field belongs to has to be
 * created and transferred to the device, to set this value in the
 * device.
 */
int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
{
	unsigned size;

	if (!field)
		return -1;

	size = field->report_size;

	hid_dump_input(field->report->device, field->usage + offset, value);

	if (offset >= field->report_count) {
		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
			offset, field->report_count);
		return -1;
	}
	if (field->logical_minimum < 0) {
		if (value != snto32(s32ton(value, size), size)) {
			hid_err(field->report->device, "value %d is out of range\n", value);
			return -1;
		}
	}
	field->value[offset] = value;
	return 0;
}
EXPORT_SYMBOL_GPL(hid_set_field);
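
/*
 * Illustrative sketch (not part of the original file): how a driver
 * typically writes a value into a parsed report and pushes it to the
 * device.  example_set_and_send() is a hypothetical helper; drivers are
 * expected to go through hid_hw_request() rather than calling
 * __hid_request() or ->request() directly.
 */
static void example_set_and_send(struct hid_device *hdev,
				 struct hid_report *report, __s32 value)
{
	/* write into the first value of the first field, then send it */
	if (hid_set_field(report->field[0], 0, value))
		return;

	hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
}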
  1263. static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
  1264. const u8 *data)
  1265. {
  1266. struct hid_report *report;
  1267. unsigned int n = 0; /* Normally report number is 0 */
  1268. /* Device uses numbered reports, data[0] is report number */
  1269. if (report_enum->numbered)
  1270. n = *data;
  1271. report = report_enum->report_id_hash[n];
  1272. if (report == NULL)
  1273. dbg_hid("undefined report_id %u received\n", n);
  1274. return report;
  1275. }
  1276. /*
  1277. * Implement a generic .request() callback, using .raw_request()
  1278. * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
  1279. */
  1280. void __hid_request(struct hid_device *hid, struct hid_report *report,
  1281. int reqtype)
  1282. {
  1283. char *buf;
  1284. int ret;
  1285. u32 len;
  1286. buf = hid_alloc_report_buf(report, GFP_KERNEL);
  1287. if (!buf)
  1288. return;
  1289. len = hid_report_len(report);
  1290. if (reqtype == HID_REQ_SET_REPORT)
  1291. hid_output_report(report, buf);
  1292. ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
  1293. report->type, reqtype);
  1294. if (ret < 0) {
  1295. dbg_hid("unable to complete request: %d\n", ret);
  1296. goto out;
  1297. }
  1298. if (reqtype == HID_REQ_GET_REPORT)
  1299. hid_input_report(hid, report->type, buf, ret, 0);
  1300. out:
  1301. kfree(buf);
  1302. }
  1303. EXPORT_SYMBOL_GPL(__hid_request);
  1304. int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
  1305. int interrupt)
  1306. {
  1307. struct hid_report_enum *report_enum = hid->report_enum + type;
  1308. struct hid_report *report;
  1309. struct hid_driver *hdrv;
  1310. unsigned int a;
  1311. u32 rsize, csize = size;
  1312. u8 *cdata = data;
  1313. int ret = 0;
  1314. report = hid_get_report(report_enum, data);
  1315. if (!report)
  1316. goto out;
  1317. if (report_enum->numbered) {
  1318. cdata++;
  1319. csize--;
  1320. }
  1321. rsize = hid_compute_report_size(report);
  1322. if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
  1323. rsize = HID_MAX_BUFFER_SIZE - 1;
  1324. else if (rsize > HID_MAX_BUFFER_SIZE)
  1325. rsize = HID_MAX_BUFFER_SIZE;
  1326. if (csize < rsize) {
  1327. dbg_hid("report %d is too short, (%d < %d)\n", report->id,
  1328. csize, rsize);
  1329. memset(cdata + csize, 0, rsize - csize);
  1330. }
  1331. if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
  1332. hid->hiddev_report_event(hid, report);
  1333. if (hid->claimed & HID_CLAIMED_HIDRAW) {
  1334. ret = hidraw_report_event(hid, data, size);
  1335. if (ret)
  1336. goto out;
  1337. }
  1338. if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
  1339. for (a = 0; a < report->maxfield; a++)
  1340. hid_input_field(hid, report->field[a], cdata, interrupt);
  1341. hdrv = hid->driver;
  1342. if (hdrv && hdrv->report)
  1343. hdrv->report(hid, report);
  1344. }
  1345. if (hid->claimed & HID_CLAIMED_INPUT)
  1346. hidinput_report_event(hid, report);
  1347. out:
  1348. return ret;
  1349. }
  1350. EXPORT_SYMBOL_GPL(hid_report_raw_event);

/**
 * hid_input_report - report data from lower layer (usb, bt...)
 *
 * @hid: hid device
 * @type: HID report type (HID_*_REPORT)
 * @data: report contents
 * @size: size of data parameter
 * @interrupt: distinguish between interrupt and control transfers
 *
 * This is the data entry point for lower layers.
 */
int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
{
        struct hid_report_enum *report_enum;
        struct hid_driver *hdrv;
        struct hid_report *report;
        int ret = 0;

        if (!hid)
                return -ENODEV;

        if (down_trylock(&hid->driver_input_lock))
                return -EBUSY;

        if (!hid->driver) {
                ret = -ENODEV;
                goto unlock;
        }
        report_enum = hid->report_enum + type;
        hdrv = hid->driver;

        if (!size) {
                dbg_hid("empty report\n");
                ret = -1;
                goto unlock;
        }

        /* Avoid unnecessary overhead if debugfs is disabled */
        if (!list_empty(&hid->debug_list))
                hid_dump_report(hid, type, data, size);

        report = hid_get_report(report_enum, data);
        if (!report) {
                ret = -1;
                goto unlock;
        }

        if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
                ret = hdrv->raw_event(hid, report, data, size);
                if (ret < 0)
                        goto unlock;
        }

        ret = hid_report_raw_event(hid, type, data, size, interrupt);

unlock:
        up(&hid->driver_input_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(hid_input_report);
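
/*
 * Usage note: a transport (ll) driver normally calls hid_input_report() from
 * its interrupt-data completion path. Illustrative sketch only; the foo_*
 * names are hypothetical and not part of this file:
 *
 *        static void foo_irq_complete(struct foo_dev *foo, u8 *buf, u32 len)
 *        {
 *                hid_input_report(foo->hid, HID_INPUT_REPORT, buf, len, 1);
 *        }
 *
 * The last argument distinguishes interrupt (1) from control (0) transfers,
 * matching the @interrupt parameter documented above.
 */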

bool hid_match_one_id(const struct hid_device *hdev,
                      const struct hid_device_id *id)
{
        return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
                (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
                (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
                (id->product == HID_ANY_ID || id->product == hdev->product);
}

const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
                const struct hid_device_id *id)
{
        for (; id->bus; id++)
                if (hid_match_one_id(hdev, id))
                        return id;

        return NULL;
}

static const struct hid_device_id hid_hiddev_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
        { }
};

static bool hid_hiddev(struct hid_device *hdev)
{
        return !!hid_match_id(hdev, hid_hiddev_list);
}

static ssize_t
read_report_descriptor(struct file *filp, struct kobject *kobj,
                       struct bin_attribute *attr,
                       char *buf, loff_t off, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct hid_device *hdev = to_hid_device(dev);

        if (off >= hdev->rsize)
                return 0;

        if (off + count > hdev->rsize)
                count = hdev->rsize - off;

        memcpy(buf, hdev->rdesc + off, count);

        return count;
}

static ssize_t
show_country(struct device *dev, struct device_attribute *attr,
             char *buf)
{
        struct hid_device *hdev = to_hid_device(dev);

        return sprintf(buf, "%02x\n", hdev->country & 0xff);
}

static struct bin_attribute dev_bin_attr_report_desc = {
        .attr = { .name = "report_descriptor", .mode = 0444 },
        .read = read_report_descriptor,
        .size = HID_MAX_DESCRIPTOR_SIZE,
};

static const struct device_attribute dev_attr_country = {
        .attr = { .name = "country", .mode = 0444 },
        .show = show_country,
};

int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
{
        static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
                "Joystick", "Gamepad", "Keyboard", "Keypad",
                "Multi-Axis Controller"
        };
        const char *type, *bus;
        char buf[64] = "";
        unsigned int i;
        int len;
        int ret;

        if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
                connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
        if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
                connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
        if (hdev->bus != BUS_USB)
                connect_mask &= ~HID_CONNECT_HIDDEV;
        if (hid_hiddev(hdev))
                connect_mask |= HID_CONNECT_HIDDEV_FORCE;

        if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
                                connect_mask & HID_CONNECT_HIDINPUT_FORCE))
                hdev->claimed |= HID_CLAIMED_INPUT;

        if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
                        !hdev->hiddev_connect(hdev,
                                connect_mask & HID_CONNECT_HIDDEV_FORCE))
                hdev->claimed |= HID_CLAIMED_HIDDEV;
        if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
                hdev->claimed |= HID_CLAIMED_HIDRAW;

        if (connect_mask & HID_CONNECT_DRIVER)
                hdev->claimed |= HID_CLAIMED_DRIVER;

        /* Drivers with the ->raw_event callback set are not required to connect
         * to any other listener. */
        if (!hdev->claimed && !hdev->driver->raw_event) {
                hid_err(hdev, "device has no listeners, quitting\n");
                return -ENODEV;
        }

        if ((hdev->claimed & HID_CLAIMED_INPUT) &&
                        (connect_mask & HID_CONNECT_FF) && hdev->ff_init)
                hdev->ff_init(hdev);

        len = 0;
        if (hdev->claimed & HID_CLAIMED_INPUT)
                len += sprintf(buf + len, "input");
        if (hdev->claimed & HID_CLAIMED_HIDDEV)
                len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
                                ((struct hiddev *)hdev->hiddev)->minor);
        if (hdev->claimed & HID_CLAIMED_HIDRAW)
                len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
                                ((struct hidraw *)hdev->hidraw)->minor);

        type = "Device";
        for (i = 0; i < hdev->maxcollection; i++) {
                struct hid_collection *col = &hdev->collection[i];
                if (col->type == HID_COLLECTION_APPLICATION &&
                    (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
                    (col->usage & 0xffff) < ARRAY_SIZE(types)) {
                        type = types[col->usage & 0xffff];
                        break;
                }
        }

        switch (hdev->bus) {
        case BUS_USB:
                bus = "USB";
                break;
        case BUS_BLUETOOTH:
                bus = "BLUETOOTH";
                break;
        case BUS_I2C:
                bus = "I2C";
                break;
        default:
                bus = "<UNKNOWN>";
        }

        ret = device_create_file(&hdev->dev, &dev_attr_country);
        if (ret)
                hid_warn(hdev,
                         "can't create sysfs country code attribute err: %d\n", ret);

        hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
                 buf, bus, hdev->version >> 8, hdev->version & 0xff,
                 type, hdev->name, hdev->phys);

        return 0;
}
EXPORT_SYMBOL_GPL(hid_connect);

void hid_disconnect(struct hid_device *hdev)
{
        device_remove_file(&hdev->dev, &dev_attr_country);
        if (hdev->claimed & HID_CLAIMED_INPUT)
                hidinput_disconnect(hdev);
        if (hdev->claimed & HID_CLAIMED_HIDDEV)
                hdev->hiddev_disconnect(hdev);
        if (hdev->claimed & HID_CLAIMED_HIDRAW)
                hidraw_disconnect(hdev);
        hdev->claimed = 0;
}
EXPORT_SYMBOL_GPL(hid_disconnect);

/**
 * hid_hw_start - start underlying HW
 * @hdev: hid device
 * @connect_mask: which outputs to connect, see HID_CONNECT_*
 *
 * Call this in probe function *after* hid_parse. This will setup HW
 * buffers and start the device (if not deferred to device open).
 * hid_hw_stop must be called if this was successful.
 */
int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
{
        int error;

        error = hdev->ll_driver->start(hdev);
        if (error)
                return error;

        if (connect_mask) {
                error = hid_connect(hdev, connect_mask);
                if (error) {
                        hdev->ll_driver->stop(hdev);
                        return error;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hid_hw_start);
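
/*
 * Usage note: a minimal driver probe/remove pair built on hid_parse(),
 * hid_hw_start() and hid_hw_stop(). Illustrative sketch only; the foo_*
 * names are hypothetical and not part of this file:
 *
 *        static int foo_probe(struct hid_device *hdev,
 *                             const struct hid_device_id *id)
 *        {
 *                int ret;
 *
 *                ret = hid_parse(hdev);
 *                if (ret)
 *                        return ret;
 *
 *                return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *        }
 *
 *        static void foo_remove(struct hid_device *hdev)
 *        {
 *                hid_hw_stop(hdev);
 *        }
 */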

/**
 * hid_hw_stop - stop underlying HW
 * @hdev: hid device
 *
 * This is usually called from the remove function, or from probe when
 * something failed and hid_hw_start() was already called.
 */
void hid_hw_stop(struct hid_device *hdev)
{
        hid_disconnect(hdev);
        hdev->ll_driver->stop(hdev);
}
EXPORT_SYMBOL_GPL(hid_hw_stop);

/**
 * hid_hw_open - signal underlying HW to start delivering events
 * @hdev: hid device
 *
 * Tell underlying HW to start delivering events from the device.
 * This function should be called some time after a successful call
 * to hid_hw_start().
 */
int hid_hw_open(struct hid_device *hdev)
{
        int ret;

        ret = mutex_lock_killable(&hdev->ll_open_lock);
        if (ret)
                return ret;

        if (!hdev->ll_open_count++) {
                ret = hdev->ll_driver->open(hdev);
                if (ret)
                        hdev->ll_open_count--;
        }

        mutex_unlock(&hdev->ll_open_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(hid_hw_open);

/**
 * hid_hw_close - signal underlying HW to stop delivering events
 *
 * @hdev: hid device
 *
 * This function indicates that we are not interested in the events
 * from this device anymore. Delivery of events may or may not stop,
 * depending on the number of users still outstanding.
 */
void hid_hw_close(struct hid_device *hdev)
{
        mutex_lock(&hdev->ll_open_lock);
        if (!--hdev->ll_open_count)
                hdev->ll_driver->close(hdev);
        mutex_unlock(&hdev->ll_open_lock);
}
EXPORT_SYMBOL_GPL(hid_hw_close);
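
/*
 * Usage note: hid_hw_open()/hid_hw_close() are reference counted, so callers
 * only need to keep the calls balanced. A typical pattern is pairing them in
 * an input device's open/close handlers. Illustrative sketch only; the foo_*
 * names are hypothetical:
 *
 *        static int foo_input_open(struct input_dev *dev)
 *        {
 *                struct hid_device *hid = input_get_drvdata(dev);
 *
 *                return hid_hw_open(hid);
 *        }
 *
 *        static void foo_input_close(struct input_dev *dev)
 *        {
 *                struct hid_device *hid = input_get_drvdata(dev);
 *
 *                hid_hw_close(hid);
 *        }
 */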

struct hid_dynid {
        struct list_head list;
        struct hid_device_id id;
};

/**
 * new_id_store - add a new HID device ID to this driver and re-probe devices
 * @drv: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Adds a new dynamic hid device ID to this driver,
 * and causes the driver to probe for all devices again.
 */
static ssize_t new_id_store(struct device_driver *drv, const char *buf,
                size_t count)
{
        struct hid_driver *hdrv = to_hid_driver(drv);
        struct hid_dynid *dynid;
        __u32 bus, vendor, product;
        unsigned long driver_data = 0;
        int ret;

        ret = sscanf(buf, "%x %x %x %lx",
                     &bus, &vendor, &product, &driver_data);
        if (ret < 3)
                return -EINVAL;

        dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
        if (!dynid)
                return -ENOMEM;

        dynid->id.bus = bus;
        dynid->id.group = HID_GROUP_ANY;
        dynid->id.vendor = vendor;
        dynid->id.product = product;
        dynid->id.driver_data = driver_data;

        spin_lock(&hdrv->dyn_lock);
        list_add_tail(&dynid->list, &hdrv->dyn_list);
        spin_unlock(&hdrv->dyn_lock);

        ret = driver_attach(&hdrv->driver);

        return ret ? : count;
}
static DRIVER_ATTR_WO(new_id);

static struct attribute *hid_drv_attrs[] = {
        &driver_attr_new_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(hid_drv);
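
/*
 * Usage note: new_id_store() backs the per-driver "new_id" sysfs file; the
 * expected input is "<bus> <vendor> <product> [driver_data]" in hex, as read
 * by the sscanf() above. Illustrative example only (the driver name and IDs
 * below are made up):
 *
 *        echo "0003 1234 5678" > /sys/bus/hid/drivers/foo/new_id
 */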

static void hid_free_dynids(struct hid_driver *hdrv)
{
        struct hid_dynid *dynid, *n;

        spin_lock(&hdrv->dyn_lock);
        list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
                list_del(&dynid->list);
                kfree(dynid);
        }
        spin_unlock(&hdrv->dyn_lock);
}

const struct hid_device_id *hid_match_device(struct hid_device *hdev,
                                             struct hid_driver *hdrv)
{
        struct hid_dynid *dynid;

        spin_lock(&hdrv->dyn_lock);
        list_for_each_entry(dynid, &hdrv->dyn_list, list) {
                if (hid_match_one_id(hdev, &dynid->id)) {
                        spin_unlock(&hdrv->dyn_lock);
                        return &dynid->id;
                }
        }
        spin_unlock(&hdrv->dyn_lock);

        return hid_match_id(hdev, hdrv->id_table);
}
EXPORT_SYMBOL_GPL(hid_match_device);

static int hid_bus_match(struct device *dev, struct device_driver *drv)
{
        struct hid_driver *hdrv = to_hid_driver(drv);
        struct hid_device *hdev = to_hid_device(dev);

        return hid_match_device(hdev, hdrv) != NULL;
}

/**
 * hid_compare_device_paths - check if both devices share the same path
 * @hdev_a: hid device
 * @hdev_b: hid device
 * @separator: char to use as separator
 *
 * Check if two devices share the same path up to the last occurrence of
 * the separator char. Both paths must exist (i.e., zero-length paths
 * don't match).
 */
bool hid_compare_device_paths(struct hid_device *hdev_a,
                              struct hid_device *hdev_b, char separator)
{
        int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
        int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;

        if (n1 != n2 || n1 <= 0 || n2 <= 0)
                return false;

        return !strncmp(hdev_a->phys, hdev_b->phys, n1);
}
EXPORT_SYMBOL_GPL(hid_compare_device_paths);
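
/*
 * Usage note: with usbhid, phys strings typically look like
 * "usb-0000:00:14.0-2/input0", so two interfaces of the same physical device
 * can be grouped by comparing the paths up to the last '/'. Illustrative
 * sketch only:
 *
 *        bool same_device = hid_compare_device_paths(hdev_a, hdev_b, '/');
 */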

static int hid_device_probe(struct device *dev)
{
        struct hid_driver *hdrv = to_hid_driver(dev->driver);
        struct hid_device *hdev = to_hid_device(dev);
        const struct hid_device_id *id;
        int ret = 0;

        if (down_interruptible(&hdev->driver_input_lock)) {
                ret = -EINTR;
                goto end;
        }
        hdev->io_started = false;

        clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);

        if (!hdev->driver) {
                id = hid_match_device(hdev, hdrv);
                if (id == NULL) {
                        ret = -ENODEV;
                        goto unlock;
                }

                if (hdrv->match) {
                        if (!hdrv->match(hdev, hid_ignore_special_drivers)) {
                                ret = -ENODEV;
                                goto unlock;
                        }
                } else {
                        /*
                         * hid-generic implements .match(), so if
                         * hid_ignore_special_drivers is set, we can safely
                         * return.
                         */
                        if (hid_ignore_special_drivers) {
                                ret = -ENODEV;
                                goto unlock;
                        }
                }

                /* reset the quirks that have been previously set */
                hdev->quirks = hid_lookup_quirk(hdev);
                hdev->driver = hdrv;
                if (hdrv->probe) {
                        ret = hdrv->probe(hdev, id);
                } else { /* default probe */
                        ret = hid_open_report(hdev);
                        if (!ret)
                                ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
                }
                if (ret) {
                        hid_close_report(hdev);
                        hdev->driver = NULL;
                }
        }
unlock:
        if (!hdev->io_started)
                up(&hdev->driver_input_lock);
end:
        return ret;
}

static int hid_device_remove(struct device *dev)
{
        struct hid_device *hdev = to_hid_device(dev);
        struct hid_driver *hdrv;
        int ret = 0;

        if (down_interruptible(&hdev->driver_input_lock)) {
                ret = -EINTR;
                goto end;
        }
        hdev->io_started = false;

        hdrv = hdev->driver;
        if (hdrv) {
                if (hdrv->remove)
                        hdrv->remove(hdev);
                else /* default remove */
                        hid_hw_stop(hdev);
                hid_close_report(hdev);
                hdev->driver = NULL;
        }

        if (!hdev->io_started)
                up(&hdev->driver_input_lock);
end:
        return ret;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
                             char *buf)
{
        struct hid_device *hdev = container_of(dev, struct hid_device, dev);

        return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
                         hdev->bus, hdev->group, hdev->vendor, hdev->product);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *hid_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL,
};
static struct bin_attribute *hid_dev_bin_attrs[] = {
        &dev_bin_attr_report_desc,
        NULL
};
static const struct attribute_group hid_dev_group = {
        .attrs = hid_dev_attrs,
        .bin_attrs = hid_dev_bin_attrs,
};
__ATTRIBUTE_GROUPS(hid_dev);

static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct hid_device *hdev = to_hid_device(dev);

        if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
                           hdev->bus, hdev->vendor, hdev->product))
                return -ENOMEM;

        if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
                return -ENOMEM;

        if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
                return -ENOMEM;

        if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
                return -ENOMEM;

        if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
                           hdev->bus, hdev->group, hdev->vendor, hdev->product))
                return -ENOMEM;

        return 0;
}

struct bus_type hid_bus_type = {
        .name           = "hid",
        .dev_groups     = hid_dev_groups,
        .drv_groups     = hid_drv_groups,
        .match          = hid_bus_match,
        .probe          = hid_device_probe,
        .remove         = hid_device_remove,
        .uevent         = hid_uevent,
};
EXPORT_SYMBOL(hid_bus_type);

int hid_add_device(struct hid_device *hdev)
{
        static atomic_t id = ATOMIC_INIT(0);
        int ret;

        if (WARN_ON(hdev->status & HID_STAT_ADDED))
                return -EBUSY;

        hdev->quirks = hid_lookup_quirk(hdev);

        /* We need to kill ignored devices here, otherwise they will stay
         * allocated waiting for a matching driver. */
        if (hid_ignore(hdev))
                return -ENODEV;

        /*
         * Check for the mandatory transport channel.
         */
        if (!hdev->ll_driver->raw_request) {
                hid_err(hdev, "transport driver missing .raw_request()\n");
                return -EINVAL;
        }

        /*
         * Read the device report descriptor once and use as template
         * for the driver-specific modifications.
         */
        ret = hdev->ll_driver->parse(hdev);
        if (ret)
                return ret;
        if (!hdev->dev_rdesc)
                return -ENODEV;

        /*
         * Scan generic devices for group information
         */
        if (hid_ignore_special_drivers) {
                hdev->group = HID_GROUP_GENERIC;
        } else if (!hdev->group &&
                   !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
                ret = hid_scan_report(hdev);
                if (ret)
                        hid_warn(hdev, "bad device descriptor (%d)\n", ret);
        }

        /* XXX hack, any other cleaner solution after the driver core
         * is converted to allow more than 20 bytes as the device name? */
        dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
                     hdev->vendor, hdev->product, atomic_inc_return(&id));

        hid_debug_register(hdev, dev_name(&hdev->dev));
        ret = device_add(&hdev->dev);
        if (!ret)
                hdev->status |= HID_STAT_ADDED;
        else
                hid_debug_unregister(hdev);

        return ret;
}
EXPORT_SYMBOL_GPL(hid_add_device);

/**
 * hid_allocate_device - allocate new hid device descriptor
 *
 * Allocate and initialize a hid device, so that hid_destroy_device might be
 * used to free it.
 *
 * Returns a new hid_device pointer on success, otherwise an ERR_PTR encoded
 * error value.
 */
struct hid_device *hid_allocate_device(void)
{
        struct hid_device *hdev;
        int ret = -ENOMEM;

        hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
        if (hdev == NULL)
                return ERR_PTR(ret);

        device_initialize(&hdev->dev);
        hdev->dev.release = hid_device_release;
        hdev->dev.bus = &hid_bus_type;
        device_enable_async_suspend(&hdev->dev);

        hid_close_report(hdev);

        init_waitqueue_head(&hdev->debug_wait);
        INIT_LIST_HEAD(&hdev->debug_list);
        spin_lock_init(&hdev->debug_list_lock);
        sema_init(&hdev->driver_input_lock, 1);
        mutex_init(&hdev->ll_open_lock);

        return hdev;
}
EXPORT_SYMBOL_GPL(hid_allocate_device);
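
/*
 * Usage note: a transport driver typically pairs hid_allocate_device() with
 * hid_add_device() and, on failure or teardown, hid_destroy_device().
 * Illustrative sketch only; foo_ll_driver and the IDs are hypothetical:
 *
 *        struct hid_device *hid;
 *        int ret;
 *
 *        hid = hid_allocate_device();
 *        if (IS_ERR(hid))
 *                return PTR_ERR(hid);
 *
 *        hid->ll_driver = &foo_ll_driver;
 *        hid->bus = BUS_VIRTUAL;
 *        hid->vendor = 0x1234;
 *        hid->product = 0x5678;
 *        snprintf(hid->name, sizeof(hid->name), "Foo HID");
 *
 *        ret = hid_add_device(hid);
 *        if (ret)
 *                hid_destroy_device(hid);
 */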

static void hid_remove_device(struct hid_device *hdev)
{
        if (hdev->status & HID_STAT_ADDED) {
                device_del(&hdev->dev);
                hid_debug_unregister(hdev);
                hdev->status &= ~HID_STAT_ADDED;
        }
        kfree(hdev->dev_rdesc);
        hdev->dev_rdesc = NULL;
        hdev->dev_rsize = 0;
}

/**
 * hid_destroy_device - free previously allocated device
 *
 * @hdev: hid device
 *
 * If you allocate hid_device through hid_allocate_device, you should only
 * ever free it using this function.
 */
void hid_destroy_device(struct hid_device *hdev)
{
        hid_remove_device(hdev);
        put_device(&hdev->dev);
}
EXPORT_SYMBOL_GPL(hid_destroy_device);

static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
{
        struct hid_driver *hdrv = data;
        struct hid_device *hdev = to_hid_device(dev);

        if (hdev->driver == hdrv &&
            !hdrv->match(hdev, hid_ignore_special_drivers) &&
            !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
                return device_reprobe(dev);

        return 0;
}

static int __hid_bus_driver_added(struct device_driver *drv, void *data)
{
        struct hid_driver *hdrv = to_hid_driver(drv);

        if (hdrv->match) {
                bus_for_each_dev(&hid_bus_type, NULL, hdrv,
                                 __hid_bus_reprobe_drivers);
        }

        return 0;
}

static int __bus_removed_driver(struct device_driver *drv, void *data)
{
        return bus_rescan_devices(&hid_bus_type);
}

int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
                const char *mod_name)
{
        int ret;

        hdrv->driver.name = hdrv->name;
        hdrv->driver.bus = &hid_bus_type;
        hdrv->driver.owner = owner;
        hdrv->driver.mod_name = mod_name;

        INIT_LIST_HEAD(&hdrv->dyn_list);
        spin_lock_init(&hdrv->dyn_lock);

        ret = driver_register(&hdrv->driver);

        if (ret == 0)
                bus_for_each_drv(&hid_bus_type, NULL, NULL,
                                 __hid_bus_driver_added);

        return ret;
}
EXPORT_SYMBOL_GPL(__hid_register_driver);

void hid_unregister_driver(struct hid_driver *hdrv)
{
        driver_unregister(&hdrv->driver);
        hid_free_dynids(hdrv);

        bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
}
EXPORT_SYMBOL_GPL(hid_unregister_driver);
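
/*
 * Usage note: most HID drivers do not call __hid_register_driver() directly;
 * they declare a struct hid_driver and let the module_hid_driver() helper
 * register and unregister it. Illustrative sketch only; the foo_* names and
 * IDs are hypothetical:
 *
 *        static const struct hid_device_id foo_devices[] = {
 *                { HID_USB_DEVICE(0x1234, 0x5678) },
 *                { }
 *        };
 *        MODULE_DEVICE_TABLE(hid, foo_devices);
 *
 *        static struct hid_driver foo_driver = {
 *                .name           = "foo",
 *                .id_table       = foo_devices,
 *                .probe          = foo_probe,
 *        };
 *        module_hid_driver(foo_driver);
 */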

int hid_check_keys_pressed(struct hid_device *hid)
{
        struct hid_input *hidinput;
        int i;

        if (!(hid->claimed & HID_CLAIMED_INPUT))
                return 0;

        list_for_each_entry(hidinput, &hid->inputs, list) {
                for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
                        if (hidinput->input->key[i])
                                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
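
/*
 * Usage note: a transport driver can use hid_check_keys_pressed() to refuse
 * autosuspend while a key is held down, e.g. (sketch only, from within a
 * suspend/idle callback):
 *
 *        if (hid_check_keys_pressed(hid))
 *                return -EBUSY;
 */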

static int __init hid_init(void)
{
        int ret;

        if (hid_debug)
                pr_warn("hid_debug is now used solely for parser and driver debugging.\n"
                        "debugfs is now used for inspecting the device (report descriptor, reports)\n");

        ret = bus_register(&hid_bus_type);
        if (ret) {
                pr_err("can't register hid bus\n");
                goto err;
        }

        ret = hidraw_init();
        if (ret)
                goto err_bus;

        hid_debug_init();

        return 0;
err_bus:
        bus_unregister(&hid_bus_type);
err:
        return ret;
}

static void __exit hid_exit(void)
{
        hid_debug_exit();
        hidraw_exit();
        bus_unregister(&hid_bus_type);
        hid_quirks_exit(HID_BUS_ANY);
}

module_init(hid_init);
module_exit(hid_exit);

MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_LICENSE("GPL");