/* rmi_driver.c */
  1. /*
  2. * Copyright (c) 2011-2016 Synaptics Incorporated
  3. * Copyright (c) 2011 Unixphere
  4. *
  5. * This driver provides the core support for a single RMI4-based device.
  6. *
  7. * The RMI4 specification can be found here (URL split for line length):
  8. *
  9. * http://www.synaptics.com/sites/default/files/
  10. * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
  11. *
  12. * This program is free software; you can redistribute it and/or modify it
  13. * under the terms of the GNU General Public License version 2 as published by
  14. * the Free Software Foundation.
  15. */
  16. #include <linux/bitmap.h>
  17. #include <linux/delay.h>
  18. #include <linux/fs.h>
  19. #include <linux/irq.h>
  20. #include <linux/pm.h>
  21. #include <linux/slab.h>
  22. #include <linux/of.h>
  23. #include <linux/irqdomain.h>
  24. #include <uapi/linux/input.h>
  25. #include <linux/rmi.h>
  26. #include "rmi_bus.h"
  27. #include "rmi_driver.h"
  28. #define HAS_NONSTANDARD_PDT_MASK 0x40
  29. #define RMI4_MAX_PAGE 0xff
  30. #define RMI4_PAGE_SIZE 0x100
  31. #define RMI4_PAGE_MASK 0xFF00
  32. #define RMI_DEVICE_RESET_CMD 0x01
  33. #define DEFAULT_RESET_DELAY_MS 100
/*
 * rmi_free_function_list - unregister every RMI function of a device and
 * release the shared IRQ bookkeeping storage.
 * @rmi_dev: the RMI device being torn down.
 *
 * Functions are removed in reverse registration order so that F01 (the
 * device-control function) goes away last, after everything that may
 * still depend on it.
 */
void rmi_free_function_list(struct rmi_device *rmi_dev)
{
	struct rmi_function *fn, *tmp;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

	rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");

	/* Doing it in the reverse order so F01 will be removed last */
	list_for_each_entry_safe_reverse(fn, tmp,
					 &data->function_list, node) {
		list_del(&fn->node);
		rmi_unregister_function(fn);
	}

	/*
	 * NOTE(review): irq_status/fn_irq_bits/current_irq_mask/new_irq_mask
	 * appear to be carved out of the single irq_memory allocation (its
	 * setup is outside this chunk) — freeing it once and NULLing every
	 * alias prevents use of stale pointers. Confirm against the
	 * allocation site in rmi_probe_interrupts().
	 */
	devm_kfree(&rmi_dev->dev, data->irq_memory);
	data->irq_memory = NULL;
	data->irq_status = NULL;
	data->fn_irq_bits = NULL;
	data->current_irq_mask = NULL;
	data->new_irq_mask = NULL;

	data->f01_container = NULL;
	data->f34_container = NULL;
}
  54. static int reset_one_function(struct rmi_function *fn)
  55. {
  56. struct rmi_function_handler *fh;
  57. int retval = 0;
  58. if (!fn || !fn->dev.driver)
  59. return 0;
  60. fh = to_rmi_function_handler(fn->dev.driver);
  61. if (fh->reset) {
  62. retval = fh->reset(fn);
  63. if (retval < 0)
  64. dev_err(&fn->dev, "Reset failed with code %d.\n",
  65. retval);
  66. }
  67. return retval;
  68. }
  69. static int configure_one_function(struct rmi_function *fn)
  70. {
  71. struct rmi_function_handler *fh;
  72. int retval = 0;
  73. if (!fn || !fn->dev.driver)
  74. return 0;
  75. fh = to_rmi_function_handler(fn->dev.driver);
  76. if (fh->config) {
  77. retval = fh->config(fn);
  78. if (retval < 0)
  79. dev_err(&fn->dev, "Config failed with code %d.\n",
  80. retval);
  81. }
  82. return retval;
  83. }
/*
 * rmi_driver_process_reset_requests - run every function's reset hook.
 * Stops and propagates the first error; returns 0 on success.
 */
static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = reset_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}
/*
 * rmi_driver_process_config_requests - run every function's config hook.
 * Stops and propagates the first error; returns 0 on success.
 */
static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = configure_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}
/*
 * rmi_process_interrupt_requests - dispatch pending RMI interrupts.
 *
 * If no attention payload was queued by the transport (attn_data.data is
 * NULL), the interrupt status is read from the F01 data registers.
 * The status is then masked with the enabled function IRQ bits under
 * irq_mutex, and one nested IRQ is raised per set bit via the driver's
 * linear irqdomain. Finally the input device (if any) is synced.
 */
static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;
	int i;
	int error;

	if (!data)
		return 0;

	if (!data->attn_data.data) {
		/* F01 data_base_addr + 1 holds the interrupt status regs. */
		error = rmi_read_block(rmi_dev,
				data->f01_container->fd.data_base_addr + 1,
				data->irq_status, data->num_of_irq_regs);
		if (error < 0) {
			dev_err(dev, "Failed to read irqs, code=%d\n", error);
			return error;
		}
	}

	mutex_lock(&data->irq_mutex);
	bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
		   data->irq_count);
	/*
	 * At this point, irq_status has all bits that are set in the
	 * interrupt status register and are enabled.
	 */
	mutex_unlock(&data->irq_mutex);

	for_each_set_bit(i, data->irq_status, data->irq_count)
		handle_nested_irq(irq_find_mapping(data->irqdomain, i));

	if (data->input)
		input_sync(data->input);

	return 0;
}
/*
 * rmi_set_attn_data - queue an attention payload from the transport.
 * @rmi_dev: the RMI device.
 * @irq_status: raw interrupt status bits that accompanied the payload.
 * @data: attention data to copy; duplicated with GFP_ATOMIC since this
 *        may be called from atomic context.
 * @size: size of @data in bytes.
 *
 * Silently drops the payload when the driver is disabled or the copy
 * fails — the IRQ path will fall back to reading status from F01.
 */
void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
		       void *data, size_t size)
{
	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data;
	void *fifo_data;

	if (!drvdata->enabled)
		return;

	fifo_data = kmemdup(data, size, GFP_ATOMIC);
	if (!fifo_data)
		return;

	attn_data.irq_status = irq_status;
	attn_data.size = size;
	attn_data.data = fifo_data;

	kfifo_put(&drvdata->attn_fifo, attn_data);
}
EXPORT_SYMBOL_GPL(rmi_set_attn_data);
  156. static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
  157. {
  158. struct rmi_device *rmi_dev = dev_id;
  159. struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
  160. struct rmi4_attn_data attn_data = {0};
  161. int ret, count;
  162. count = kfifo_get(&drvdata->attn_fifo, &attn_data);
  163. if (count) {
  164. *(drvdata->irq_status) = attn_data.irq_status;
  165. drvdata->attn_data = attn_data;
  166. }
  167. ret = rmi_process_interrupt_requests(rmi_dev);
  168. if (ret)
  169. rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
  170. "Failed to process interrupt request: %d\n", ret);
  171. if (count) {
  172. kfree(attn_data.data);
  173. drvdata->attn_data.data = NULL;
  174. }
  175. if (!kfifo_is_empty(&drvdata->attn_fifo))
  176. return rmi_irq_fn(irq, dev_id);
  177. return IRQ_HANDLED;
  178. }
/*
 * rmi_irq_init - request the device's threaded interrupt.
 *
 * Uses the trigger type already configured on the line when available,
 * defaulting to level-low, and marks the driver enabled on success.
 * Returns 0 or the negative error from devm_request_threaded_irq().
 */
static int rmi_irq_init(struct rmi_device *rmi_dev)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq_flags = irq_get_trigger_type(pdata->irq);
	int ret;

	if (!irq_flags)
		irq_flags = IRQF_TRIGGER_LOW;

	/* No hard handler: all work happens in the rmi_irq_fn thread. */
	ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
					rmi_irq_fn, irq_flags | IRQF_ONESHOT,
					dev_driver_string(rmi_dev->xport->dev),
					rmi_dev);
	if (ret < 0) {
		dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
			pdata->irq);
		return ret;
	}

	data->enabled = true;

	return 0;
}
  199. struct rmi_function *rmi_find_function(struct rmi_device *rmi_dev, u8 number)
  200. {
  201. struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
  202. struct rmi_function *entry;
  203. list_for_each_entry(entry, &data->function_list, node) {
  204. if (entry->fd.function_number == number)
  205. return entry;
  206. }
  207. return NULL;
  208. }
/*
 * suspend_one_function - invoke a function driver's suspend hook, if any.
 * Returns 0 when the function is unbound or has no hook; otherwise the
 * hook's return value (errors are also logged here).
 */
static int suspend_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->suspend) {
		retval = fh->suspend(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Suspend failed with code %d.\n",
				retval);
	}

	return retval;
}
/*
 * rmi_suspend_functions - suspend every function on the device.
 * Stops and propagates the first error; returns 0 on success.
 */
static int rmi_suspend_functions(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = suspend_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}
/*
 * resume_one_function - invoke a function driver's resume hook, if any.
 * Returns 0 when the function is unbound or has no hook; otherwise the
 * hook's return value (errors are also logged here).
 */
static int resume_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->resume) {
		retval = fh->resume(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Resume failed with code %d.\n",
				retval);
	}

	return retval;
}
/*
 * rmi_resume_functions - resume every function on the device.
 * Stops and propagates the first error; returns 0 on success.
 */
static int rmi_resume_functions(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = resume_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}
/*
 * rmi_enable_sensor - (re)configure all functions and service any
 * interrupts that are already pending. Returns 0 or a negative error.
 */
int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
	int retval = 0;

	retval = rmi_driver_process_config_requests(rmi_dev);
	if (retval < 0)
		return retval;

	return rmi_process_interrupt_requests(rmi_dev);
}
/**
 * rmi_driver_set_input_params - set input device id and other data.
 *
 * @rmi_dev: Pointer to an RMI device
 * @input: Pointer to input device
 *
 * Fills in the generic Synaptics identity (name, vendor id, RMI bus
 * type); rmi_driver_set_input_name() may later override the name with
 * the product-specific one. Always returns 0.
 */
static int rmi_driver_set_input_params(struct rmi_device *rmi_dev,
				       struct input_dev *input)
{
	input->name = SYNAPTICS_INPUT_DEVICE_NAME;
	input->id.vendor  = SYNAPTICS_VENDOR_ID;
	input->id.bustype = BUS_RMI;
	return 0;
}
/*
 * rmi_driver_set_input_name - name the input device after the product ID
 * reported by F01 ("Synaptics <product>"). On allocation failure the
 * generic name set by rmi_driver_set_input_params() is kept.
 */
static void rmi_driver_set_input_name(struct rmi_device *rmi_dev,
				      struct input_dev *input)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	const char *device_name = rmi_f01_get_product_ID(data->f01_container);
	char *name;

	name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL,
			      "Synaptics %s", device_name);
	if (!name)
		return;

	input->name = name;
}
/*
 * rmi_driver_set_irq_bits - enable the interrupts in @mask.
 *
 * Under irq_mutex: ORs @mask into the current mask, writes the result to
 * the F01 interrupt-enable registers (control_base_addr + 1), and on
 * success commits it to current_irq_mask and fn_irq_bits.
 */
static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
				   unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	mutex_lock(&data->irq_mutex);
	bitmap_or(data->new_irq_mask,
		  data->current_irq_mask, mask, data->irq_count);

	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
			__func__);
		goto error_unlock;
	}
	/*
	 * NOTE(review): bitmap_copy() takes a bit count, but this passes
	 * num_of_irq_regs (the register/byte count, irq_count / 8) — so
	 * only the low bits appear to be copied. Confirm intent; other
	 * bitmap ops here use irq_count.
	 */
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

	bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}
/*
 * rmi_driver_clear_irq_bits - disable the interrupts in @mask.
 *
 * Under irq_mutex: clears @mask from fn_irq_bits, computes the new
 * enable mask, writes it to the F01 interrupt-enable registers
 * (control_base_addr + 1), and on success commits it to
 * current_irq_mask.
 */
static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
				     unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	mutex_lock(&data->irq_mutex);
	bitmap_andnot(data->fn_irq_bits,
		      data->fn_irq_bits, mask, data->irq_count);
	bitmap_andnot(data->new_irq_mask,
		      data->current_irq_mask, mask, data->irq_count);

	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
			__func__);
		goto error_unlock;
	}
	/*
	 * NOTE(review): as in rmi_driver_set_irq_bits(), bitmap_copy() is
	 * given num_of_irq_regs (a register count) where a bit count is
	 * expected — confirm intent.
	 */
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}
/*
 * rmi_driver_reset_handler - bring the driver state back in sync after a
 * device reset: re-read the interrupt enable mask from F01, then run
 * every function's reset and config hooks. Safe to call early (before
 * F01 is bound); in that case it just warns and returns 0.
 */
static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int error;

	/*
	 * Can get called before the driver is fully ready to deal with
	 * this situation.
	 */
	if (!data || !data->f01_container) {
		dev_warn(&rmi_dev->dev,
			 "Not ready to handle reset yet!\n");
		return 0;
	}

	error = rmi_read_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->current_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		return error;
	}

	error = rmi_driver_process_reset_requests(rmi_dev);
	if (error < 0)
		return error;

	error = rmi_driver_process_config_requests(rmi_dev);
	if (error < 0)
		return error;

	return 0;
}
/*
 * rmi_read_pdt_entry - read and decode one 6-byte Page Description Table
 * entry at @pdt_address into @entry. The entry's page_start records the
 * page the entry was read from so register addresses can later be made
 * absolute. Returns 0 or a negative read error.
 */
static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
			      struct pdt_entry *entry, u16 pdt_address)
{
	u8 buf[RMI_PDT_ENTRY_SIZE];
	int error;

	error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
	if (error) {
		dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
			pdt_address, error);
		return error;
	}

	entry->page_start = pdt_address & RMI4_PAGE_MASK;
	entry->query_base_addr = buf[0];
	entry->command_base_addr = buf[1];
	entry->control_base_addr = buf[2];
	entry->data_base_addr = buf[3];
	/* buf[4] packs the IRQ source count (low bits) and version (bits 5-6). */
	entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
	entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
	entry->function_number = buf[5];

	return 0;
}
  397. static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
  398. struct rmi_function_descriptor *fd)
  399. {
  400. fd->query_base_addr = pdt->query_base_addr + pdt->page_start;
  401. fd->command_base_addr = pdt->command_base_addr + pdt->page_start;
  402. fd->control_base_addr = pdt->control_base_addr + pdt->page_start;
  403. fd->data_base_addr = pdt->data_base_addr + pdt->page_start;
  404. fd->function_number = pdt->function_number;
  405. fd->interrupt_source_count = pdt->interrupt_source_count;
  406. fd->function_version = pdt->function_version;
  407. }
/* Callback verdicts for the PDT scan. */
#define RMI_SCAN_CONTINUE 0
#define RMI_SCAN_DONE 1

/*
 * rmi_scan_pdt_page - walk the PDT of one page, invoking @callback for
 * each valid entry.
 *
 * The PDT is laid out top-down, so entries are read from
 * PDT_START_SCAN_LOCATION downward until the end-of-PDT sentinel.
 * @empty_pages is the caller-maintained count of consecutive empty
 * pages; scanning stops (RMI_SCAN_DONE) after two empty pages in a row
 * or when the device is in bootloader mode. Any non-RMI_SCAN_CONTINUE
 * value from @callback (including errors) is propagated immediately.
 */
static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
			     int page,
			     int *empty_pages,
			     void *ctx,
			     int (*callback)(struct rmi_device *rmi_dev,
					     void *ctx,
					     const struct pdt_entry *entry))
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct pdt_entry pdt_entry;
	u16 page_start = RMI4_PAGE_SIZE * page;
	u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
	u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
	u16 addr;
	int error;
	int retval;

	for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
		error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
		if (error)
			return error;

		if (RMI4_END_OF_PDT(pdt_entry.function_number))
			break;

		retval = callback(rmi_dev, ctx, &pdt_entry);
		if (retval != RMI_SCAN_CONTINUE)
			return retval;
	}

	/*
	 * Count number of empty PDT pages. If a gap of two pages
	 * or more is found, stop scanning.
	 */
	if (addr == pdt_start)
		++*empty_pages;
	else
		*empty_pages = 0;

	return (data->bootloader_mode || *empty_pages >= 2) ?
					RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}
/*
 * rmi_scan_pdt - scan the whole Page Description Table, page by page,
 * invoking @callback for each entry. Returns 0 on a completed scan
 * (RMI_SCAN_DONE is mapped to success) or the first negative error.
 */
int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
		 int (*callback)(struct rmi_device *rmi_dev,
				 void *ctx, const struct pdt_entry *entry))
{
	int page;
	int empty_pages = 0;
	int retval = RMI_SCAN_DONE;

	for (page = 0; page <= RMI4_MAX_PAGE; page++) {
		retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
					   ctx, callback);
		if (retval != RMI_SCAN_CONTINUE)
			break;
	}

	return retval < 0 ? retval : 0;
}
  462. int rmi_read_register_desc(struct rmi_device *d, u16 addr,
  463. struct rmi_register_descriptor *rdesc)
  464. {
  465. int ret;
  466. u8 size_presence_reg;
  467. u8 buf[35];
  468. int presense_offset = 1;
  469. u8 *struct_buf;
  470. int reg;
  471. int offset = 0;
  472. int map_offset = 0;
  473. int i;
  474. int b;
  475. /*
  476. * The first register of the register descriptor is the size of
  477. * the register descriptor's presense register.
  478. */
  479. ret = rmi_read(d, addr, &size_presence_reg);
  480. if (ret)
  481. return ret;
  482. ++addr;
  483. if (size_presence_reg < 0 || size_presence_reg > 35)
  484. return -EIO;
  485. memset(buf, 0, sizeof(buf));
  486. /*
  487. * The presence register contains the size of the register structure
  488. * and a bitmap which identified which packet registers are present
  489. * for this particular register type (ie query, control, or data).
  490. */
  491. ret = rmi_read_block(d, addr, buf, size_presence_reg);
  492. if (ret)
  493. return ret;
  494. ++addr;
  495. if (buf[0] == 0) {
  496. presense_offset = 3;
  497. rdesc->struct_size = buf[1] | (buf[2] << 8);
  498. } else {
  499. rdesc->struct_size = buf[0];
  500. }
  501. for (i = presense_offset; i < size_presence_reg; i++) {
  502. for (b = 0; b < 8; b++) {
  503. if (buf[i] & (0x1 << b))
  504. bitmap_set(rdesc->presense_map, map_offset, 1);
  505. ++map_offset;
  506. }
  507. }
  508. rdesc->num_registers = bitmap_weight(rdesc->presense_map,
  509. RMI_REG_DESC_PRESENSE_BITS);
  510. rdesc->registers = devm_kcalloc(&d->dev,
  511. rdesc->num_registers,
  512. sizeof(struct rmi_register_desc_item),
  513. GFP_KERNEL);
  514. if (!rdesc->registers)
  515. return -ENOMEM;
  516. /*
  517. * Allocate a temporary buffer to hold the register structure.
  518. * I'm not using devm_kzalloc here since it will not be retained
  519. * after exiting this function
  520. */
  521. struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL);
  522. if (!struct_buf)
  523. return -ENOMEM;
  524. /*
  525. * The register structure contains information about every packet
  526. * register of this type. This includes the size of the packet
  527. * register and a bitmap of all subpackets contained in the packet
  528. * register.
  529. */
  530. ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size);
  531. if (ret)
  532. goto free_struct_buff;
  533. reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
  534. for (i = 0; i < rdesc->num_registers; i++) {
  535. struct rmi_register_desc_item *item = &rdesc->registers[i];
  536. int reg_size = struct_buf[offset];
  537. ++offset;
  538. if (reg_size == 0) {
  539. reg_size = struct_buf[offset] |
  540. (struct_buf[offset + 1] << 8);
  541. offset += 2;
  542. }
  543. if (reg_size == 0) {
  544. reg_size = struct_buf[offset] |
  545. (struct_buf[offset + 1] << 8) |
  546. (struct_buf[offset + 2] << 16) |
  547. (struct_buf[offset + 3] << 24);
  548. offset += 4;
  549. }
  550. item->reg = reg;
  551. item->reg_size = reg_size;
  552. map_offset = 0;
  553. do {
  554. for (b = 0; b < 7; b++) {
  555. if (struct_buf[offset] & (0x1 << b))
  556. bitmap_set(item->subpacket_map,
  557. map_offset, 1);
  558. ++map_offset;
  559. }
  560. } while (struct_buf[offset++] & 0x80);
  561. item->num_subpackets = bitmap_weight(item->subpacket_map,
  562. RMI_REG_DESC_SUBPACKET_BITS);
  563. rmi_dbg(RMI_DEBUG_CORE, &d->dev,
  564. "%s: reg: %d reg size: %ld subpackets: %d\n", __func__,
  565. item->reg, item->reg_size, item->num_subpackets);
  566. reg = find_next_bit(rdesc->presense_map,
  567. RMI_REG_DESC_PRESENSE_BITS, reg + 1);
  568. }
  569. free_struct_buff:
  570. kfree(struct_buf);
  571. return ret;
  572. }
  573. const struct rmi_register_desc_item *rmi_get_register_desc_item(
  574. struct rmi_register_descriptor *rdesc, u16 reg)
  575. {
  576. const struct rmi_register_desc_item *item;
  577. int i;
  578. for (i = 0; i < rdesc->num_registers; i++) {
  579. item = &rdesc->registers[i];
  580. if (item->reg == reg)
  581. return item;
  582. }
  583. return NULL;
  584. }
  585. size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
  586. {
  587. const struct rmi_register_desc_item *item;
  588. int i;
  589. size_t size = 0;
  590. for (i = 0; i < rdesc->num_registers; i++) {
  591. item = &rdesc->registers[i];
  592. size += item->reg_size;
  593. }
  594. return size;
  595. }
  596. /* Compute the register offset relative to the base address */
  597. int rmi_register_desc_calc_reg_offset(
  598. struct rmi_register_descriptor *rdesc, u16 reg)
  599. {
  600. const struct rmi_register_desc_item *item;
  601. int offset = 0;
  602. int i;
  603. for (i = 0; i < rdesc->num_registers; i++) {
  604. item = &rdesc->registers[i];
  605. if (item->reg == reg)
  606. return offset;
  607. ++offset;
  608. }
  609. return -1;
  610. }
/*
 * rmi_register_desc_has_subpacket - true if @item's subpacket bitmap has
 * bit @subpacket set.
 *
 * NOTE(review): the search bound is RMI_REG_DESC_PRESENSE_BITS although
 * the map being searched is subpacket_map — if PRESENSE_BITS exceeds
 * RMI_REG_DESC_SUBPACKET_BITS this reads past the bitmap. Confirm the
 * two constants' relationship in rmi_driver.h.
 */
bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
				     u8 subpacket)
{
	return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS,
			     subpacket) == subpacket;
}
/*
 * rmi_check_bootloader_mode - note whether the device reports bootloader
 * mode, from either F34 v1+ (data reg bit 7) or F01 (data reg bit 6).
 * Sets data->bootloader_mode (never clears it); returns 0 or a negative
 * read error.
 */
static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
				     const struct pdt_entry *pdt)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int ret;
	u8 status;

	if (pdt->function_number == 0x34 && pdt->function_version > 1) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F34 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(7))
			data->bootloader_mode = true;
	} else if (pdt->function_number == 0x01) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F01 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(6))
			data->bootloader_mode = true;
	}

	return 0;
}
/*
 * rmi_count_irqs - PDT scan callback: accumulate the interrupt source
 * count into *(int *)ctx, also probing each entry for bootloader mode.
 * Returns RMI_SCAN_CONTINUE or a negative error.
 */
static int rmi_count_irqs(struct rmi_device *rmi_dev,
			  void *ctx, const struct pdt_entry *pdt)
{
	int *irq_count = ctx;
	int ret;

	*irq_count += pdt->interrupt_source_count;

	ret = rmi_check_bootloader_mode(rmi_dev, pdt);
	if (ret < 0)
		return ret;

	return RMI_SCAN_CONTINUE;
}
/*
 * rmi_initial_reset - PDT scan callback that issues the initial device
 * reset once F01 is found.
 *
 * Prefers the transport's reset hook when present; otherwise writes the
 * reset command to F01's command register and waits reset_delay_ms
 * (default DEFAULT_RESET_DELAY_MS). Returns RMI_SCAN_DONE after the
 * reset, RMI_SCAN_CONTINUE while still on page 0, -ENODEV if F01 was
 * not found on page 0, or a negative I/O error.
 */
int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
		      const struct pdt_entry *pdt)
{
	int error;

	if (pdt->function_number == 0x01) {
		u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
		u8 cmd_buf = RMI_DEVICE_RESET_CMD;
		const struct rmi_device_platform_data *pdata =
				rmi_get_platform_data(rmi_dev);

		if (rmi_dev->xport->ops->reset) {
			error = rmi_dev->xport->ops->reset(rmi_dev->xport,
								cmd_addr);
			if (error)
				return error;

			return RMI_SCAN_DONE;
		}

		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
		error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
		if (error) {
			dev_err(&rmi_dev->dev,
				"Initial reset failed. Code = %d.\n", error);
			return error;
		}

		mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);

		return RMI_SCAN_DONE;
	}

	/* F01 should always be on page 0. If we don't find it there, fail. */
	return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
}
/*
 * rmi_create_function - PDT scan callback that allocates, initializes
 * and registers an rmi_function for one PDT entry.
 *
 * @ctx points at the running IRQ count: this function's IRQs are placed
 * at the current position and the count is advanced. The function's
 * irq_mask (a flexible tail sized for data->irq_count bits) gets one bit
 * per interrupt source. F01/F34 are additionally cached in driver data.
 *
 * NOTE(review): on rmi_register_function() failure, fn is not freed
 * here — presumably the device core's release path owns it after
 * registration is attempted; confirm in rmi_register_function().
 */
static int rmi_create_function(struct rmi_device *rmi_dev,
			       void *ctx, const struct pdt_entry *pdt)
{
	struct device *dev = &rmi_dev->dev;
	struct rmi_driver_data *data = dev_get_drvdata(dev);
	int *current_irq_count = ctx;
	struct rmi_function *fn;
	int i;
	int error;

	rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
			pdt->function_number);

	fn = kzalloc(sizeof(struct rmi_function) +
			BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
		GFP_KERNEL);
	if (!fn) {
		dev_err(dev, "Failed to allocate memory for F%02X\n",
			pdt->function_number);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&fn->node);
	rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);

	fn->rmi_dev = rmi_dev;

	fn->num_of_irqs = pdt->interrupt_source_count;
	fn->irq_pos = *current_irq_count;
	*current_irq_count += fn->num_of_irqs;

	for (i = 0; i < fn->num_of_irqs; i++)
		set_bit(fn->irq_pos + i, fn->irq_mask);

	error = rmi_register_function(fn);
	if (error)
		return error;

	if (pdt->function_number == 0x01)
		data->f01_container = fn;
	else if (pdt->function_number == 0x34)
		data->f34_container = fn;

	list_add_tail(&fn->node, &data->function_list);

	return RMI_SCAN_CONTINUE;
}
/*
 * rmi_enable_irq - re-enable the device interrupt (resume path).
 * @rmi_dev: the RMI device.
 * @clear_wake: also disable the IRQ's wakeup capability when the
 *              transport device may wake the system.
 *
 * No-op if already enabled; serialized by enabled_mutex.
 */
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq = pdata->irq;
	int irq_flags;
	int retval;

	mutex_lock(&data->enabled_mutex);

	if (data->enabled)
		goto out;

	enable_irq(irq);
	data->enabled = true;
	if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = disable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to disable irq for wake: %d\n",
				 retval);
	}

	/*
	 * Call rmi_process_interrupt_requests() after enabling irq,
	 * otherwise we may lose interrupt on edge-triggered systems.
	 */
	irq_flags = irq_get_trigger_type(pdata->irq);
	if (irq_flags & IRQ_TYPE_EDGE_BOTH)
		rmi_process_interrupt_requests(rmi_dev);

out:
	mutex_unlock(&data->enabled_mutex);
}
/*
 * rmi_disable_irq - disable the device interrupt (suspend path).
 * @rmi_dev: the RMI device.
 * @enable_wake: also arm the IRQ as a wakeup source when the transport
 *               device may wake the system.
 *
 * No-op if already disabled; serialized by enabled_mutex. Drains and
 * frees any attention payloads still queued in the FIFO so nothing
 * stale is processed on re-enable.
 */
void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int irq = pdata->irq;
	int retval, count;

	mutex_lock(&data->enabled_mutex);

	if (!data->enabled)
		goto out;

	data->enabled = false;
	disable_irq(irq);
	if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = enable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to enable irq for wake: %d\n",
				 retval);
	}

	/* make sure the fifo is clean */
	while (!kfifo_is_empty(&data->attn_fifo)) {
		count = kfifo_get(&data->attn_fifo, &attn_data);
		if (count)
			kfree(attn_data.data);
	}

out:
	mutex_unlock(&data->enabled_mutex);
}
/*
 * rmi_driver_suspend - suspend all functions, then disable the IRQ
 * (optionally arming it for wakeup). A function-suspend failure is only
 * warned about; the IRQ is disabled regardless and the error returned.
 */
int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
{
	int retval;

	retval = rmi_suspend_functions(rmi_dev);
	if (retval)
		dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
			 retval);

	rmi_disable_irq(rmi_dev, enable_wake);
	return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_suspend);
  789. int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
  790. {
  791. int retval;
  792. rmi_enable_irq(rmi_dev, clear_wake);
  793. retval = rmi_resume_functions(rmi_dev);
  794. if (retval)
  795. dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
  796. retval);
  797. return retval;
  798. }
  799. EXPORT_SYMBOL_GPL(rmi_driver_resume);
/*
 * rmi_driver_remove - driver-core remove callback: quiesce the IRQ,
 * tear down the irqdomain, remove the F34 sysfs attributes and free
 * every registered function. Always returns 0.
 */
static int rmi_driver_remove(struct device *dev)
{
	struct rmi_device *rmi_dev = to_rmi_device(dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

	rmi_disable_irq(rmi_dev, false);

	irq_domain_remove(data->irqdomain);
	data->irqdomain = NULL;

	rmi_f34_remove_sysfs(rmi_dev);
	rmi_free_function_list(rmi_dev);

	return 0;
}
#ifdef CONFIG_OF
/*
 * rmi_driver_of_probe - fill platform data from device-tree properties.
 *
 * Reads "syna,reset-delay-ms" into pdata->reset_delay_ms; the trailing 1
 * presumably marks the property as optional — confirm against
 * rmi_of_property_read_u32().  Returns 0 on success or a negative errno.
 */
static int rmi_driver_of_probe(struct device *dev,
			       struct rmi_device_platform_data *pdata)
{
	int retval;

	retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms,
					  "syna,reset-delay-ms", 1);
	if (retval)
		return retval;

	return 0;
}
#else
/* Without CONFIG_OF there is no device tree to read from. */
static inline int rmi_driver_of_probe(struct device *dev,
				      struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif
/*
 * rmi_probe_interrupts - discover the device's IRQ topology.
 * @data: driver data with data->rmi_dev already set
 *
 * Walks the PDT once to count the interrupt bits reported by the device's
 * functions, creates a linear irqdomain covering them, and allocates the
 * four bitmaps used for IRQ bookkeeping.  Returns 0 (or the non-negative
 * rmi_scan_pdt() result) on success, a negative errno on failure.
 */
int rmi_probe_interrupts(struct rmi_driver_data *data)
{
	struct rmi_device *rmi_dev = data->rmi_dev;
	struct device *dev = &rmi_dev->dev;
	struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
	int irq_count = 0;
	size_t size;
	int retval;

	/*
	 * We need to count the IRQs and allocate their storage before scanning
	 * the PDT and creating the function entries, because adding a new
	 * function can trigger events that result in the IRQ related storage
	 * being accessed.
	 */
	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
	data->bootloader_mode = false;

	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
	if (retval < 0) {
		dev_err(dev, "IRQ counting failed with code %d.\n", retval);
		return retval;
	}

	/* The scan callback flags bootloader mode as a side effect. */
	if (data->bootloader_mode)
		dev_warn(dev, "Device in bootloader mode.\n");

	/* Allocate and register a linear revmap irq_domain */
	data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
						   &irq_domain_simple_ops,
						   data);
	if (!data->irqdomain) {
		dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	data->irq_count = irq_count;
	/* One 8-bit interrupt status/enable register per 8 IRQ bits. */
	data->num_of_irq_regs = (data->irq_count + 7) / 8;

	/*
	 * One devm allocation backs four equally sized bitmaps; "size" is
	 * the byte length of one bitmap.  NOTE(review): the "+ size * n"
	 * arithmetic below advances in element units of irq_memory's
	 * pointee type, not bytes — the 4 * size element allocation above
	 * appears sized to cover that; confirm against the declared type
	 * of data->irq_memory.
	 */
	size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
	data->irq_memory = devm_kcalloc(dev, size, 4, GFP_KERNEL);
	if (!data->irq_memory) {
		dev_err(dev, "Failed to allocate memory for irq masks.\n");
		return -ENOMEM;
	}

	data->irq_status = data->irq_memory + size * 0;
	data->fn_irq_bits = data->irq_memory + size * 1;
	data->current_irq_mask = data->irq_memory + size * 2;
	data->new_irq_mask = data->irq_memory + size * 3;

	return retval;
}
  874. int rmi_init_functions(struct rmi_driver_data *data)
  875. {
  876. struct rmi_device *rmi_dev = data->rmi_dev;
  877. struct device *dev = &rmi_dev->dev;
  878. int irq_count = 0;
  879. int retval;
  880. rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
  881. retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
  882. if (retval < 0) {
  883. dev_err(dev, "Function creation failed with code %d.\n",
  884. retval);
  885. goto err_destroy_functions;
  886. }
  887. if (!data->f01_container) {
  888. dev_err(dev, "Missing F01 container!\n");
  889. retval = -EINVAL;
  890. goto err_destroy_functions;
  891. }
  892. retval = rmi_read_block(rmi_dev,
  893. data->f01_container->fd.control_base_addr + 1,
  894. data->current_irq_mask, data->num_of_irq_regs);
  895. if (retval < 0) {
  896. dev_err(dev, "%s: Failed to read current IRQ mask.\n",
  897. __func__);
  898. goto err_destroy_functions;
  899. }
  900. return 0;
  901. err_destroy_functions:
  902. rmi_free_function_list(rmi_dev);
  903. return retval;
  904. }
/*
 * rmi_driver_probe - bind the RMI4 core driver to a physical device.
 *
 * Sequence: optional DT probe, initial reset, PDT-properties read, IRQ
 * discovery, input-device setup, function creation, F34 sysfs, input
 * registration, IRQ wiring, and — if F01 is already bound — sensor enable.
 * Returns 0 on success or a negative errno.
 */
static int rmi_driver_probe(struct device *dev)
{
	struct rmi_driver *rmi_driver;
	struct rmi_driver_data *data;
	struct rmi_device_platform_data *pdata;
	struct rmi_device *rmi_dev;
	int retval;

	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
		__func__);

	/* The bus also matches non-physical devices; only handle physical. */
	if (!rmi_is_physical_device(dev)) {
		rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n");
		return -ENODEV;
	}

	rmi_dev = to_rmi_device(dev);
	rmi_driver = to_rmi_driver(dev->driver);
	rmi_dev->driver = rmi_driver;

	pdata = rmi_get_platform_data(rmi_dev);

	/* DT-backed transports get their pdata filled from the node. */
	if (rmi_dev->xport->dev->of_node) {
		retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata);
		if (retval)
			return retval;
	}

	data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->function_list);
	data->rmi_dev = rmi_dev;
	dev_set_drvdata(&rmi_dev->dev, data);

	/*
	 * Right before a warm boot, the sensor might be in some unusual state,
	 * such as F54 diagnostics, or F34 bootloader mode after a firmware
	 * or configuration update. In order to clear the sensor to a known
	 * state and/or apply any updates, we issue a initial reset to clear any
	 * previous settings and force it into normal operation.
	 *
	 * We have to do this before actually building the PDT because
	 * the reflash updates (if any) might cause various registers to move
	 * around.
	 *
	 * For a number of reasons, this initial reset may fail to return
	 * within the specified time, but we'll still be able to bring up the
	 * driver normally after that failure. This occurs most commonly in
	 * a cold boot situation (where then firmware takes longer to come up
	 * than from a warm boot) and the reset_delay_ms in the platform data
	 * has been set too short to accommodate that. Since the sensor will
	 * eventually come up and be usable, we don't want to just fail here
	 * and leave the customer's device unusable. So we warn them, and
	 * continue processing.
	 */
	retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
	if (retval < 0)
		dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n");

	retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props);
	if (retval < 0) {
		/*
		 * we'll print out a warning and continue since
		 * failure to get the PDT properties is not a cause to fail
		 */
		dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n",
			 PDT_PROPERTIES_LOCATION, retval);
	}

	mutex_init(&data->irq_mutex);
	mutex_init(&data->enabled_mutex);

	retval = rmi_probe_interrupts(data);
	if (retval)
		goto err;

	if (rmi_dev->xport->input) {
		/*
		 * The transport driver already has an input device.
		 * In some cases it is preferable to reuse the transport
		 * devices input device instead of creating a new one here.
		 * One example is some HID touchpads report "pass-through"
		 * button events are not reported by rmi registers.
		 */
		data->input = rmi_dev->xport->input;
	} else {
		data->input = devm_input_allocate_device(dev);
		if (!data->input) {
			dev_err(dev, "%s: Failed to allocate input device.\n",
				__func__);
			retval = -ENOMEM;
			goto err;
		}
		rmi_driver_set_input_params(rmi_dev, data->input);
		data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
						   "%s/input0", dev_name(dev));
	}

	retval = rmi_init_functions(data);
	if (retval)
		goto err;

	retval = rmi_f34_create_sysfs(rmi_dev);
	if (retval)
		goto err;

	if (data->input) {
		rmi_driver_set_input_name(rmi_dev, data->input);
		/* Transport-owned input devices are registered elsewhere. */
		if (!rmi_dev->xport->input) {
			retval = input_register_device(data->input);
			if (retval) {
				dev_err(dev, "%s: Failed to register input device.\n",
					__func__);
				goto err_destroy_functions;
			}
		}
	}

	retval = rmi_irq_init(rmi_dev);
	if (retval < 0)
		goto err_destroy_functions;

	if (data->f01_container->dev.driver) {
		/* Driver already bound, so enable ATTN now. */
		retval = rmi_enable_sensor(rmi_dev);
		if (retval)
			goto err_disable_irq;
	}

	return 0;

err_disable_irq:
	rmi_disable_irq(rmi_dev, false);
err_destroy_functions:
	rmi_free_function_list(rmi_dev);
err:
	return retval;
}
/* The single core driver instance bound to every RMI4 physical device. */
static struct rmi_driver rmi_physical_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "rmi4_physical",
		.bus = &rmi_bus_type,
		.probe = rmi_driver_probe,
		.remove = rmi_driver_remove,
	},
	.reset_handler = rmi_driver_reset_handler,
	.clear_irq_bits = rmi_driver_clear_irq_bits,
	.set_irq_bits = rmi_driver_set_irq_bits,
	.set_input_params = rmi_driver_set_input_params,
};
  1039. bool rmi_is_physical_driver(struct device_driver *drv)
  1040. {
  1041. return drv == &rmi_physical_driver.driver;
  1042. }
  1043. int __init rmi_register_physical_driver(void)
  1044. {
  1045. int error;
  1046. error = driver_register(&rmi_physical_driver.driver);
  1047. if (error) {
  1048. pr_err("%s: driver register failed, code=%d.\n", __func__,
  1049. error);
  1050. return error;
  1051. }
  1052. return 0;
  1053. }
/* Module-exit counterpart of rmi_register_physical_driver(). */
void __exit rmi_unregister_physical_driver(void)
{
	driver_unregister(&rmi_physical_driver.driver);
}