ec.c 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161
  1. /*
  2. * ec.c - ACPI Embedded Controller Driver (v3)
  3. *
  4. * Copyright (C) 2001-2015 Intel Corporation
  5. * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
  6. * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
  7. * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
  8. * 2004 Luming Yu <luming.yu@intel.com>
  9. * 2001, 2002 Andy Grover <andrew.grover@intel.com>
  10. * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  11. * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
  12. *
  13. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  14. *
  15. * This program is free software; you can redistribute it and/or modify
  16. * it under the terms of the GNU General Public License as published by
  17. * the Free Software Foundation; either version 2 of the License, or (at
  18. * your option) any later version.
  19. *
  20. * This program is distributed in the hope that it will be useful, but
  21. * WITHOUT ANY WARRANTY; without even the implied warranty of
  22. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  23. * General Public License for more details.
  24. *
  25. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  26. */
  27. /* Uncomment next line to get verbose printout */
  28. /* #define DEBUG */
  29. #define pr_fmt(fmt) "ACPI: EC: " fmt
  30. #include <linux/kernel.h>
  31. #include <linux/module.h>
  32. #include <linux/init.h>
  33. #include <linux/types.h>
  34. #include <linux/delay.h>
  35. #include <linux/interrupt.h>
  36. #include <linux/list.h>
  37. #include <linux/spinlock.h>
  38. #include <linux/slab.h>
  39. #include <linux/acpi.h>
  40. #include <linux/dmi.h>
  41. #include <asm/io.h>
  42. #include "internal.h"
  43. #define ACPI_EC_CLASS "embedded_controller"
  44. #define ACPI_EC_DEVICE_NAME "Embedded Controller"
  45. #define ACPI_EC_FILE_INFO "info"
  46. /* EC status register */
  47. #define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
  48. #define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
  49. #define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
  50. #define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
  51. #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
  52. /*
  53. * The SCI_EVT clearing timing is not defined by the ACPI specification.
  54. * This leads to lots of practical timing issues for the host EC driver.
  55. * The following variations are defined (from the target EC firmware's
  56. * perspective):
  57. * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
  58. * target can clear SCI_EVT at any time so long as the host can see
  59. * the indication by reading the status register (EC_SC). So the
  60. * host should re-check SCI_EVT after the first time the SCI_EVT
  61. * indication is seen, which is the same time the query request
  62. * (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
  63. * at any later time could indicate another event. Normally such
  64. * kind of EC firmware has implemented an event queue and will
  65. * return 0x00 to indicate "no outstanding event".
  66. * QUERY: After seeing the query request (QR_EC) written to the command
  67. * register (EC_CMD) by the host and having prepared the responding
  68. * event value in the data register (EC_DATA), the target can safely
  69. * clear SCI_EVT because the target can confirm that the current
  70. * event is being handled by the host. The host then should check
  71. * SCI_EVT right after reading the event response from the data
  72. * register (EC_DATA).
  73. * EVENT: After seeing the event response read from the data register
  74. * (EC_DATA) by the host, the target can clear SCI_EVT. As the
  75. * target requires time to notice the change in the data register
  76. * (EC_DATA), the host may be required to wait additional guarding
  77. * time before checking the SCI_EVT again. Such guarding may not be
  78. * necessary if the host is notified via another IRQ.
  79. */
  80. #define ACPI_EC_EVT_TIMING_STATUS 0x00
  81. #define ACPI_EC_EVT_TIMING_QUERY 0x01
  82. #define ACPI_EC_EVT_TIMING_EVENT 0x02
/* EC commands, written to the command register (EC_CMD) */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,	/* RD_EC: read a byte of EC space */
	ACPI_EC_COMMAND_WRITE = 0x81,	/* WR_EC: write a byte of EC space */
	ACPI_EC_BURST_ENABLE = 0x82,	/* BE_EC: enter burst mode */
	ACPI_EC_BURST_DISABLE = 0x83,	/* BD_EC: leave burst mode */
	ACPI_EC_COMMAND_QUERY = 0x84,	/* QR_EC: query a pending event */
};
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL 550 /* Default guard of 550us between EC accesses in polling mode */
#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
				* when trying to clear the EC */
#define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
/* Driver state bits kept in struct acpi_ec::flags (set/tested via bitops) */
enum {
	EC_FLAGS_QUERY_ENABLED,		/* Query is enabled */
	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
	EC_FLAGS_QUERY_GUARDING,	/* Guard for SCI_EVT check */
	EC_FLAGS_GPE_HANDLER_INSTALLED,	/* GPE handler installed */
	EC_FLAGS_EC_HANDLER_INSTALLED,	/* OpReg handler installed */
	EC_FLAGS_EVT_HANDLER_INSTALLED,	/* _Qxx handlers installed */
	EC_FLAGS_STARTED,		/* Driver is started */
	EC_FLAGS_STOPPED,		/* Driver is stopped */
	EC_FLAGS_GPE_MASKED,		/* GPE masked */
};

/* Transaction progress flags, kept in struct transaction::flags */
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
  110. /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
  111. static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
  112. module_param(ec_delay, uint, 0644);
  113. MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
  114. static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
  115. module_param(ec_max_queries, uint, 0644);
  116. MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
  117. static bool ec_busy_polling __read_mostly;
  118. module_param(ec_busy_polling, bool, 0644);
  119. MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
  120. static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
  121. module_param(ec_polling_guard, uint, 0644);
  122. MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
  123. static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
  124. /*
  125. * If the number of false interrupts per one transaction exceeds
  126. * this threshold, will think there is a GPE storm happened and
  127. * will disable the GPE for normal transaction.
  128. */
  129. static unsigned int ec_storm_threshold __read_mostly = 8;
  130. module_param(ec_storm_threshold, uint, 0644);
  131. MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
  132. static bool ec_freeze_events __read_mostly = false;
  133. module_param(ec_freeze_events, bool, 0644);
  134. MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
  135. static bool ec_no_wakeup __read_mostly;
  136. module_param(ec_no_wakeup, bool, 0644);
  137. MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
/*
 * A registered handler for one _Qxx event number. Reference counted
 * (kref) because a handler may be referenced by an in-flight query.
 */
struct acpi_ec_query_handler {
	struct list_head node;		/* Membership in the EC's handler list */
	acpi_ec_query_func func;	/* C callback; presumably used when no handle */
	acpi_handle handle;		/* ACPI handle, presumably the _Qxx method */
	void *data;			/* Opaque argument passed to func */
	u8 query_bit;			/* Event number this handler serves */
	struct kref kref;		/* Lifetime of this object */
};
/*
 * One in-flight EC command: the command byte plus its write payload and
 * read buffer. Progress indices are advanced by advance_transaction().
 */
struct transaction {
	const u8 *wdata;		/* Bytes to write after the command byte */
	u8 *rdata;			/* Buffer receiving bytes read back */
	unsigned short irq_count;	/* False-IRQ counter for storm detection */
	u8 command;			/* enum ec_command value */
	u8 wi;				/* Write index into wdata */
	u8 ri;				/* Read index into rdata */
	u8 wlen;			/* Total bytes to write */
	u8 rlen;			/* Total bytes to read */
	u8 flags;			/* ACPI_EC_COMMAND_POLL/COMPLETE */
};
/* A single QR_EC request, processed from the query workqueue */
struct acpi_ec_query {
	struct transaction transaction;		/* The QR_EC transaction itself */
	struct work_struct work;		/* Presumably runs acpi_ec_event_processor() */
	struct acpi_ec_query_handler *handler;	/* Handler matched to the event value */
};
static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
static void advance_transaction(struct acpi_ec *ec);
static void acpi_ec_event_handler(struct work_struct *work);
static void acpi_ec_event_processor(struct work_struct *work);

/*
 * boot_ec: the EC available during early boot (boot_ec_is_ecdt records
 * whether it was described by the ECDT); first_ec: exported for other
 * ACPI code that performs EC accesses.
 */
struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);
static bool boot_ec_is_ecdt = false;
static struct workqueue_struct *ec_query_wq;

/* Quirk switches (plain ints, not bitops); presumably set during probe */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
  174. /* --------------------------------------------------------------------------
  175. * Logging/Debugging
  176. * -------------------------------------------------------------------------- */
  177. /*
  178. * Splitters used by the developers to track the boundary of the EC
  179. * handling processes.
  180. */
#ifdef DEBUG
#define EC_DBG_SEP " "
#define EC_DBG_DRV "+++++"
#define EC_DBG_STM "====="
#define EC_DBG_REQ "*****"
#define EC_DBG_EVT "#####"
#else
#define EC_DBG_SEP ""
#define EC_DBG_DRV
#define EC_DBG_STM
#define EC_DBG_REQ
#define EC_DBG_EVT
#endif

/* Base loggers: append a trailing newline to the given format */
#define ec_log_raw(fmt, ...) \
	pr_info(fmt "\n", ##__VA_ARGS__)
#define ec_dbg_raw(fmt, ...) \
	pr_debug(fmt "\n", ##__VA_ARGS__)
/* Bracket the message with the splitter (non-empty only with DEBUG) */
#define ec_log(filter, fmt, ...) \
	ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
#define ec_dbg(filter, fmt, ...) \
	ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
/* Per-area wrappers: driver, state machine, request, event */
#define ec_log_drv(fmt, ...) \
	ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_drv(fmt, ...) \
	ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
#define ec_dbg_stm(fmt, ...) \
	ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
#define ec_dbg_req(fmt, ...) \
	ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
#define ec_dbg_evt(fmt, ...) \
	ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
/* Prefix the message with the EC's current reference count */
#define ec_dbg_ref(ec, fmt, ...) \
	ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
  214. /* --------------------------------------------------------------------------
  215. * Device Flags
  216. * -------------------------------------------------------------------------- */
  217. static bool acpi_ec_started(struct acpi_ec *ec)
  218. {
  219. return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
  220. !test_bit(EC_FLAGS_STOPPED, &ec->flags);
  221. }
  222. static bool acpi_ec_event_enabled(struct acpi_ec *ec)
  223. {
  224. /*
  225. * There is an OSPM early stage logic. During the early stages
  226. * (boot/resume), OSPMs shouldn't enable the event handling, only
  227. * the EC transactions are allowed to be performed.
  228. */
  229. if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
  230. return false;
  231. /*
  232. * However, disabling the event handling is experimental for late
  233. * stage (suspend), and is controlled by the boot parameter of
  234. * "ec_freeze_events":
  235. * 1. true: The EC event handling is disabled before entering
  236. * the noirq stage.
  237. * 2. false: The EC event handling is automatically disabled as
  238. * soon as the EC driver is stopped.
  239. */
  240. if (ec_freeze_events)
  241. return acpi_ec_started(ec);
  242. else
  243. return test_bit(EC_FLAGS_STARTED, &ec->flags);
  244. }
/*
 * The EC is flushed when only a single reference remains — presumably
 * the one taken when the EC was started; all transient holders are gone.
 */
static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}
  249. /* --------------------------------------------------------------------------
  250. * EC Registers
  251. * -------------------------------------------------------------------------- */
  252. static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
  253. {
  254. u8 x = inb(ec->command_addr);
  255. ec_dbg_raw("EC_SC(R) = 0x%2.2x "
  256. "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
  257. x,
  258. !!(x & ACPI_EC_FLAG_SCI),
  259. !!(x & ACPI_EC_FLAG_BURST),
  260. !!(x & ACPI_EC_FLAG_CMD),
  261. !!(x & ACPI_EC_FLAG_IBF),
  262. !!(x & ACPI_EC_FLAG_OBF));
  263. return x;
  264. }
  265. static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
  266. {
  267. u8 x = inb(ec->data_addr);
  268. ec->timestamp = jiffies;
  269. ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
  270. return x;
  271. }
  272. static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
  273. {
  274. ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
  275. outb(command, ec->command_addr);
  276. ec->timestamp = jiffies;
  277. }
  278. static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
  279. {
  280. ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
  281. outb(data, ec->data_addr);
  282. ec->timestamp = jiffies;
  283. }
  284. #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
  285. static const char *acpi_ec_cmd_string(u8 cmd)
  286. {
  287. switch (cmd) {
  288. case 0x80:
  289. return "RD_EC";
  290. case 0x81:
  291. return "WR_EC";
  292. case 0x82:
  293. return "BE_EC";
  294. case 0x83:
  295. return "BD_EC";
  296. case 0x84:
  297. return "QR_EC";
  298. }
  299. return "UNKNOWN";
  300. }
  301. #else
  302. #define acpi_ec_cmd_string(cmd) "UNDEF"
  303. #endif
  304. /* --------------------------------------------------------------------------
  305. * GPE Registers
  306. * -------------------------------------------------------------------------- */
  307. static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
  308. {
  309. acpi_event_status gpe_status = 0;
  310. (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
  311. return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
  312. }
/*
 * Enable the EC GPE. @open selects the reference-counted enable
 * (acpi_enable_gpe) used when bringing the device up; otherwise a raw
 * EN=1 write (acpi_set_gpe) is used, which is only valid while at least
 * one request holds a reference.
 */
static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger GPE. So
		 * software need to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec);
	}
}
  331. static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
  332. {
  333. if (close)
  334. acpi_disable_gpe(NULL, ec->gpe);
  335. else {
  336. BUG_ON(ec->reference_count < 1);
  337. acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
  338. }
  339. }
  340. static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
  341. {
  342. /*
  343. * GPE STS is a W1C register, which means:
  344. * 1. Software can clear it without worrying about clearing other
  345. * GPEs' STS bits when the hardware sets them in parallel.
  346. * 2. As long as software can ensure only clearing it when it is
  347. * set, hardware won't set it in parallel.
  348. * So software can clear GPE in any contexts.
  349. * Warning: do not move the check into advance_transaction() as the
  350. * EC commands will be sent without GPE raised.
  351. */
  352. if (!acpi_ec_is_gpe_raised(ec))
  353. return;
  354. acpi_clear_gpe(NULL, ec->gpe);
  355. }
  356. /* --------------------------------------------------------------------------
  357. * Transaction Management
  358. * -------------------------------------------------------------------------- */
  359. static void acpi_ec_submit_request(struct acpi_ec *ec)
  360. {
  361. ec->reference_count++;
  362. if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
  363. ec->reference_count == 1)
  364. acpi_ec_enable_gpe(ec, true);
  365. }
  366. static void acpi_ec_complete_request(struct acpi_ec *ec)
  367. {
  368. bool flushed = false;
  369. ec->reference_count--;
  370. if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
  371. ec->reference_count == 0)
  372. acpi_ec_disable_gpe(ec, true);
  373. flushed = acpi_ec_flushed(ec);
  374. if (flushed)
  375. wake_up(&ec->wait);
  376. }
  377. static void acpi_ec_mask_gpe(struct acpi_ec *ec)
  378. {
  379. if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
  380. acpi_ec_disable_gpe(ec, false);
  381. ec_dbg_drv("Polling enabled");
  382. set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
  383. }
  384. }
  385. static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
  386. {
  387. if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
  388. clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
  389. acpi_ec_enable_gpe(ec, false);
  390. ec_dbg_drv("Polling disabled");
  391. }
  392. }
  393. /*
  394. * acpi_ec_submit_flushable_request() - Increase the reference count unless
  395. * the flush operation is not in
  396. * progress
  397. * @ec: the EC device
  398. *
  399. * This function must be used before taking a new action that should hold
  400. * the reference count. If this function returns false, then the action
  401. * must be discarded or it will prevent the flush operation from being
  402. * completed.
  403. */
  404. static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
  405. {
  406. if (!acpi_ec_started(ec))
  407. return false;
  408. acpi_ec_submit_request(ec);
  409. return true;
  410. }
/*
 * Called when SCI_EVT is observed: mask the GPE (enter polling mode)
 * and, if event handling is enabled, queue the event work that will
 * issue QR_EC. Only one query submission may be pending at a time
 * (EC_FLAGS_QUERY_PENDING); it is unblocked by acpi_ec_complete_query().
 */
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
	acpi_ec_mask_gpe(ec);
	if (!acpi_ec_event_enabled(ec))
		return;
	if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
		ec_dbg_evt("Command(%s) submitted/blocked",
			   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
		ec->nr_pending_queries++;
		schedule_work(&ec->work);
	}
}
  423. static void acpi_ec_complete_query(struct acpi_ec *ec)
  424. {
  425. if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
  426. ec_dbg_evt("Command(%s) unblocked",
  427. acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
  428. acpi_ec_unmask_gpe(ec);
  429. }
/* Enable event handling; callers in this file hold ec->lock around this. */
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
	if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event unblocked");
	/*
	 * Unconditionally invoke this once after enabling the event
	 * handling mechanism to detect the pending events.
	 */
	advance_transaction(ec);
}
/* Counterpart of __acpi_ec_enable_event(); callers hold ec->lock. */
static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
{
	if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
		ec_log_drv("event blocked");
}
  445. /*
  446. * Process _Q events that might have accumulated in the EC.
  447. * Run with locked ec mutex.
  448. */
  449. static void acpi_ec_clear(struct acpi_ec *ec)
  450. {
  451. int i, status;
  452. u8 value = 0;
  453. for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
  454. status = acpi_ec_query(ec, &value);
  455. if (status || !value)
  456. break;
  457. }
  458. if (unlikely(i == ACPI_EC_CLEAR_MAX))
  459. pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
  460. else
  461. pr_info("%d stale EC events cleared\n", i);
  462. }
  463. static void acpi_ec_enable_event(struct acpi_ec *ec)
  464. {
  465. unsigned long flags;
  466. spin_lock_irqsave(&ec->lock, flags);
  467. if (acpi_ec_started(ec))
  468. __acpi_ec_enable_event(ec);
  469. spin_unlock_irqrestore(&ec->lock, flags);
  470. /* Drain additional events if hardware requires that */
  471. if (EC_FLAGS_CLEAR_ON_RESUME)
  472. acpi_ec_clear(ec);
  473. }
  474. #ifdef CONFIG_PM_SLEEP
  475. static bool acpi_ec_query_flushed(struct acpi_ec *ec)
  476. {
  477. bool flushed;
  478. unsigned long flags;
  479. spin_lock_irqsave(&ec->lock, flags);
  480. flushed = !ec->nr_pending_queries;
  481. spin_unlock_irqrestore(&ec->lock, flags);
  482. return flushed;
  483. }
  484. static void __acpi_ec_flush_event(struct acpi_ec *ec)
  485. {
  486. /*
  487. * When ec_freeze_events is true, we need to flush events in
  488. * the proper position before entering the noirq stage.
  489. */
  490. wait_event(ec->wait, acpi_ec_query_flushed(ec));
  491. if (ec_query_wq)
  492. flush_workqueue(ec_query_wq);
  493. }
  494. static void acpi_ec_disable_event(struct acpi_ec *ec)
  495. {
  496. unsigned long flags;
  497. spin_lock_irqsave(&ec->lock, flags);
  498. __acpi_ec_disable_event(ec);
  499. spin_unlock_irqrestore(&ec->lock, flags);
  500. __acpi_ec_flush_event(ec);
  501. }
  502. void acpi_ec_flush_work(void)
  503. {
  504. if (first_ec)
  505. __acpi_ec_flush_event(first_ec);
  506. flush_scheduled_work();
  507. }
  508. #endif /* CONFIG_PM_SLEEP */
  509. static bool acpi_ec_guard_event(struct acpi_ec *ec)
  510. {
  511. bool guarded = true;
  512. unsigned long flags;
  513. spin_lock_irqsave(&ec->lock, flags);
  514. /*
  515. * If firmware SCI_EVT clearing timing is "event", we actually
  516. * don't know when the SCI_EVT will be cleared by firmware after
  517. * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
  518. * acceptable period.
  519. *
  520. * The guarding period begins when EC_FLAGS_QUERY_PENDING is
  521. * flagged, which means SCI_EVT check has just been performed.
  522. * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
  523. * guarding should have already been performed (via
  524. * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
  525. * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
  526. * ACPI_EC_COMMAND_POLL state immediately.
  527. */
  528. if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
  529. ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
  530. !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
  531. (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
  532. guarded = false;
  533. spin_unlock_irqrestore(&ec->lock, flags);
  534. return guarded;
  535. }
  536. static int ec_transaction_polled(struct acpi_ec *ec)
  537. {
  538. unsigned long flags;
  539. int ret = 0;
  540. spin_lock_irqsave(&ec->lock, flags);
  541. if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
  542. ret = 1;
  543. spin_unlock_irqrestore(&ec->lock, flags);
  544. return ret;
  545. }
  546. static int ec_transaction_completed(struct acpi_ec *ec)
  547. {
  548. unsigned long flags;
  549. int ret = 0;
  550. spin_lock_irqsave(&ec->lock, flags);
  551. if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
  552. ret = 1;
  553. spin_unlock_irqrestore(&ec->lock, flags);
  554. return ret;
  555. }
/*
 * Advance the current transaction to @flag (ACPI_EC_COMMAND_POLL or
 * ACPI_EC_COMMAND_COMPLETE). For QR_EC transactions, also perform the
 * SCI_EVT bookkeeping matching the firmware's clearing timing (see the
 * ACPI_EC_EVT_TIMING_* description at the top of this file).
 */
static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		/* STATUS timing: next query allowed once QR_EC is accepted */
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
		    flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_complete_query(ec);
		/* QUERY timing: next query allowed once the value is read */
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_query(ec);
		/* EVENT timing: require a guard period before re-checking */
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
	}
}
/*
 * The EC transaction state machine, entered from both the GPE handler
 * and the polling path. Clears the GPE STS bit, samples EC_SC once, and
 * advances the current transaction according to the sampled status.
 */
static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	acpi_ec_clear_gpe(ec);
	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * Another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is then allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		/* Command byte already accepted: move the data bytes */
		if (t->wlen > t->wi) {
			/* Write the next byte once IBF has drained */
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			/* Read the next byte once OBF is set (OBF is bit 0) */
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			/* Write-only transaction: done when IBF drains */
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			/*
			 * Quirk: SCI_EVT vanished before QR_EC was issued;
			 * complete the query in software with event 0x00.
			 */
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			/* Start the transaction by writing the command byte */
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If SCI bit is set, then don't think it's a false IRQ
	 * otherwise will take a not handled IRQ as a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_mask_gpe(ec);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}
  661. static void start_transaction(struct acpi_ec *ec)
  662. {
  663. ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
  664. ec->curr->flags = 0;
  665. }
/*
 * ec_guard() - wait for the current transaction to complete
 *
 * Enforces the per-controller guarding interval between consecutive EC
 * accesses, then either busy-polls (noirq/early-boot mode) or sleeps on
 * ec->wait for the GPE handler to complete the transaction.
 *
 * Return: 0 when the transaction completed, -ETIME on timeout.
 */
static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec->polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	/* Ensure guarding period before polling EC status */
	do {
		if (ec->busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling
			 * 1. Wait the transaction to be completed by the
			 *    GPE handler after the transaction enters
			 *    ACPI_EC_COMMAND_POLL state.
			 * 2. A special guarding logic is also required
			 *    for event clearing mode "event" before the
			 *    transaction enters ACPI_EC_COMMAND_POLL
			 *    state.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
	} while (time_before(jiffies, timeout));
	return -ETIME;
}
/*
 * ec_poll() - drive the current transaction to completion
 *
 * Alternates between waiting (ec_guard()) and manually advancing the
 * state machine for up to ec_delay ms; on timeout the transaction is
 * restarted, up to 5 times, before giving up with -ETIME.
 */
static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
				      msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			/* Advance manually in case an IRQ was lost. */
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}
/*
 * acpi_ec_transaction_unlocked() - execute one EC transaction
 *
 * Caller must hold ec->mutex (and the ACPI global lock if required).
 * Installs @t as the current transaction under ec->lock, polls it to
 * completion, then tears it down.
 *
 * Return: 0 on success, -EINVAL if the EC is not accepting requests,
 * or -ETIME on timeout.
 */
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	/* Undo IRQ-storm masking if the threshold was reached. */
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_unmask_gpe(ec);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}
  751. static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
  752. {
  753. int status;
  754. u32 glk;
  755. if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
  756. return -EINVAL;
  757. if (t->rdata)
  758. memset(t->rdata, 0, t->rlen);
  759. mutex_lock(&ec->mutex);
  760. if (ec->global_lock) {
  761. status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
  762. if (ACPI_FAILURE(status)) {
  763. status = -ENODEV;
  764. goto unlock;
  765. }
  766. }
  767. status = acpi_ec_transaction_unlocked(ec, t);
  768. if (ec->global_lock)
  769. acpi_release_global_lock(glk);
  770. unlock:
  771. mutex_unlock(&ec->mutex);
  772. return status;
  773. }
  774. static int acpi_ec_burst_enable(struct acpi_ec *ec)
  775. {
  776. u8 d;
  777. struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
  778. .wdata = NULL, .rdata = &d,
  779. .wlen = 0, .rlen = 1};
  780. return acpi_ec_transaction(ec, &t);
  781. }
  782. static int acpi_ec_burst_disable(struct acpi_ec *ec)
  783. {
  784. struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
  785. .wdata = NULL, .rdata = NULL,
  786. .wlen = 0, .rlen = 0};
  787. return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
  788. acpi_ec_transaction(ec, &t) : 0;
  789. }
  790. static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
  791. {
  792. int result;
  793. u8 d;
  794. struct transaction t = {.command = ACPI_EC_COMMAND_READ,
  795. .wdata = &address, .rdata = &d,
  796. .wlen = 1, .rlen = 1};
  797. result = acpi_ec_transaction(ec, &t);
  798. *data = d;
  799. return result;
  800. }
  801. static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
  802. {
  803. u8 wdata[2] = { address, data };
  804. struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
  805. .wdata = wdata, .rdata = NULL,
  806. .wlen = 2, .rlen = 0};
  807. return acpi_ec_transaction(ec, &t);
  808. }
  809. int ec_read(u8 addr, u8 *val)
  810. {
  811. int err;
  812. u8 temp_data;
  813. if (!first_ec)
  814. return -ENODEV;
  815. err = acpi_ec_read(first_ec, addr, &temp_data);
  816. if (!err) {
  817. *val = temp_data;
  818. return 0;
  819. }
  820. return err;
  821. }
  822. EXPORT_SYMBOL(ec_read);
  823. int ec_write(u8 addr, u8 val)
  824. {
  825. int err;
  826. if (!first_ec)
  827. return -ENODEV;
  828. err = acpi_ec_write(first_ec, addr, val);
  829. return err;
  830. }
  831. EXPORT_SYMBOL(ec_write);
  832. int ec_transaction(u8 command,
  833. const u8 *wdata, unsigned wdata_len,
  834. u8 *rdata, unsigned rdata_len)
  835. {
  836. struct transaction t = {.command = command,
  837. .wdata = wdata, .rdata = rdata,
  838. .wlen = wdata_len, .rlen = rdata_len};
  839. if (!first_ec)
  840. return -ENODEV;
  841. return acpi_ec_transaction(first_ec, &t);
  842. }
  843. EXPORT_SYMBOL(ec_transaction);
  844. /* Get the handle to the EC device */
  845. acpi_handle ec_get_handle(void)
  846. {
  847. if (!first_ec)
  848. return NULL;
  849. return first_ec->handle;
  850. }
  851. EXPORT_SYMBOL(ec_get_handle);
/*
 * acpi_ec_start() - allow the EC to process transactions and events
 * @resuming: true when restarting after suspend; skips taking a new
 *            driver reference since the boot-time one is still held.
 */
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}
  867. static bool acpi_ec_stopped(struct acpi_ec *ec)
  868. {
  869. unsigned long flags;
  870. bool flushed;
  871. spin_lock_irqsave(&ec->lock, flags);
  872. flushed = acpi_ec_flushed(ec);
  873. spin_unlock_irqrestore(&ec->lock, flags);
  874. return flushed;
  875. }
/*
 * acpi_ec_stop() - stop the EC, flushing in-flight requests first
 * @suspending: true when suspending; keeps the driver reference so the
 *              GPE stays usable for wakeup (unless events are frozen).
 *
 * Drops ec->lock while waiting for outstanding requests to drain, then
 * re-acquires it to finish the state change.
 */
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		/* Must not hold the lock while waiting for the flush. */
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		} else if (!ec_freeze_events)
			__acpi_ec_disable_event(ec);
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}
  898. static void acpi_ec_enter_noirq(struct acpi_ec *ec)
  899. {
  900. unsigned long flags;
  901. spin_lock_irqsave(&ec->lock, flags);
  902. ec->busy_polling = true;
  903. ec->polling_guard = 0;
  904. ec_log_drv("interrupt blocked");
  905. spin_unlock_irqrestore(&ec->lock, flags);
  906. }
  907. static void acpi_ec_leave_noirq(struct acpi_ec *ec)
  908. {
  909. unsigned long flags;
  910. spin_lock_irqsave(&ec->lock, flags);
  911. ec->busy_polling = ec_busy_polling;
  912. ec->polling_guard = ec_polling_guard;
  913. ec_log_drv("interrupt unblocked");
  914. spin_unlock_irqrestore(&ec->lock, flags);
  915. }
  916. void acpi_ec_block_transactions(void)
  917. {
  918. struct acpi_ec *ec = first_ec;
  919. if (!ec)
  920. return;
  921. mutex_lock(&ec->mutex);
  922. /* Prevent transactions from being carried out */
  923. acpi_ec_stop(ec, true);
  924. mutex_unlock(&ec->mutex);
  925. }
  926. void acpi_ec_unblock_transactions(void)
  927. {
  928. /*
  929. * Allow transactions to happen again (this function is called from
  930. * atomic context during wakeup, so we don't need to acquire the mutex).
  931. */
  932. if (first_ec)
  933. acpi_ec_start(first_ec, true);
  934. }
  935. void acpi_ec_mark_gpe_for_wake(void)
  936. {
  937. if (first_ec && !ec_no_wakeup)
  938. acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
  939. }
  940. void acpi_ec_set_gpe_wake_mask(u8 action)
  941. {
  942. if (first_ec && !ec_no_wakeup)
  943. acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
  944. }
  945. void acpi_ec_dispatch_gpe(void)
  946. {
  947. if (first_ec)
  948. acpi_dispatch_gpe(NULL, first_ec->gpe);
  949. }
  950. /* --------------------------------------------------------------------------
  951. Event Management
  952. -------------------------------------------------------------------------- */
  953. static struct acpi_ec_query_handler *
  954. acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
  955. {
  956. struct acpi_ec_query_handler *handler;
  957. mutex_lock(&ec->mutex);
  958. list_for_each_entry(handler, &ec->list, node) {
  959. if (value == handler->query_bit) {
  960. kref_get(&handler->kref);
  961. mutex_unlock(&ec->mutex);
  962. return handler;
  963. }
  964. }
  965. mutex_unlock(&ec->mutex);
  966. return NULL;
  967. }
  968. static void acpi_ec_query_handler_release(struct kref *kref)
  969. {
  970. struct acpi_ec_query_handler *handler =
  971. container_of(kref, struct acpi_ec_query_handler, kref);
  972. kfree(handler);
  973. }
  974. static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
  975. {
  976. kref_put(&handler->kref, acpi_ec_query_handler_release);
  977. }
  978. int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
  979. acpi_handle handle, acpi_ec_query_func func,
  980. void *data)
  981. {
  982. struct acpi_ec_query_handler *handler =
  983. kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
  984. if (!handler)
  985. return -ENOMEM;
  986. handler->query_bit = query_bit;
  987. handler->handle = handle;
  988. handler->func = func;
  989. handler->data = data;
  990. mutex_lock(&ec->mutex);
  991. kref_init(&handler->kref);
  992. list_add(&handler->node, &ec->list);
  993. mutex_unlock(&ec->mutex);
  994. return 0;
  995. }
  996. EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
  997. static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
  998. bool remove_all, u8 query_bit)
  999. {
  1000. struct acpi_ec_query_handler *handler, *tmp;
  1001. LIST_HEAD(free_list);
  1002. mutex_lock(&ec->mutex);
  1003. list_for_each_entry_safe(handler, tmp, &ec->list, node) {
  1004. if (remove_all || query_bit == handler->query_bit) {
  1005. list_del_init(&handler->node);
  1006. list_add(&handler->node, &free_list);
  1007. }
  1008. }
  1009. mutex_unlock(&ec->mutex);
  1010. list_for_each_entry_safe(handler, tmp, &free_list, node)
  1011. acpi_ec_put_query_handler(handler);
  1012. }
  1013. void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
  1014. {
  1015. acpi_ec_remove_query_handlers(ec, false, query_bit);
  1016. }
  1017. EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
  1018. static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
  1019. {
  1020. struct acpi_ec_query *q;
  1021. struct transaction *t;
  1022. q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
  1023. if (!q)
  1024. return NULL;
  1025. INIT_WORK(&q->work, acpi_ec_event_processor);
  1026. t = &q->transaction;
  1027. t->command = ACPI_EC_COMMAND_QUERY;
  1028. t->rdata = pval;
  1029. t->rlen = 1;
  1030. return q;
  1031. }
  1032. static void acpi_ec_delete_query(struct acpi_ec_query *q)
  1033. {
  1034. if (q) {
  1035. if (q->handler)
  1036. acpi_ec_put_query_handler(q->handler);
  1037. kfree(q);
  1038. }
  1039. }
  1040. static void acpi_ec_event_processor(struct work_struct *work)
  1041. {
  1042. struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
  1043. struct acpi_ec_query_handler *handler = q->handler;
  1044. ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
  1045. if (handler->func)
  1046. handler->func(handler->data);
  1047. else if (handler->handle)
  1048. acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
  1049. ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
  1050. acpi_ec_delete_query(q);
  1051. }
/*
 * acpi_ec_query() - issue QR_EC and schedule the matching _Qxx handler
 * @data: optional out parameter receiving the raw query value.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -ENODATA when
 * the EC returned 0 or no handler is registered, -EBUSY when the work
 * item is already queued.  NOTE(review): when the transaction fails AND
 * the value is 0, the transaction's error code is overridden by
 * -ENODATA — the original error is lost in that case.
 */
static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
	u8 value = 0;
	int result;
	struct acpi_ec_query *q;

	q = acpi_ec_create_query(&value);
	if (!q)
		return -ENOMEM;

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &q->transaction);
	if (!value)
		result = -ENODATA;
	if (result)
		goto err_exit;

	q->handler = acpi_ec_get_query_handler_by_value(ec, value);
	if (!q->handler) {
		result = -ENODATA;
		goto err_exit;
	}

	/*
	 * It is reported that _Qxx are evaluated in a parallel way on
	 * Windows:
	 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
	 *
	 * Put this log entry before schedule_work() in order to make
	 * it appearing before any other log entries occurred during the
	 * work queue execution.
	 */
	ec_dbg_evt("Query(0x%02x) scheduled", value);
	if (!queue_work(ec_query_wq, &q->work)) {
		ec_dbg_evt("Query(0x%02x) overlapped", value);
		result = -EBUSY;
	}

err_exit:
	if (result)
		acpi_ec_delete_query(q);
	if (data)
		*data = value;
	return result;
}
/*
 * acpi_ec_check_event() - post-event guard for "event" clearing mode
 *
 * After the event work finishes, wait out the guarding interval; if it
 * expires without completion and no transaction took over meanwhile,
 * advance the state machine once so a still-pending SCI_EVT is not
 * lost.
 */
static void acpi_ec_check_event(struct acpi_ec *ec)
{
	unsigned long flags;

	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		if (ec_guard(ec)) {
			spin_lock_irqsave(&ec->lock, flags);
			/*
			 * Take care of the SCI_EVT unless no one else is
			 * taking care of it.
			 */
			if (!ec->curr)
				advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		}
	}
}
/*
 * acpi_ec_event_handler() - work handler draining pending SCI_EVTs
 *
 * Runs one acpi_ec_query() per pending event.  ec->lock is dropped
 * around the query (which performs a full transaction) and re-taken to
 * update the pending count.
 */
static void acpi_ec_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irqsave(&ec->lock, flags);
	while (ec->nr_pending_queries) {
		spin_unlock_irqrestore(&ec->lock, flags);
		(void)acpi_ec_query(ec, NULL);
		spin_lock_irqsave(&ec->lock, flags);
		ec->nr_pending_queries--;
		/*
		 * Before exit, make sure that this work item can be
		 * scheduled again. There might be QR_EC failures, leaving
		 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
		 * item from being scheduled again.
		 */
		if (!ec->nr_pending_queries) {
			if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
			    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
				acpi_ec_complete_query(ec);
		}
	}
	spin_unlock_irqrestore(&ec->lock, flags);

	ec_dbg_evt("Event stopped");
	acpi_ec_check_event(ec);
}
  1139. static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
  1140. u32 gpe_number, void *data)
  1141. {
  1142. unsigned long flags;
  1143. struct acpi_ec *ec = data;
  1144. spin_lock_irqsave(&ec->lock, flags);
  1145. advance_transaction(ec);
  1146. spin_unlock_irqrestore(&ec->lock, flags);
  1147. return ACPI_INTERRUPT_HANDLED;
  1148. }
  1149. /* --------------------------------------------------------------------------
  1150. * Address Space Management
  1151. * -------------------------------------------------------------------------- */
/*
 * acpi_ec_space_handler() - ACPI EmbeddedControl operation region handler
 *
 * Translates AML field accesses into byte-wise EC read/write
 * transactions; multi-byte and busy-polled accesses are wrapped in
 * burst mode.  NOTE(review): only the status of the LAST byte
 * transferred is checked — an earlier failing byte's error is silently
 * overwritten.
 */
static acpi_status
acpi_ec_space_handler(u32 function, acpi_physical_address address,
		      u32 bits, u64 *value64,
		      void *handler_context, void *region_context)
{
	struct acpi_ec *ec = handler_context;
	int result = 0, i, bytes = bits / 8;
	u8 *value = (u8 *)value64;

	/* EC space is a byte-addressed 0x00-0xFF window. */
	if ((address > 0xFF) || !value || !handler_context)
		return AE_BAD_PARAMETER;

	if (function != ACPI_READ && function != ACPI_WRITE)
		return AE_BAD_PARAMETER;

	/* Burst mode reduces latency for multi-byte/polled accesses. */
	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_enable(ec);

	for (i = 0; i < bytes; ++i, ++address, ++value)
		result = (function == ACPI_READ) ?
			acpi_ec_read(ec, address, value) :
			acpi_ec_write(ec, address, *value);

	if (ec->busy_polling || bits > 8)
		acpi_ec_burst_disable(ec);

	/* Map errno-style results onto ACPICA status codes. */
	switch (result) {
	case -EINVAL:
		return AE_BAD_PARAMETER;
	case -ENODEV:
		return AE_NOT_FOUND;
	case -ETIME:
		return AE_TIME;
	default:
		return AE_OK;
	}
}
  1183. /* --------------------------------------------------------------------------
  1184. * Driver Interface
  1185. * -------------------------------------------------------------------------- */
  1186. static acpi_status
  1187. ec_parse_io_ports(struct acpi_resource *resource, void *context);
  1188. static void acpi_ec_free(struct acpi_ec *ec)
  1189. {
  1190. if (first_ec == ec)
  1191. first_ec = NULL;
  1192. if (boot_ec == ec)
  1193. boot_ec = NULL;
  1194. kfree(ec);
  1195. }
  1196. static struct acpi_ec *acpi_ec_alloc(void)
  1197. {
  1198. struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
  1199. if (!ec)
  1200. return NULL;
  1201. mutex_init(&ec->mutex);
  1202. init_waitqueue_head(&ec->wait);
  1203. INIT_LIST_HEAD(&ec->list);
  1204. spin_lock_init(&ec->lock);
  1205. INIT_WORK(&ec->work, acpi_ec_event_handler);
  1206. ec->timestamp = jiffies;
  1207. ec->busy_polling = true;
  1208. ec->polling_guard = 0;
  1209. return ec;
  1210. }
  1211. static acpi_status
  1212. acpi_ec_register_query_methods(acpi_handle handle, u32 level,
  1213. void *context, void **return_value)
  1214. {
  1215. char node_name[5];
  1216. struct acpi_buffer buffer = { sizeof(node_name), node_name };
  1217. struct acpi_ec *ec = context;
  1218. int value = 0;
  1219. acpi_status status;
  1220. status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
  1221. if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
  1222. acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
  1223. return AE_OK;
  1224. }
/*
 * ec_parse_device() - fill an EC object from a PNP0C09 device node
 *
 * Extracts the data/command I/O ports from _CRS, the GPE number from
 * _GPE (or the ECDT boot EC when that quirk applies), and the global
 * lock requirement from _GLK.  Returns AE_CTRL_TERMINATE once a usable
 * EC is found (stops a namespace walk), AE_OK to keep looking, or an
 * error status.
 */
static acpi_status
ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
{
	acpi_status status;
	unsigned long long tmp = 0;
	struct acpi_ec *ec = context;

	/* clear addr values, ec_parse_io_ports depend on it */
	ec->command_addr = ec->data_addr = 0;
	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     ec_parse_io_ports, ec);
	if (ACPI_FAILURE(status))
		return status;
	/* Both ports are required; keep walking otherwise. */
	if (ec->data_addr == 0 || ec->command_addr == 0)
		return AE_OK;

	if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) {
		/*
		 * Always inherit the GPE number setting from the ECDT
		 * EC.
		 */
		ec->gpe = boot_ec->gpe;
	} else {
		/* Get GPE bit assignment (EC events). */
		/* TODO: Add support for _GPE returning a package */
		status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
		if (ACPI_FAILURE(status))
			return status;
		ec->gpe = tmp;
	}
	/* Use the global lock for all EC transactions? */
	tmp = 0;
	acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
	ec->global_lock = tmp;
	ec->handle = handle;
	return AE_CTRL_TERMINATE;
}
  1260. /*
  1261. * Note: This function returns an error code only when the address space
  1262. * handler is not installed, which means "not able to handle
  1263. * transactions".
  1264. */
static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
{
	acpi_status status;

	acpi_ec_start(ec, false);

	if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		/* Stay in polling mode until the GPE handler is in place. */
		acpi_ec_enter_noirq(ec);
		status = acpi_install_address_space_handler(ec->handle,
							    ACPI_ADR_SPACE_EC,
							    &acpi_ec_space_handler,
							    NULL, ec);
		if (ACPI_FAILURE(status)) {
			if (status == AE_NOT_FOUND) {
				/*
				 * Maybe the OS failed to evaluate the _REG
				 * object.  The AE_NOT_FOUND error is
				 * ignored and the OS continues to
				 * initialize the EC.
				 */
				pr_err("Fail in evaluating the _REG object"
				       " of EC device. Broken bios is suspected.\n");
			} else {
				acpi_ec_stop(ec, false);
				return -ENODEV;
			}
		}
		set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	if (!handle_events)
		return 0;

	if (!test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
		/* Find and register all query methods */
		acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
				    acpi_ec_register_query_methods,
				    NULL, ec, NULL);
		set_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
	}
	if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
		status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
						      ACPI_GPE_EDGE_TRIGGERED,
						      &acpi_ec_gpe_handler, ec);
		/* This is not fatal as we can poll EC events */
		if (ACPI_SUCCESS(status)) {
			set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
			/* Switch from busy polling to IRQ-driven mode. */
			acpi_ec_leave_noirq(ec);
			if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
			    ec->reference_count >= 1)
				acpi_ec_enable_gpe(ec, true);
		}
	}
	/* EC is fully operational, allow queries */
	acpi_ec_enable_event(ec);

	return 0;
}
/*
 * ec_remove_handlers() - tear down handlers in reverse dependency order
 *
 * The operation region handler goes first (its _REG(DISCONNECT) may
 * trigger new transactions), then transactions are flushed and the EC
 * stopped, and only then is the GPE handler removed.
 */
static void ec_remove_handlers(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
		if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
					ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
			pr_err("failed to remove space handler\n");
		clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
	}

	/*
	 * Stops handling the EC transactions after removing the operation
	 * region handler. This is required because _REG(DISCONNECT)
	 * invoked during the removal can result in new EC transactions.
	 *
	 * Flushes the EC requests and thus disables the GPE before
	 * removing the GPE handler. This is required by the current ACPICA
	 * GPE core. ACPICA GPE core will automatically disable a GPE when
	 * it is indicated but there is no way to handle it. So the drivers
	 * must disable the GPEs prior to removing the GPE handlers.
	 */
	acpi_ec_stop(ec, false);

	if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
		if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
					&acpi_ec_gpe_handler)))
			pr_err("failed to remove gpe handler\n");
		clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
	}
	if (test_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags)) {
		acpi_ec_remove_query_handlers(ec, true, 0);
		clear_bit(EC_FLAGS_EVT_HANDLER_INSTALLED, &ec->flags);
	}
}
  1349. static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
  1350. {
  1351. int ret;
  1352. ret = ec_install_handlers(ec, handle_events);
  1353. if (ret)
  1354. return ret;
  1355. /* First EC capable of handling transactions */
  1356. if (!first_ec) {
  1357. first_ec = ec;
  1358. acpi_handle_info(first_ec->handle, "Used as first EC\n");
  1359. }
  1360. acpi_handle_info(ec->handle,
  1361. "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
  1362. ec->gpe, ec->command_addr, ec->data_addr);
  1363. return ret;
  1364. }
/*
 * acpi_config_boot_ec() - (re)configure the boot EC with a new handle
 *
 * Removes the handlers from a previous boot EC when the handle changes,
 * frees a superseded boot EC object, updates the handle, and installs
 * handlers via acpi_ec_setup().  Returns the acpi_ec_setup() result.
 */
static int acpi_config_boot_ec(struct acpi_ec *ec, acpi_handle handle,
			       bool handle_events, bool is_ecdt)
{
	int ret;

	/*
	 * Changing the ACPI handle results in a re-configuration of the
	 * boot EC. And if it happens after the namespace initialization,
	 * it causes _REG evaluations.
	 */
	if (boot_ec && boot_ec->handle != handle)
		ec_remove_handlers(boot_ec);

	/* Unset old boot EC */
	if (boot_ec != ec)
		acpi_ec_free(boot_ec);

	/*
	 * ECDT device creation is split into acpi_ec_ecdt_probe() and
	 * acpi_ec_ecdt_start(). This function takes care of completing the
	 * ECDT parsing logic as the handle update should be performed
	 * between the installation/uninstallation of the handlers.
	 */
	if (ec->handle != handle)
		ec->handle = handle;

	ret = acpi_ec_setup(ec, handle_events);
	if (ret)
		return ret;

	/* Set new boot EC */
	if (!boot_ec) {
		boot_ec = ec;
		boot_ec_is_ecdt = is_ecdt;
	}

	acpi_handle_info(boot_ec->handle,
			 "Used as boot %s EC to handle transactions%s\n",
			 is_ecdt ? "ECDT" : "DSDT",
			 handle_events ? " and events" : "");
	return ret;
}
  1401. static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle)
  1402. {
  1403. struct acpi_table_ecdt *ecdt_ptr;
  1404. acpi_status status;
  1405. acpi_handle handle;
  1406. status = acpi_get_table(ACPI_SIG_ECDT, 1,
  1407. (struct acpi_table_header **)&ecdt_ptr);
  1408. if (ACPI_FAILURE(status))
  1409. return false;
  1410. status = acpi_get_handle(NULL, ecdt_ptr->id, &handle);
  1411. if (ACPI_FAILURE(status))
  1412. return false;
  1413. *phandle = handle;
  1414. return true;
  1415. }
  1416. static bool acpi_is_boot_ec(struct acpi_ec *ec)
  1417. {
  1418. if (!boot_ec)
  1419. return false;
  1420. if (ec->command_addr == boot_ec->command_addr &&
  1421. ec->data_addr == boot_ec->data_addr)
  1422. return true;
  1423. return false;
  1424. }
/*
 * acpi_ec_add() - ACPI driver .add callback for PNP0C09/ECDT devices
 *
 * For an ECDT-originated device the boot EC is reused; otherwise a new
 * EC is allocated and parsed from the namespace.  If the parsed EC
 * matches the boot EC (same I/O ports) the boot EC is reconfigured
 * instead of duplicated.  Also reserves the EC I/O ports (failure only
 * warns) and re-probes devices depending on the EC.
 */
static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	int ret;
	bool is_ecdt = false;
	acpi_status status;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
		/*
		 * NOTE(review): assumes boot_ec is non-NULL here — the
		 * ECDT HID should only be enumerated when an ECDT EC
		 * exists; confirm against the ECDT probe path.
		 */
		is_ecdt = true;
		ec = boot_ec;
	} else {
		ec = acpi_ec_alloc();
		if (!ec)
			return -ENOMEM;
		status = ec_parse_device(device->handle, 0, ec, NULL);
		if (status != AE_CTRL_TERMINATE) {
			ret = -EINVAL;
			goto err_alloc;
		}
	}

	if (acpi_is_boot_ec(ec)) {
		boot_ec_is_ecdt = is_ecdt;
		if (!is_ecdt) {
			/*
			 * Trust PNP0C09 namespace location rather than
			 * ECDT ID. But trust ECDT GPE rather than _GPE
			 * because of ASUS quirks, so do not change
			 * boot_ec->gpe to ec->gpe.
			 */
			boot_ec->handle = ec->handle;
			acpi_handle_debug(ec->handle, "duplicated.\n");
			acpi_ec_free(ec);
			ec = boot_ec;
		}
		ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt);
	} else
		ret = acpi_ec_setup(ec, true);
	if (ret)
		goto err_query;

	device->driver_data = ec;

	/* Port reservation failures are non-fatal; only warn. */
	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	if (!is_ecdt) {
		/* Reprobe devices depending on the EC */
		acpi_walk_dep_device_list(ec->handle);
	}
	acpi_handle_debug(ec->handle, "enumerated.\n");
	return 0;

err_query:
	if (ec != boot_ec)
		acpi_ec_remove_query_handlers(ec, true, 0);
err_alloc:
	/* Never free the boot EC here; it outlives this device. */
	if (ec != boot_ec)
		acpi_ec_free(ec);
	return ret;
}
  1484. static int acpi_ec_remove(struct acpi_device *device)
  1485. {
  1486. struct acpi_ec *ec;
  1487. if (!device)
  1488. return -EINVAL;
  1489. ec = acpi_driver_data(device);
  1490. release_region(ec->data_addr, 1);
  1491. release_region(ec->command_addr, 1);
  1492. device->driver_data = NULL;
  1493. if (ec != boot_ec) {
  1494. ec_remove_handlers(ec);
  1495. acpi_ec_free(ec);
  1496. }
  1497. return 0;
  1498. }
  1499. static acpi_status
  1500. ec_parse_io_ports(struct acpi_resource *resource, void *context)
  1501. {
  1502. struct acpi_ec *ec = context;
  1503. if (resource->type != ACPI_RESOURCE_TYPE_IO)
  1504. return AE_OK;
  1505. /*
  1506. * The first address region returned is the data port, and
  1507. * the second address region returned is the status/command
  1508. * port.
  1509. */
  1510. if (ec->data_addr == 0)
  1511. ec->data_addr = resource->data.io.minimum;
  1512. else if (ec->command_addr == 0)
  1513. ec->command_addr = resource->data.io.minimum;
  1514. else
  1515. return AE_CTRL_TERMINATE;
  1516. return AE_OK;
  1517. }
/* IDs this driver binds to: namespace ECs and the synthesized ECDT device. */
static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{ACPI_ECDT_HID, 0},
	{"", 0},
};
/*
 * This function is not Windows-compatible as Windows never enumerates the
 * namespace EC before the main ACPI device enumeration process. It is
 * retained for historical reasons and will be deprecated in the future.
 */
/*
 * Look up the DSDT-declared EC (via the first entry of ec_device_ids) and
 * re-configure it as the boot EC so that _REG gets evaluated.  Runs once
 * the namespace is initialized.  Returns 0 on success or a negative errno.
 */
int __init acpi_ec_dsdt_probe(void)
{
	acpi_status status;
	struct acpi_ec *ec;
	int ret;

	/*
	 * If a platform has ECDT, there is no need to proceed as the
	 * following probe is not a part of the ACPI device enumeration,
	 * executing _STA is not safe, and thus this probe may risk of
	 * picking up an invalid EC device.
	 */
	if (boot_ec)
		return -ENODEV;

	ec = acpi_ec_alloc();
	if (!ec)
		return -ENOMEM;

	/*
	 * At this point, the namespace is initialized, so start to find
	 * the namespace objects.
	 */
	status = acpi_get_devices(ec_device_ids[0].id,
				  ec_parse_device, ec, NULL);
	if (ACPI_FAILURE(status) || !ec->handle) {
		ret = -ENODEV;
		goto error;
	}

	/*
	 * When the DSDT EC is available, always re-configure boot EC to
	 * have _REG evaluated. _REG can only be evaluated after the
	 * namespace initialization.
	 * At this point, the GPE is not fully initialized, so do not
	 * handle the events.
	 */
	ret = acpi_config_boot_ec(ec, ec->handle, false, false);

error:
	if (ret)
		acpi_ec_free(ec);
	return ret;
}
  1567. /*
  1568. * If the DSDT EC is not functioning, we still need to prepare a fully
  1569. * functioning ECDT EC first in order to handle the events.
  1570. * https://bugzilla.kernel.org/show_bug.cgi?id=115021
  1571. */
/*
 * Register the ECDT-declared boot EC on the ACPI bus so its events can be
 * handled.  Bails out (-ENODEV) if there is no boot EC or the boot EC came
 * from the DSDT instead of the ECDT.
 */
static int __init acpi_ec_ecdt_start(void)
{
	acpi_handle handle;

	if (!boot_ec)
		return -ENODEV;
	/* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
	if (!boot_ec_is_ecdt)
		return -ENODEV;

	/*
	 * At this point, the namespace and the GPE are initialized, so
	 * start to find the namespace objects and handle the events.
	 *
	 * Note: ec->handle can be valid if this function is called after
	 * acpi_ec_add(), hence the fast path.
	 */
	if (boot_ec->handle == ACPI_ROOT_OBJECT) {
		if (!acpi_ec_ecdt_get_handle(&handle))
			return -ENODEV;
		boot_ec->handle = handle;
	}

	/* Register to ACPI bus with PM ops attached */
	return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
}
#if 0
/*
 * Some EC firmware variations refuse to respond to QR_EC when SCI_EVT is
 * not set, in which case we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 *
 * Currently compiled out: no DMI table entry references this quirk
 * callback; kept for reference against the bug reports above.
 */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif
/*
 * On some hardware it is necessary to clear events accumulated by the EC during
 * sleep. These ECs stop reporting GPEs until they are manually polled, if too
 * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
 *
 * Ideally, the EC should also be instructed NOT to accumulate events during
 * sleep (which Windows seems to do somehow), but the interface to control this
 * behaviour is not known at this time.
 *
 * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
 * however it is very likely that other Samsung models are affected.
 *
 * On systems which don't accumulate _Q events during sleep, this extra check
 * should be harmless.
 */
/* DMI quirk callback: enable EC polling on resume (see rationale above). */
static int ec_clear_on_resume(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing EC poll on resume.\n");
	EC_FLAGS_CLEAR_ON_RESUME = 1;
	/* This quirk also implies SCI_EVT is cleared on EC_SC accesses. */
	ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
	return 0;
}
/*
 * Some ECDTs contain wrong register addresses.
 * MSI MS-171F
 * https://bugzilla.kernel.org/show_bug.cgi?id=12461
 */
/* DMI quirk callback: swap the ECDT data/command addresses at probe time. */
static int ec_correct_ecdt(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing ECDT address correction.\n");
	EC_FLAGS_CORRECT_ECDT = 1;
	return 0;
}
/*
 * Some DSDTs contain a wrong GPE setting.
 * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD
 * https://bugzilla.kernel.org/show_bug.cgi?id=195651
 */
/* DMI quirk callback: prefer the ECDT GPE number over the DSDT one. */
static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
{
	pr_debug("Detected system needing ignore DSDT GPE setting.\n");
	EC_FLAGS_IGNORE_DSDT_GPE = 1;
	return 0;
}
/*
 * DMI quirk table consulted from acpi_ec_ecdt_probe(): maps known machines
 * to the quirk callbacks above (ECDT address swap, ECDT GPE preference,
 * clear-on-resume polling).
 */
static const struct dmi_system_id ec_dmi_table[] __initconst = {
	{
	ec_correct_ecdt, "MSI MS-171F", {
	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS FX502VD", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS FX502VE", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS GL702VMK", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS X550VXK", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},
	{
	ec_honor_ecdt_gpe, "ASUS X580VD", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
	DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL},
	{
	/* Vendor-wide match: many Samsung models are affected. */
	ec_clear_on_resume, "Samsung hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};
/*
 * Probe for a boot EC described by the ECDT table.  Runs before the ACPI
 * namespace is initialized, so only the ECDT register addresses and GPE
 * number are consumed here; namespace lookup and event handling happen
 * later.  Returns 0 on success or a negative errno.
 */
int __init acpi_ec_ecdt_probe(void)
{
	int ret;
	acpi_status status;
	struct acpi_table_ecdt *ecdt_ptr;
	struct acpi_ec *ec;

	ec = acpi_ec_alloc();
	if (!ec)
		return -ENOMEM;

	/*
	 * Generate a boot ec context
	 */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto error;
	}

	if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
		/*
		 * Reject ECDTs with a zero port address.
		 * Asus X50GL:
		 * https://bugzilla.kernel.org/show_bug.cgi?id=11880
		 */
		ret = -ENODEV;
		goto error;
	}

	if (EC_FLAGS_CORRECT_ECDT) {
		/* Quirk (see ec_dmi_table): the two addresses are swapped. */
		ec->command_addr = ecdt_ptr->data.address;
		ec->data_addr = ecdt_ptr->control.address;
	} else {
		ec->command_addr = ecdt_ptr->control.address;
		ec->data_addr = ecdt_ptr->data.address;
	}
	ec->gpe = ecdt_ptr->gpe;

	/*
	 * At this point, the namespace is not initialized, so do not find
	 * the namespace objects, or handle the events.
	 */
	ret = acpi_config_boot_ec(ec, ACPI_ROOT_OBJECT, false, true);

error:
	if (ret)
		acpi_ec_free(ec);
	return ret;
}
  1731. #ifdef CONFIG_PM_SLEEP
  1732. static int acpi_ec_suspend(struct device *dev)
  1733. {
  1734. struct acpi_ec *ec =
  1735. acpi_driver_data(to_acpi_device(dev));
  1736. if (acpi_sleep_no_ec_events() && ec_freeze_events)
  1737. acpi_ec_disable_event(ec);
  1738. return 0;
  1739. }
/*
 * ->suspend_noirq callback: with EC wakeup disabled (ec_no_wakeup), mask
 * the EC GPE for a started, referenced EC, then enter the EC noirq state.
 */
static int acpi_ec_suspend_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	/*
	 * The SCI handler doesn't run at this point, so the GPE can be
	 * masked at the low level without side effects.
	 */
	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);

	if (acpi_sleep_no_ec_events())
		acpi_ec_enter_noirq(ec);
	return 0;
}
/*
 * ->resume_noirq callback: mirror of acpi_ec_suspend_noirq() — leave the
 * EC noirq state first, then unmask the GPE that was masked on suspend.
 */
static int acpi_ec_resume_noirq(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	if (acpi_sleep_no_ec_events())
		acpi_ec_leave_noirq(ec);

	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
	    ec->reference_count >= 1)
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);

	return 0;
}
  1764. static int acpi_ec_resume(struct device *dev)
  1765. {
  1766. struct acpi_ec *ec =
  1767. acpi_driver_data(to_acpi_device(dev));
  1768. acpi_ec_enable_event(ec);
  1769. return 0;
  1770. }
  1771. #endif
/* System sleep PM ops: noirq-phase GPE handling plus event disable/enable. */
static const struct dev_pm_ops acpi_ec_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
/*
 * Setter for the "ec_event_clearing" module parameter.  Accepts "status",
 * "query" or "event", updates ec_event_clearing accordingly and logs the
 * choice; anything else yields -EINVAL.
 *
 * NOTE(review): strncmp() with the literal's length is a prefix match, so
 * e.g. "statusxyz" is also accepted — presumably to tolerate a trailing
 * newline from sysfs writes; confirm before tightening to an exact match.
 */
static int param_set_event_clearing(const char *val,
				    const struct kernel_param *kp)
{
	int result = 0;

	if (!strncmp(val, "status", sizeof("status") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
		pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
	} else if (!strncmp(val, "query", sizeof("query") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
		pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
	} else if (!strncmp(val, "event", sizeof("event") - 1)) {
		ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
		pr_info("Assuming SCI_EVT clearing on event reads\n");
	} else
		result = -EINVAL;
	return result;
}
  1793. static int param_get_event_clearing(char *buffer,
  1794. const struct kernel_param *kp)
  1795. {
  1796. switch (ec_event_clearing) {
  1797. case ACPI_EC_EVT_TIMING_STATUS:
  1798. return sprintf(buffer, "status");
  1799. case ACPI_EC_EVT_TIMING_QUERY:
  1800. return sprintf(buffer, "query");
  1801. case ACPI_EC_EVT_TIMING_EVENT:
  1802. return sprintf(buffer, "event");
  1803. default:
  1804. return sprintf(buffer, "invalid");
  1805. }
  1806. return 0;
  1807. }
/* Expose ec_event_clearing as a read/write (0644) module parameter. */
module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
		  NULL, 0644);
MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
/* ACPI bus driver tying the EC device IDs to add/remove and the PM ops. */
static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
	},
	.drv.pm = &acpi_ec_pm,
};
  1821. static inline int acpi_ec_query_init(void)
  1822. {
  1823. if (!ec_query_wq) {
  1824. ec_query_wq = alloc_workqueue("kec_query", 0,
  1825. ec_max_queries);
  1826. if (!ec_query_wq)
  1827. return -ENODEV;
  1828. }
  1829. return 0;
  1830. }
  1831. static inline void acpi_ec_query_exit(void)
  1832. {
  1833. if (ec_query_wq) {
  1834. destroy_workqueue(ec_query_wq);
  1835. ec_query_wq = NULL;
  1836. }
  1837. }
/*
 * Machines on which EC wakeup is disabled during suspend-to-idle to avoid
 * periodic wakeups from the EC GPE (see acpi_ec_init()).  Both
 * "Thinkpad"/"ThinkPad" product-family spellings are listed for the X1
 * Carbon 6th — presumably different BIOS revisions report either; verify
 * before deduplicating.
 */
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
	{
		.ident = "Thinkpad X1 Carbon 6th",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
		},
	},
	{
		.ident = "ThinkPad X1 Carbon 6th",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"),
		},
	},
	{
		.ident = "ThinkPad X1 Yoga 3rd",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"),
		},
	},
	{ },
};
  1862. int __init acpi_ec_init(void)
  1863. {
  1864. int result;
  1865. int ecdt_fail, dsdt_fail;
  1866. /* register workqueue for _Qxx evaluations */
  1867. result = acpi_ec_query_init();
  1868. if (result)
  1869. return result;
  1870. /*
  1871. * Disable EC wakeup on following systems to prevent periodic
  1872. * wakeup from EC GPE.
  1873. */
  1874. if (dmi_check_system(acpi_ec_no_wakeup)) {
  1875. ec_no_wakeup = true;
  1876. pr_debug("Disabling EC wakeup on suspend-to-idle\n");
  1877. }
  1878. /* Drivers must be started after acpi_ec_query_init() */
  1879. dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
  1880. /*
  1881. * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
  1882. * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
  1883. * settings but invalid DSDT settings.
  1884. * https://bugzilla.kernel.org/show_bug.cgi?id=196847
  1885. */
  1886. ecdt_fail = acpi_ec_ecdt_start();
  1887. return ecdt_fail && dsdt_fail ? -ENODEV : 0;
  1888. }
/* EC driver currently not unloadable */
#if 0
/*
 * Module-exit path kept for reference: unregister the driver and tear
 * down the query workqueue.  Compiled out because the EC driver cannot
 * be unloaded.
 */
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_query_exit();
}
#endif /* 0 */