manage.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
  4. * Copyright (C) 2005-2006 Thomas Gleixner
  5. *
  6. * This file contains driver APIs to the irq subsystem.
  7. */
  8. #define pr_fmt(fmt) "genirq: " fmt
  9. #include <linux/irq.h>
  10. #include <linux/kthread.h>
  11. #include <linux/module.h>
  12. #include <linux/random.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/irqdomain.h>
  15. #include <linux/slab.h>
  16. #include <linux/sched.h>
  17. #include <linux/sched/rt.h>
  18. #include <linux/sched/task.h>
  19. #include <uapi/linux/sched/types.h>
  20. #include <linux/task_work.h>
  21. #include "internals.h"
  22. #ifdef CONFIG_IRQ_FORCED_THREADING
  23. __read_mostly bool force_irqthreads;
  24. EXPORT_SYMBOL_GPL(force_irqthreads);
  25. static int __init setup_forced_irqthreads(char *arg)
  26. {
  27. force_irqthreads = true;
  28. return 0;
  29. }
  30. early_param("threadirqs", setup_forced_irqthreads);
  31. #endif
  32. static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
  33. {
  34. struct irq_data *irqd = irq_desc_get_irq_data(desc);
  35. bool inprogress;
  36. do {
  37. unsigned long flags;
  38. /*
  39. * Wait until we're out of the critical section. This might
  40. * give the wrong answer due to the lack of memory barriers.
  41. */
  42. while (irqd_irq_inprogress(&desc->irq_data))
  43. cpu_relax();
  44. /* Ok, that indicated we're done: double-check carefully. */
  45. raw_spin_lock_irqsave(&desc->lock, flags);
  46. inprogress = irqd_irq_inprogress(&desc->irq_data);
  47. /*
  48. * If requested and supported, check at the chip whether it
  49. * is in flight at the hardware level, i.e. already pending
  50. * in a CPU and waiting for service and acknowledge.
  51. */
  52. if (!inprogress && sync_chip) {
  53. /*
  54. * Ignore the return code. inprogress is only updated
  55. * when the chip supports it.
  56. */
  57. __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
  58. &inprogress);
  59. }
  60. raw_spin_unlock_irqrestore(&desc->lock, flags);
  61. /* Oops, that failed? */
  62. } while (inprogress);
  63. }
  64. /**
  65. * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
  66. * @irq: interrupt number to wait for
  67. *
  68. * This function waits for any pending hard IRQ handlers for this
  69. * interrupt to complete before returning. If you use this
  71. * function while holding a resource the IRQ handler may need, you
  71. * will deadlock. It does not take associated threaded handlers
  72. * into account.
  73. *
  74. * Do not use this for shutdown scenarios where you must be sure
  75. * that all parts (hardirq and threaded handler) have completed.
  76. *
  77. * Returns: false if a threaded handler is active.
  78. *
  79. * This function may be called - with care - from IRQ context.
  80. *
  81. * It does not check whether there is an interrupt in flight at the
  82. * hardware level, but not serviced yet, as this might deadlock when
  83. * called with interrupts disabled and the target CPU of the interrupt
  84. * is the current CPU.
  85. */
  86. bool synchronize_hardirq(unsigned int irq)
  87. {
  88. struct irq_desc *desc = irq_to_desc(irq);
  89. if (desc) {
  90. __synchronize_hardirq(desc, false);
  91. return !atomic_read(&desc->threads_active);
  92. }
  93. return true;
  94. }
  95. EXPORT_SYMBOL(synchronize_hardirq);
  96. /**
  97. * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  98. * @irq: interrupt number to wait for
  99. *
  100. * This function waits for any pending IRQ handlers for this interrupt
  101. * to complete before returning. If you use this function while
  102. * holding a resource the IRQ handler may need, you will deadlock.
  103. *
  104. * Can only be called from preemptible code as it might sleep when
  105. * an interrupt thread is associated to @irq.
  106. *
  107. * It optionally makes sure (when the irq chip supports that method)
  108. * that the interrupt is not pending in any CPU and waiting for
  109. * service.
  110. */
  111. void synchronize_irq(unsigned int irq)
  112. {
  113. struct irq_desc *desc = irq_to_desc(irq);
  114. if (desc) {
  115. __synchronize_hardirq(desc, true);
  116. /*
  117. * We made sure that no hardirq handler is
  118. * running. Now verify that no threaded handlers are
  119. * active.
  120. */
  121. wait_event(desc->wait_for_threads,
  122. !atomic_read(&desc->threads_active));
  123. }
  124. }
  125. EXPORT_SYMBOL(synchronize_irq);
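/*
 * Illustrative usage sketch (not part of this file): a typical driver
 * teardown path uses synchronize_irq() to make sure no handler still
 * references data that is about to be freed. Names such as my_dev,
 * my_stop_hw() and dma_buf are hypothetical.
 *
 *	static void my_shutdown(struct my_dev *dev)
 *	{
 *		my_stop_hw(dev);		// device stops raising the irq
 *		synchronize_irq(dev->irq);	// wait for handlers in flight
 *		free_irq(dev->irq, dev);	// now safe to unregister
 *		kfree(dev->dma_buf);		// handler can no longer touch it
 *	}
 *
 * Must be called from preemptible context, as documented above.
 */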
  126. #ifdef CONFIG_SMP
  127. cpumask_var_t irq_default_affinity;
  128. static bool __irq_can_set_affinity(struct irq_desc *desc)
  129. {
  130. if (!desc || !irqd_can_balance(&desc->irq_data) ||
  131. !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
  132. return false;
  133. return true;
  134. }
  135. /**
  136. * irq_can_set_affinity - Check if the affinity of a given irq can be set
  137. * @irq: Interrupt to check
  138. *
  139. */
  140. int irq_can_set_affinity(unsigned int irq)
  141. {
  142. return __irq_can_set_affinity(irq_to_desc(irq));
  143. }
  144. /**
  145. * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
  146. * @irq: Interrupt to check
  147. *
  148. * Like irq_can_set_affinity() above, but additionally checks for the
  149. * AFFINITY_MANAGED flag.
  150. */
  151. bool irq_can_set_affinity_usr(unsigned int irq)
  152. {
  153. struct irq_desc *desc = irq_to_desc(irq);
  154. return __irq_can_set_affinity(desc) &&
  155. !irqd_affinity_is_managed(&desc->irq_data);
  156. }
  157. /**
  158. * irq_set_thread_affinity - Notify irq threads to adjust affinity
  159. * @desc: irq descriptor which has affinity changed
  160. *
  161. * We just set IRQTF_AFFINITY and delegate the affinity setting
  162. * to the interrupt thread itself. We can not call
  163. * set_cpus_allowed_ptr() here as we hold desc->lock and this
  164. * code can be called from hard interrupt context.
  165. */
  166. void irq_set_thread_affinity(struct irq_desc *desc)
  167. {
  168. struct irqaction *action;
  169. for_each_action_of_desc(desc, action)
  170. if (action->thread)
  171. set_bit(IRQTF_AFFINITY, &action->thread_flags);
  172. }
  173. #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
  174. static void irq_validate_effective_affinity(struct irq_data *data)
  175. {
  176. const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
  177. struct irq_chip *chip = irq_data_get_irq_chip(data);
  178. if (!cpumask_empty(m))
  179. return;
  180. pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
  181. chip->name, data->irq);
  182. }
  183. static inline void irq_init_effective_affinity(struct irq_data *data,
  184. const struct cpumask *mask)
  185. {
  186. cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
  187. }
  188. #else
  189. static inline void irq_validate_effective_affinity(struct irq_data *data) { }
  190. static inline void irq_init_effective_affinity(struct irq_data *data,
  191. const struct cpumask *mask) { }
  192. #endif
  193. int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
  194. bool force)
  195. {
  196. struct irq_desc *desc = irq_data_to_desc(data);
  197. struct irq_chip *chip = irq_data_get_irq_chip(data);
  198. int ret;
  199. if (!chip || !chip->irq_set_affinity)
  200. return -EINVAL;
  201. ret = chip->irq_set_affinity(data, mask, force);
  202. switch (ret) {
  203. case IRQ_SET_MASK_OK:
  204. case IRQ_SET_MASK_OK_DONE:
  205. cpumask_copy(desc->irq_common_data.affinity, mask);
  206. case IRQ_SET_MASK_OK_NOCOPY:
  207. irq_validate_effective_affinity(data);
  208. irq_set_thread_affinity(desc);
  209. ret = 0;
  210. }
  211. return ret;
  212. }
  213. #ifdef CONFIG_GENERIC_PENDING_IRQ
  214. static inline int irq_set_affinity_pending(struct irq_data *data,
  215. const struct cpumask *dest)
  216. {
  217. struct irq_desc *desc = irq_data_to_desc(data);
  218. irqd_set_move_pending(data);
  219. irq_copy_pending(desc, dest);
  220. return 0;
  221. }
  222. #else
  223. static inline int irq_set_affinity_pending(struct irq_data *data,
  224. const struct cpumask *dest)
  225. {
  226. return -EBUSY;
  227. }
  228. #endif
  229. static int irq_try_set_affinity(struct irq_data *data,
  230. const struct cpumask *dest, bool force)
  231. {
  232. int ret = irq_do_set_affinity(data, dest, force);
  233. /*
  234. * In case that the underlying vector management is busy and the
  235. * architecture supports the generic pending mechanism then utilize
  236. * this to avoid returning an error to user space.
  237. */
  238. if (ret == -EBUSY && !force)
  239. ret = irq_set_affinity_pending(data, dest);
  240. return ret;
  241. }
  242. static bool irq_set_affinity_deactivated(struct irq_data *data,
  243. const struct cpumask *mask, bool force)
  244. {
  245. struct irq_desc *desc = irq_data_to_desc(data);
  246. /*
  247. * Handle irq chips which can handle affinity only in activated
  248. * state correctly
  249. *
  250. * If the interrupt is not yet activated, just store the affinity
  251. * mask and do not call the chip driver at all. On activation the
  252. * driver has to make sure anyway that the interrupt is in a
  253. * usable state so startup works.
  254. */
  255. if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
  256. irqd_is_activated(data) || !irqd_affinity_on_activate(data))
  257. return false;
  258. cpumask_copy(desc->irq_common_data.affinity, mask);
  259. irq_init_effective_affinity(data, mask);
  260. irqd_set(data, IRQD_AFFINITY_SET);
  261. return true;
  262. }
  263. int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
  264. bool force)
  265. {
  266. struct irq_chip *chip = irq_data_get_irq_chip(data);
  267. struct irq_desc *desc = irq_data_to_desc(data);
  268. int ret = 0;
  269. if (!chip || !chip->irq_set_affinity)
  270. return -EINVAL;
  271. if (irq_set_affinity_deactivated(data, mask, force))
  272. return 0;
  273. if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
  274. ret = irq_try_set_affinity(data, mask, force);
  275. } else {
  276. irqd_set_move_pending(data);
  277. irq_copy_pending(desc, mask);
  278. }
  279. if (desc->affinity_notify) {
  280. kref_get(&desc->affinity_notify->kref);
  281. if (!schedule_work(&desc->affinity_notify->work)) {
  282. /* Work was already scheduled, drop our extra ref */
  283. kref_put(&desc->affinity_notify->kref,
  284. desc->affinity_notify->release);
  285. }
  286. }
  287. irqd_set(data, IRQD_AFFINITY_SET);
  288. return ret;
  289. }
  290. int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
  291. {
  292. struct irq_desc *desc = irq_to_desc(irq);
  293. unsigned long flags;
  294. int ret;
  295. if (!desc)
  296. return -EINVAL;
  297. raw_spin_lock_irqsave(&desc->lock, flags);
  298. ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
  299. raw_spin_unlock_irqrestore(&desc->lock, flags);
  300. return ret;
  301. }
  302. int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
  303. {
  304. unsigned long flags;
  305. struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  306. if (!desc)
  307. return -EINVAL;
  308. desc->affinity_hint = m;
  309. irq_put_desc_unlock(desc, flags);
  310. /* set the initial affinity to prevent every interrupt being on CPU0 */
  311. if (m)
  312. __irq_set_affinity(irq, m, false);
  313. return 0;
  314. }
  315. EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
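/*
 * Illustrative usage sketch (not part of this file): a multiqueue driver
 * may spread its per-queue interrupts across CPUs and record the choice
 * as an affinity hint so user space (e.g. irqbalance) can see it. The
 * queue array and the q[i].irq member are hypothetical.
 *
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_hint(q[i].irq,
 *				      cpumask_of(i % num_online_cpus()));
 *
 * Pass NULL on teardown to clear the hint before calling free_irq().
 */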
  316. static void irq_affinity_notify(struct work_struct *work)
  317. {
  318. struct irq_affinity_notify *notify =
  319. container_of(work, struct irq_affinity_notify, work);
  320. struct irq_desc *desc = irq_to_desc(notify->irq);
  321. cpumask_var_t cpumask;
  322. unsigned long flags;
  323. if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
  324. goto out;
  325. raw_spin_lock_irqsave(&desc->lock, flags);
  326. if (irq_move_pending(&desc->irq_data))
  327. irq_get_pending(cpumask, desc);
  328. else
  329. cpumask_copy(cpumask, desc->irq_common_data.affinity);
  330. raw_spin_unlock_irqrestore(&desc->lock, flags);
  331. notify->notify(notify, cpumask);
  332. free_cpumask_var(cpumask);
  333. out:
  334. kref_put(&notify->kref, notify->release);
  335. }
  336. /**
  337. * irq_set_affinity_notifier - control notification of IRQ affinity changes
  338. * @irq: Interrupt for which to enable/disable notification
  339. * @notify: Context for notification, or %NULL to disable
  340. * notification. Function pointers must be initialised;
  341. * the other fields will be initialised by this function.
  342. *
  343. * Must be called in process context. Notification may only be enabled
  344. * after the IRQ is allocated and must be disabled before the IRQ is
  345. * freed using free_irq().
  346. */
  347. int
  348. irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
  349. {
  350. struct irq_desc *desc = irq_to_desc(irq);
  351. struct irq_affinity_notify *old_notify;
  352. unsigned long flags;
  353. /* The release function is promised process context */
  354. might_sleep();
  355. if (!desc)
  356. return -EINVAL;
  357. /* Complete initialisation of *notify */
  358. if (notify) {
  359. notify->irq = irq;
  360. kref_init(&notify->kref);
  361. INIT_WORK(&notify->work, irq_affinity_notify);
  362. }
  363. raw_spin_lock_irqsave(&desc->lock, flags);
  364. old_notify = desc->affinity_notify;
  365. desc->affinity_notify = notify;
  366. raw_spin_unlock_irqrestore(&desc->lock, flags);
  367. if (old_notify) {
  368. if (cancel_work_sync(&old_notify->work)) {
  369. /* Pending work had a ref, put that one too */
  370. kref_put(&old_notify->kref, old_notify->release);
  371. }
  372. kref_put(&old_notify->kref, old_notify->release);
  373. }
  374. return 0;
  375. }
  376. EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
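/*
 * Illustrative usage sketch (not part of this file): a driver that caches
 * the target CPU of an interrupt can register a notifier to be told when
 * the affinity changes. my_notify() and my_release() are hypothetical.
 *
 *	static struct irq_affinity_notify my_notify_ctx = {
 *		.notify  = my_notify,	// void (*)(struct irq_affinity_notify *, const cpumask_t *)
 *		.release = my_release,	// void (*)(struct kref *)
 *	};
 *
 *	irq_set_affinity_notifier(irq, &my_notify_ctx);	// enable
 *	...
 *	irq_set_affinity_notifier(irq, NULL);		// disable before free_irq()
 */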
  377. #ifndef CONFIG_AUTO_IRQ_AFFINITY
  378. /*
  379. * Generic version of the affinity autoselector.
  380. */
  381. int irq_setup_affinity(struct irq_desc *desc)
  382. {
  383. struct cpumask *set = irq_default_affinity;
  384. int ret, node = irq_desc_get_node(desc);
  385. static DEFINE_RAW_SPINLOCK(mask_lock);
  386. static struct cpumask mask;
  387. /* Excludes PER_CPU and NO_BALANCE interrupts */
  388. if (!__irq_can_set_affinity(desc))
  389. return 0;
  390. raw_spin_lock(&mask_lock);
  391. /*
  392. * Preserve the managed affinity setting and a userspace affinity
  393. * setup, but make sure that one of the targets is online.
  394. */
  395. if (irqd_affinity_is_managed(&desc->irq_data) ||
  396. irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
  397. if (cpumask_intersects(desc->irq_common_data.affinity,
  398. cpu_online_mask))
  399. set = desc->irq_common_data.affinity;
  400. else
  401. irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
  402. }
  403. cpumask_and(&mask, cpu_online_mask, set);
  404. if (cpumask_empty(&mask))
  405. cpumask_copy(&mask, cpu_online_mask);
  406. if (node != NUMA_NO_NODE) {
  407. const struct cpumask *nodemask = cpumask_of_node(node);
  408. /* make sure at least one of the cpus in nodemask is online */
  409. if (cpumask_intersects(&mask, nodemask))
  410. cpumask_and(&mask, &mask, nodemask);
  411. }
  412. ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
  413. raw_spin_unlock(&mask_lock);
  414. return ret;
  415. }
  416. #else
  417. /* Wrapper for ALPHA specific affinity selector magic */
  418. int irq_setup_affinity(struct irq_desc *desc)
  419. {
  420. return irq_select_affinity(irq_desc_get_irq(desc));
  421. }
  422. #endif /* CONFIG_AUTO_IRQ_AFFINITY */
  423. #endif /* CONFIG_SMP */
  424. /**
  425. * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
  426. * @irq: interrupt number to set affinity
  427. * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
  428. * specific data for percpu_devid interrupts
  429. *
  430. * This function uses the vCPU specific data to set the vCPU
  431. * affinity for an irq. The vCPU specific data is passed from
  432. * outside, such as KVM. One example code path is as below:
  433. * KVM -> IOMMU -> irq_set_vcpu_affinity().
  434. */
  435. int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
  436. {
  437. unsigned long flags;
  438. struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  439. struct irq_data *data;
  440. struct irq_chip *chip;
  441. int ret = -ENOSYS;
  442. if (!desc)
  443. return -EINVAL;
  444. data = irq_desc_get_irq_data(desc);
  445. do {
  446. chip = irq_data_get_irq_chip(data);
  447. if (chip && chip->irq_set_vcpu_affinity)
  448. break;
  449. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  450. data = data->parent_data;
  451. #else
  452. data = NULL;
  453. #endif
  454. } while (data);
  455. if (data)
  456. ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
  457. irq_put_desc_unlock(desc, flags);
  458. return ret;
  459. }
  460. EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
  461. void __disable_irq(struct irq_desc *desc)
  462. {
  463. if (!desc->depth++)
  464. irq_disable(desc);
  465. }
  466. static int __disable_irq_nosync(unsigned int irq)
  467. {
  468. unsigned long flags;
  469. struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  470. if (!desc)
  471. return -EINVAL;
  472. __disable_irq(desc);
  473. irq_put_desc_busunlock(desc, flags);
  474. return 0;
  475. }
  476. /**
  477. * disable_irq_nosync - disable an irq without waiting
  478. * @irq: Interrupt to disable
  479. *
  480. * Disable the selected interrupt line. Disables and Enables are
  481. * nested.
  482. * Unlike disable_irq(), this function does not ensure existing
  483. * instances of the IRQ handler have completed before returning.
  484. *
  485. * This function may be called from IRQ context.
  486. */
  487. void disable_irq_nosync(unsigned int irq)
  488. {
  489. __disable_irq_nosync(irq);
  490. }
  491. EXPORT_SYMBOL(disable_irq_nosync);
  492. /**
  493. * disable_irq - disable an irq and wait for completion
  494. * @irq: Interrupt to disable
  495. *
  496. * Disable the selected interrupt line. Enables and Disables are
  497. * nested.
  498. * This function waits for any pending IRQ handlers for this interrupt
  499. * to complete before returning. If you use this function while
  500. * holding a resource the IRQ handler may need, you will deadlock.
  501. *
  502. * This function may be called - with care - from IRQ context.
  503. */
  504. void disable_irq(unsigned int irq)
  505. {
  506. if (!__disable_irq_nosync(irq))
  507. synchronize_irq(irq);
  508. }
  509. EXPORT_SYMBOL(disable_irq);
  510. /**
  511. * disable_hardirq - disables an irq and waits for hardirq completion
  512. * @irq: Interrupt to disable
  513. *
  514. * Disable the selected interrupt line. Enables and Disables are
  515. * nested.
  516. * This function waits for any pending hard IRQ handlers for this
  517. * interrupt to complete before returning. If you use this function while
  518. * holding a resource the hard IRQ handler may need, you will deadlock.
  519. *
  520. * When used to optimistically disable an interrupt from atomic context
  521. * the return value must be checked.
  522. *
  523. * Returns: false if a threaded handler is active.
  524. *
  525. * This function may be called - with care - from IRQ context.
  526. */
  527. bool disable_hardirq(unsigned int irq)
  528. {
  529. if (!__disable_irq_nosync(irq))
  530. return synchronize_hardirq(irq);
  531. return false;
  532. }
  533. EXPORT_SYMBOL_GPL(disable_hardirq);
  534. void __enable_irq(struct irq_desc *desc)
  535. {
  536. switch (desc->depth) {
  537. case 0:
  538. err_out:
  539. WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
  540. irq_desc_get_irq(desc));
  541. break;
  542. case 1: {
  543. if (desc->istate & IRQS_SUSPENDED)
  544. goto err_out;
  545. /* Prevent probing on this irq: */
  546. irq_settings_set_noprobe(desc);
  547. /*
  548. * Call irq_startup() not irq_enable() here because the
  549. * interrupt might be marked NOAUTOEN. So irq_startup()
  550. * needs to be invoked when it gets enabled the first
  551. * time. If it was already started up, then irq_startup()
  552. * will invoke irq_enable() under the hood.
  553. */
  554. irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
  555. break;
  556. }
  557. default:
  558. desc->depth--;
  559. }
  560. }
  561. /**
  562. * enable_irq - enable handling of an irq
  563. * @irq: Interrupt to enable
  564. *
  565. * Undoes the effect of one call to disable_irq(). If this
  566. * matches the last disable, processing of interrupts on this
  567. * IRQ line is re-enabled.
  568. *
  569. * This function may be called from IRQ context only when
  570. * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
  571. */
  572. void enable_irq(unsigned int irq)
  573. {
  574. unsigned long flags;
  575. struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  576. if (!desc)
  577. return;
  578. if (WARN(!desc->irq_data.chip,
  579. KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
  580. goto out;
  581. __enable_irq(desc);
  582. out:
  583. irq_put_desc_busunlock(desc, flags);
  584. }
  585. EXPORT_SYMBOL(enable_irq);
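/*
 * Illustrative usage sketch (not part of this file): disable_irq() and
 * enable_irq() nest, so a driver can bracket a reconfiguration of its
 * hardware without racing against its own handler. my_reprogram_hw()
 * and dev->irq are hypothetical.
 *
 *	disable_irq(dev->irq);		// waits for running handlers
 *	my_reprogram_hw(dev);		// safe: no handler can run here
 *	enable_irq(dev->irq);		// re-enabled on the matching call
 */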
  586. static int set_irq_wake_real(unsigned int irq, unsigned int on)
  587. {
  588. struct irq_desc *desc = irq_to_desc(irq);
  589. int ret = -ENXIO;
  590. if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
  591. return 0;
  592. if (desc->irq_data.chip->irq_set_wake)
  593. ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
  594. return ret;
  595. }
  596. /**
  597. * irq_set_irq_wake - control irq power management wakeup
  598. * @irq: interrupt to control
  599. * @on: enable/disable power management wakeup
  600. *
  601. * Enable/disable power management wakeup mode, which is
  602. * disabled by default. Enables and disables must match,
  603. * just as they match for non-wakeup mode support.
  604. *
  605. * Wakeup mode lets this IRQ wake the system from sleep
  606. * states like "suspend to RAM".
  607. */
  608. int irq_set_irq_wake(unsigned int irq, unsigned int on)
  609. {
  610. unsigned long flags;
  611. struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  612. int ret = 0;
  613. if (!desc)
  614. return -EINVAL;
  615. /* wakeup-capable irqs can be shared between drivers that
  616. * don't need to have the same sleep mode behaviors.
  617. */
  618. if (on) {
  619. if (desc->wake_depth++ == 0) {
  620. ret = set_irq_wake_real(irq, on);
  621. if (ret)
  622. desc->wake_depth = 0;
  623. else
  624. irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
  625. }
  626. } else {
  627. if (desc->wake_depth == 0) {
  628. WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
  629. } else if (--desc->wake_depth == 0) {
  630. ret = set_irq_wake_real(irq, on);
  631. if (ret)
  632. desc->wake_depth = 1;
  633. else
  634. irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
  635. }
  636. }
  637. irq_put_desc_busunlock(desc, flags);
  638. return ret;
  639. }
  640. EXPORT_SYMBOL(irq_set_irq_wake);
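/*
 * Illustrative usage sketch (not part of this file): a driver whose
 * interrupt should wake the system from suspend marks it in its suspend
 * callback and undoes that symmetrically on resume. The callbacks and
 * my_irq are hypothetical; device_may_wakeup() is the usual gate.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(my_irq, 1);	// or enable_irq_wake()
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(my_irq, 0);	// or disable_irq_wake()
 *		return 0;
 *	}
 */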
  641. /*
  642. * Internal function that tells the architecture code whether a
  643. * particular irq has been exclusively allocated or is available
  644. * for driver use.
  645. */
  646. int can_request_irq(unsigned int irq, unsigned long irqflags)
  647. {
  648. unsigned long flags;
  649. struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  650. int canrequest = 0;
  651. if (!desc)
  652. return 0;
  653. if (irq_settings_can_request(desc)) {
  654. if (!desc->action ||
  655. irqflags & desc->action->flags & IRQF_SHARED)
  656. canrequest = 1;
  657. }
  658. irq_put_desc_unlock(desc, flags);
  659. return canrequest;
  660. }
  661. int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
  662. {
  663. struct irq_chip *chip = desc->irq_data.chip;
  664. int ret, unmask = 0;
  665. if (!chip || !chip->irq_set_type) {
  666. /*
  667. * IRQF_TRIGGER_* but the PIC does not support multiple
  668. * flow-types?
  669. */
  670. pr_debug("No set_type function for IRQ %d (%s)\n",
  671. irq_desc_get_irq(desc),
  672. chip ? (chip->name ? : "unknown") : "unknown");
  673. return 0;
  674. }
  675. if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
  676. if (!irqd_irq_masked(&desc->irq_data))
  677. mask_irq(desc);
  678. if (!irqd_irq_disabled(&desc->irq_data))
  679. unmask = 1;
  680. }
  681. /* Mask all flags except trigger mode */
  682. flags &= IRQ_TYPE_SENSE_MASK;
  683. ret = chip->irq_set_type(&desc->irq_data, flags);
  684. switch (ret) {
  685. case IRQ_SET_MASK_OK:
  686. case IRQ_SET_MASK_OK_DONE:
  687. irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
  688. irqd_set(&desc->irq_data, flags);
  689. case IRQ_SET_MASK_OK_NOCOPY:
  690. flags = irqd_get_trigger_type(&desc->irq_data);
  691. irq_settings_set_trigger_mask(desc, flags);
  692. irqd_clear(&desc->irq_data, IRQD_LEVEL);
  693. irq_settings_clr_level(desc);
  694. if (flags & IRQ_TYPE_LEVEL_MASK) {
  695. irq_settings_set_level(desc);
  696. irqd_set(&desc->irq_data, IRQD_LEVEL);
  697. }
  698. ret = 0;
  699. break;
  700. default:
  701. pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
  702. flags, irq_desc_get_irq(desc), chip->irq_set_type);
  703. }
  704. if (unmask)
  705. unmask_irq(desc);
  706. return ret;
  707. }
  708. #ifdef CONFIG_HARDIRQS_SW_RESEND
  709. int irq_set_parent(int irq, int parent_irq)
  710. {
  711. unsigned long flags;
  712. struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  713. if (!desc)
  714. return -EINVAL;
  715. desc->parent_irq = parent_irq;
  716. irq_put_desc_unlock(desc, flags);
  717. return 0;
  718. }
  719. EXPORT_SYMBOL_GPL(irq_set_parent);
  720. #endif
  721. /*
  722. * Default primary interrupt handler for threaded interrupts. Is
  723. * assigned as primary handler when request_threaded_irq is called
  724. * with handler == NULL. Useful for oneshot interrupts.
  725. */
  726. static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
  727. {
  728. return IRQ_WAKE_THREAD;
  729. }
  730. /*
  731. * Primary handler for nested threaded interrupts. Should never be
  732. * called.
  733. */
  734. static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
  735. {
  736. WARN(1, "Primary handler called for nested irq %d\n", irq);
  737. return IRQ_NONE;
  738. }
  739. static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
  740. {
  741. WARN(1, "Secondary action handler called for irq %d\n", irq);
  742. return IRQ_NONE;
  743. }
  744. static int irq_wait_for_interrupt(struct irqaction *action)
  745. {
  746. for (;;) {
  747. set_current_state(TASK_INTERRUPTIBLE);
  748. if (kthread_should_stop()) {
  749. /* may need to run one last time */
  750. if (test_and_clear_bit(IRQTF_RUNTHREAD,
  751. &action->thread_flags)) {
  752. __set_current_state(TASK_RUNNING);
  753. return 0;
  754. }
  755. __set_current_state(TASK_RUNNING);
  756. return -1;
  757. }
  758. if (test_and_clear_bit(IRQTF_RUNTHREAD,
  759. &action->thread_flags)) {
  760. __set_current_state(TASK_RUNNING);
  761. return 0;
  762. }
  763. schedule();
  764. }
  765. }
  766. /*
  767. * Oneshot interrupts keep the irq line masked until the threaded
  768. * handler has finished. Unmask if the interrupt has not been disabled and
  769. * is marked MASKED.
  770. */
  771. static void irq_finalize_oneshot(struct irq_desc *desc,
  772. struct irqaction *action)
  773. {
  774. if (!(desc->istate & IRQS_ONESHOT) ||
  775. action->handler == irq_forced_secondary_handler)
  776. return;
  777. again:
  778. chip_bus_lock(desc);
  779. raw_spin_lock_irq(&desc->lock);
  780. /*
  781. * Implausible though it may be, we need to protect against
  782. * the following scenario:
  783. *
  784. * The thread is faster done than the hard interrupt handler
  785. * on the other CPU. If we unmask the irq line then the
  786. * interrupt can come in again and mask the line, then leave due
  787. * to IRQS_INPROGRESS, and the irq line stays masked forever.
  788. *
  789. * This also serializes the state of shared oneshot handlers
  790. * versus "desc->threads_oneshot |= action->thread_mask;" in
  791. * irq_wake_thread(). See the comment there which explains the
  792. * serialization.
  793. */
  794. if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
  795. raw_spin_unlock_irq(&desc->lock);
  796. chip_bus_sync_unlock(desc);
  797. cpu_relax();
  798. goto again;
  799. }
  800. /*
  801. * Now check again, whether the thread should run. Otherwise
  802. * we would clear the threads_oneshot bit of this thread which
  803. * was just set.
  804. */
  805. if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
  806. goto out_unlock;
  807. desc->threads_oneshot &= ~action->thread_mask;
  808. if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
  809. irqd_irq_masked(&desc->irq_data))
  810. unmask_threaded_irq(desc);
  811. out_unlock:
  812. raw_spin_unlock_irq(&desc->lock);
  813. chip_bus_sync_unlock(desc);
  814. }
  815. #ifdef CONFIG_SMP
  816. /*
  817. * Check whether we need to change the affinity of the interrupt thread.
  818. */
  819. static void
  820. irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
  821. {
  822. cpumask_var_t mask;
  823. bool valid = true;
  824. if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
  825. return;
  826. /*
  827. * In case we are out of memory we set IRQTF_AFFINITY again and
  828. * try again next time
  829. */
  830. if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
  831. set_bit(IRQTF_AFFINITY, &action->thread_flags);
  832. return;
  833. }
  834. raw_spin_lock_irq(&desc->lock);
  835. /*
  836. * This code is triggered unconditionally. Check the affinity
  837. * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
  838. */
  839. if (cpumask_available(desc->irq_common_data.affinity)) {
  840. const struct cpumask *m;
  841. m = irq_data_get_effective_affinity_mask(&desc->irq_data);
  842. cpumask_copy(mask, m);
  843. } else {
  844. valid = false;
  845. }
  846. raw_spin_unlock_irq(&desc->lock);
  847. if (valid)
  848. set_cpus_allowed_ptr(current, mask);
  849. free_cpumask_var(mask);
  850. }
  851. #else
  852. static inline void
  853. irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  854. #endif
  855. /*
  856. * Interrupts which are not explicitly requested as threaded
  857. * interrupts rely on the implicit bh/preempt disable of the hard irq
  858. * context. So we need to disable bh here to avoid deadlocks and other
  859. * side effects.
  860. */
  861. static irqreturn_t
  862. irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
  863. {
  864. irqreturn_t ret;
  865. local_bh_disable();
  866. if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE))
  867. local_irq_disable();
  868. ret = action->thread_fn(action->irq, action->dev_id);
  869. if (ret == IRQ_HANDLED)
  870. atomic_inc(&desc->threads_handled);
  871. irq_finalize_oneshot(desc, action);
  872. if (!IS_ENABLED(CONFIG_PREEMPT_RT_BASE))
  873. local_irq_enable();
  874. local_bh_enable();
  875. return ret;
  876. }
  877. /*
  878. * Interrupts explicitly requested as threaded interrupts want to be
  879. * preemptible - many of them need to sleep and wait for slow buses to
  880. * complete.
  881. */
  882. static irqreturn_t irq_thread_fn(struct irq_desc *desc,
  883. struct irqaction *action)
  884. {
  885. irqreturn_t ret;
  886. ret = action->thread_fn(action->irq, action->dev_id);
  887. if (ret == IRQ_HANDLED)
  888. atomic_inc(&desc->threads_handled);
  889. irq_finalize_oneshot(desc, action);
  890. return ret;
  891. }
  892. static void wake_threads_waitq(struct irq_desc *desc)
  893. {
  894. if (atomic_dec_and_test(&desc->threads_active))
  895. wake_up(&desc->wait_for_threads);
  896. }
  897. static void irq_thread_dtor(struct callback_head *unused)
  898. {
  899. struct task_struct *tsk = current;
  900. struct irq_desc *desc;
  901. struct irqaction *action;
  902. if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
  903. return;
  904. action = kthread_data(tsk);
  905. pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
  906. tsk->comm, tsk->pid, action->irq);
  907. desc = irq_to_desc(action->irq);
  908. /*
  909. * If IRQTF_RUNTHREAD is set, we need to decrement
  910. * desc->threads_active and wake possible waiters.
  911. */
  912. if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
  913. wake_threads_waitq(desc);
  914. /* Prevent a stale desc->threads_oneshot */
  915. irq_finalize_oneshot(desc, action);
  916. }
  917. static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
  918. {
  919. struct irqaction *secondary = action->secondary;
  920. if (WARN_ON_ONCE(!secondary))
  921. return;
  922. raw_spin_lock_irq(&desc->lock);
  923. __irq_wake_thread(desc, secondary);
  924. raw_spin_unlock_irq(&desc->lock);
  925. }
  926. /*
  927. * Interrupt handler thread
  928. */
  929. static int irq_thread(void *data)
  930. {
  931. struct callback_head on_exit_work;
  932. struct irqaction *action = data;
  933. struct irq_desc *desc = irq_to_desc(action->irq);
  934. irqreturn_t (*handler_fn)(struct irq_desc *desc,
  935. struct irqaction *action);
  936. if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
  937. &action->thread_flags))
  938. handler_fn = irq_forced_thread_fn;
  939. else
  940. handler_fn = irq_thread_fn;
  941. init_task_work(&on_exit_work, irq_thread_dtor);
  942. task_work_add(current, &on_exit_work, false);
  943. irq_thread_check_affinity(desc, action);
  944. while (!irq_wait_for_interrupt(action)) {
  945. irqreturn_t action_ret;
  946. irq_thread_check_affinity(desc, action);
  947. action_ret = handler_fn(desc, action);
  948. if (action_ret == IRQ_WAKE_THREAD)
  949. irq_wake_secondary(desc, action);
  950. wake_threads_waitq(desc);
  951. }
  952. /*
  953. * This is the regular exit path. __free_irq() is stopping the
  954. * thread via kthread_stop() after calling
  955. * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
  956. * oneshot mask bit can be set.
  957. */
  958. task_work_cancel(current, irq_thread_dtor);
  959. return 0;
  960. }
  961. /**
  962. * irq_wake_thread - wake the irq thread for the action identified by dev_id
  963. * @irq: Interrupt line
  964. * @dev_id: Device identity for which the thread should be woken
  965. *
  966. */
  967. void irq_wake_thread(unsigned int irq, void *dev_id)
  968. {
  969. struct irq_desc *desc = irq_to_desc(irq);
  970. struct irqaction *action;
  971. unsigned long flags;
  972. if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
  973. return;
  974. raw_spin_lock_irqsave(&desc->lock, flags);
  975. for_each_action_of_desc(desc, action) {
  976. if (action->dev_id == dev_id) {
  977. if (action->thread)
  978. __irq_wake_thread(desc, action);
  979. break;
  980. }
  981. }
  982. raw_spin_unlock_irqrestore(&desc->lock, flags);
  983. }
  984. EXPORT_SYMBOL_GPL(irq_wake_thread);
  985. static int irq_setup_forced_threading(struct irqaction *new)
  986. {
  987. if (!force_irqthreads)
  988. return 0;
  989. if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
  990. return 0;
  991. /*
  992. * No further action required for interrupts which are requested as
  993. * threaded interrupts already
  994. */
  995. if (new->handler == irq_default_primary_handler)
  996. return 0;
  997. new->flags |= IRQF_ONESHOT;
  998. /*
  999. * Handle the case where we have a real primary handler and a
  1000. * thread handler. We force thread them as well by creating a
  1001. * secondary action.
  1002. */
  1003. if (new->handler && new->thread_fn) {
  1004. /* Allocate the secondary action */
  1005. new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
  1006. if (!new->secondary)
  1007. return -ENOMEM;
  1008. new->secondary->handler = irq_forced_secondary_handler;
  1009. new->secondary->thread_fn = new->thread_fn;
  1010. new->secondary->dev_id = new->dev_id;
  1011. new->secondary->irq = new->irq;
  1012. new->secondary->name = new->name;
  1013. }
  1014. /* Deal with the primary handler */
  1015. set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
  1016. new->thread_fn = new->handler;
  1017. new->handler = irq_default_primary_handler;
  1018. return 0;
  1019. }
  1020. static int irq_request_resources(struct irq_desc *desc)
  1021. {
  1022. struct irq_data *d = &desc->irq_data;
  1023. struct irq_chip *c = d->chip;
  1024. return c->irq_request_resources ? c->irq_request_resources(d) : 0;
  1025. }
  1026. static void irq_release_resources(struct irq_desc *desc)
  1027. {
  1028. struct irq_data *d = &desc->irq_data;
  1029. struct irq_chip *c = d->chip;
  1030. if (c->irq_release_resources)
  1031. c->irq_release_resources(d);
  1032. }
  1033. static int
  1034. setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
  1035. {
  1036. struct task_struct *t;
  1037. struct sched_param param = {
  1038. .sched_priority = MAX_USER_RT_PRIO/2,
  1039. };
  1040. if (!secondary) {
  1041. t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
  1042. new->name);
  1043. } else {
  1044. t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
  1045. new->name);
  1046. param.sched_priority -= 1;
  1047. }
  1048. if (IS_ERR(t))
  1049. return PTR_ERR(t);
  1050. sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
  1051. /*
  1052. * We keep the reference to the task struct even if
  1053. * the thread dies to avoid that the interrupt code
  1054. * references an already freed task_struct.
  1055. */
  1056. get_task_struct(t);
  1057. new->thread = t;
  1058. /*
  1059. * Tell the thread to set its affinity. This is
  1060. * important for shared interrupt handlers as we do
  1061. * not invoke setup_affinity() for the secondary
  1062. * handlers as everything is already set up. Even for
  1063. * interrupts marked with IRQF_NO_BALANCE this is
  1064. * correct as we want the thread to move to the cpu(s)
  1065. * on which the requesting code placed the interrupt.
  1066. */
  1067. set_bit(IRQTF_AFFINITY, &new->thread_flags);
  1068. return 0;
  1069. }
  1070. /*
  1071. * Internal function to register an irqaction - typically used to
  1072. * allocate special interrupts that are part of the architecture.
  1073. *
  1074. * Locking rules:
  1075. *
  1076. * desc->request_mutex Provides serialization against a concurrent free_irq()
  1077. * chip_bus_lock Provides serialization for slow bus operations
  1078. * desc->lock Provides serialization against hard interrupts
  1079. *
  1080. * chip_bus_lock and desc->lock are sufficient for all other management and
  1081. * interrupt related functions. desc->request_mutex solely serializes
  1082. * request/free_irq().
  1083. */
  1084. static int
  1085. __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
  1086. {
  1087. struct irqaction *old, **old_ptr;
  1088. unsigned long flags, thread_mask = 0;
  1089. int ret, nested, shared = 0;
  1090. if (!desc)
  1091. return -EINVAL;
  1092. if (desc->irq_data.chip == &no_irq_chip)
  1093. return -ENOSYS;
  1094. if (!try_module_get(desc->owner))
  1095. return -ENODEV;
  1096. new->irq = irq;
  1097. /*
  1098. * If the trigger type is not specified by the caller,
  1099. * then use the default for this interrupt.
  1100. */
  1101. if (!(new->flags & IRQF_TRIGGER_MASK))
  1102. new->flags |= irqd_get_trigger_type(&desc->irq_data);
  1103. /*
  1104. * Check whether the interrupt nests into another interrupt
  1105. * thread.
  1106. */
  1107. nested = irq_settings_is_nested_thread(desc);
  1108. if (nested) {
  1109. if (!new->thread_fn) {
  1110. ret = -EINVAL;
  1111. goto out_mput;
  1112. }
  1113. /*
  1114. * Replace the primary handler which was provided from
  1115. * the driver for non nested interrupt handling by the
  1116. * dummy function which warns when called.
  1117. */
  1118. new->handler = irq_nested_primary_handler;
  1119. } else {
  1120. if (irq_settings_can_thread(desc)) {
  1121. ret = irq_setup_forced_threading(new);
  1122. if (ret)
  1123. goto out_mput;
  1124. }
  1125. }
  1126. /*
  1127. * Create a handler thread when a thread function is supplied
  1128. * and the interrupt does not nest into another interrupt
  1129. * thread.
  1130. */
  1131. if (new->thread_fn && !nested) {
  1132. ret = setup_irq_thread(new, irq, false);
  1133. if (ret)
  1134. goto out_mput;
  1135. if (new->secondary) {
  1136. ret = setup_irq_thread(new->secondary, irq, true);
  1137. if (ret)
  1138. goto out_thread;
  1139. }
  1140. }
  1141. /*
  1142. * Drivers are often written to work w/o knowledge about the
  1143. * underlying irq chip implementation, so a request for a
  1144. * threaded irq without a primary hard irq context handler
  1145. * requires the ONESHOT flag to be set. Some irq chips like
  1146. * MSI based interrupts are per se one shot safe. Check the
  1147. * chip flags, so we can avoid the unmask dance at the end of
  1148. * the threaded handler for those.
  1149. */
  1150. if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
  1151. new->flags &= ~IRQF_ONESHOT;
  1152. /*
  1153. * Protects against a concurrent __free_irq() call which might wait
  1154. * for synchronize_hardirq() to complete without holding the optional
  1155. * chip bus lock and desc->lock. Also protects against handing out
  1156. * a recycled oneshot thread_mask bit while it's still in use by
  1157. * its previous owner.
  1158. */
  1159. mutex_lock(&desc->request_mutex);
  1160. /*
  1161. * Acquire bus lock as the irq_request_resources() callback below
  1162. * might rely on the serialization or the magic power management
  1163. * functions which are abusing the irq_bus_lock() callback,
  1164. */
  1165. chip_bus_lock(desc);
  1166. /* First installed action requests resources. */
  1167. if (!desc->action) {
  1168. ret = irq_request_resources(desc);
  1169. if (ret) {
  1170. pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
  1171. new->name, irq, desc->irq_data.chip->name);
  1172. goto out_bus_unlock;
  1173. }
  1174. }
  1175. /*
  1176. * The following block of code has to be executed atomically
  1177. * protected against a concurrent interrupt and any of the other
  1178. * management calls which are not serialized via
  1179. * desc->request_mutex or the optional bus lock.
  1180. */
  1181. raw_spin_lock_irqsave(&desc->lock, flags);
  1182. old_ptr = &desc->action;
  1183. old = *old_ptr;
  1184. if (old) {
  1185. /*
  1186. * Can't share interrupts unless both agree to and are
  1187. * the same type (level, edge, polarity). So both flag
  1188. * fields must have IRQF_SHARED set and the bits which
  1189. * set the trigger type must match. Also all must
  1190. * agree on ONESHOT.
  1191. */
  1192. unsigned int oldtype;
  1193. /*
  1194. * If nobody did set the configuration before, inherit
  1195. * the one provided by the requester.
  1196. */
  1197. if (irqd_trigger_type_was_set(&desc->irq_data)) {
  1198. oldtype = irqd_get_trigger_type(&desc->irq_data);
  1199. } else {
  1200. oldtype = new->flags & IRQF_TRIGGER_MASK;
  1201. irqd_set_trigger_type(&desc->irq_data, oldtype);
  1202. }
  1203. if (!((old->flags & new->flags) & IRQF_SHARED) ||
  1204. (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
  1205. ((old->flags ^ new->flags) & IRQF_ONESHOT))
  1206. goto mismatch;
  1207. /* All handlers must agree on per-cpuness */
  1208. if ((old->flags & IRQF_PERCPU) !=
  1209. (new->flags & IRQF_PERCPU))
  1210. goto mismatch;
  1211. /* add new interrupt at end of irq queue */
  1212. do {
  1213. /*
  1214. * Or all existing action->thread_mask bits,
  1215. * so we can find the next zero bit for this
  1216. * new action.
  1217. */
  1218. thread_mask |= old->thread_mask;
  1219. old_ptr = &old->next;
  1220. old = *old_ptr;
  1221. } while (old);
  1222. shared = 1;
  1223. }
  1224. /*
  1225. * Setup the thread mask for this irqaction for ONESHOT. For
  1226. * !ONESHOT irqs the thread mask is 0 so we can avoid a
  1227. * conditional in irq_wake_thread().
  1228. */
  1229. if (new->flags & IRQF_ONESHOT) {
  1230. /*
  1231. * Unlikely to have 32 (or 64, depending on BITS_PER_LONG) irqs sharing one line,
  1232. * but who knows.
  1233. */
  1234. if (thread_mask == ~0UL) {
  1235. ret = -EBUSY;
  1236. goto out_unlock;
  1237. }
  1238. /*
  1239. * The thread_mask for the action is or'ed to
  1240. * desc->threads_active to indicate that the
  1241. * IRQF_ONESHOT thread handler has been woken, but not
  1242. * yet finished. The bit is cleared when a thread
  1243. * completes. When all threads of a shared interrupt
  1244. * line have completed desc->threads_active becomes
  1245. * zero and the interrupt line is unmasked. See
  1246. * handle.c:irq_wake_thread() for further information.
  1247. *
  1248. * If no thread is woken by primary (hard irq context)
  1249. * interrupt handlers, then desc->threads_active is
  1250. * also checked for zero to unmask the irq line in the
  1251. * affected hard irq flow handlers
  1252. * (handle_[fasteoi|level]_irq).
  1253. *
  1254. * The new action gets the first zero bit of
  1255. * thread_mask assigned. See the loop above which or's
  1256. * all existing action->thread_mask bits.
  1257. */
  1258. new->thread_mask = 1UL << ffz(thread_mask);
  1259. } else if (new->handler == irq_default_primary_handler &&
  1260. !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
  1261. /*
  1262. * The interrupt was requested with handler = NULL, so
  1263. * we use the default primary handler for it. But it
  1264. * does not have the oneshot flag set. In combination
  1265. * with level interrupts this is deadly, because the
  1266. * default primary handler just wakes the thread, then
  1267. * the irq line is reenabled, but the device still
  1268. * has the level irq asserted. Rinse and repeat....
  1269. *
  1270. * While this works for edge type interrupts, we play
  1271. * it safe and reject unconditionally because we can't
  1272. * say for sure which type this interrupt really
  1273. * has. The type flags are unreliable as the
  1274. * underlying chip implementation can override them.
  1275. */
  1276. pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
  1277. irq);
  1278. ret = -EINVAL;
  1279. goto out_unlock;
  1280. }
  1281. if (!shared) {
  1282. init_waitqueue_head(&desc->wait_for_threads);
  1283. /* Setup the type (level, edge polarity) if configured: */
  1284. if (new->flags & IRQF_TRIGGER_MASK) {
  1285. ret = __irq_set_trigger(desc,
  1286. new->flags & IRQF_TRIGGER_MASK);
  1287. if (ret)
  1288. goto out_unlock;
  1289. }
  1290. /*
  1291. * Activate the interrupt. That activation must happen
  1292. * independently of IRQ_NOAUTOEN. request_irq() can fail
  1293. * and the callers are supposed to handle
  1294. * that. enable_irq() of an interrupt requested with
  1295. * IRQ_NOAUTOEN is not supposed to fail. The activation
  1296. * keeps it in shutdown mode, it merely associates
  1297. * resources if necessary and if that's not possible it
  1298. * fails. Interrupts which are in managed shutdown mode
  1299. * will simply ignore that activation request.
  1300. */
  1301. ret = irq_activate(desc);
  1302. if (ret)
  1303. goto out_unlock;
  1304. desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
  1305. IRQS_ONESHOT | IRQS_WAITING);
  1306. irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
  1307. if (new->flags & IRQF_PERCPU) {
  1308. irqd_set(&desc->irq_data, IRQD_PER_CPU);
  1309. irq_settings_set_per_cpu(desc);
  1310. }
  1311. if (new->flags & IRQF_ONESHOT)
  1312. desc->istate |= IRQS_ONESHOT;
  1313. /* Exclude IRQ from balancing if requested */
  1314. if (new->flags & IRQF_NOBALANCING) {
  1315. irq_settings_set_no_balancing(desc);
  1316. irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
  1317. }
  1318. if (irq_settings_can_autoenable(desc)) {
  1319. irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
  1320. } else {
  1321. /*
  1322. * Shared interrupts do not go well with disabling
  1323. * auto enable. The sharing interrupt might request
  1324. * it while it's still disabled and then wait for
  1325. * interrupts forever.
  1326. */
  1327. WARN_ON_ONCE(new->flags & IRQF_SHARED);
  1328. /* Undo nested disables: */
  1329. desc->depth = 1;
  1330. }
  1331. } else if (new->flags & IRQF_TRIGGER_MASK) {
  1332. unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
  1333. unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
  1334. if (nmsk != omsk)
  1335. /* hope the handler works with current trigger mode */
  1336. pr_warn("irq %d uses trigger mode %u; requested %u\n",
  1337. irq, omsk, nmsk);
  1338. }
	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;
mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}
/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
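
/*
 * Illustrative sketch (not part of this file): early boot code typically
 * installs a static struct irqaction via setup_irq() before the normal
 * allocation paths are usable, e.g. for a system timer. The foo_* names
 * and FOO_TIMER_IRQ are assumptions for illustration only; the block is
 * guarded with #if 0 so it is never built.
 */
#if 0
static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
	foo_timer_ack();			/* hypothetical device ack */
	return IRQ_HANDLED;
}

static struct irqaction foo_timer_irqaction = {
	.handler = foo_timer_interrupt,
	.flags	 = IRQF_TIMER,
	.name	 = "foo-timer",
};

void __init foo_time_init(void)
{
	/* Statically installed; remove_irq() would be the counterpart */
	setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
}
#endif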
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		/* Only shutdown. Deactivate after synchronize_hardirq() */
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Apart from that, the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/*
	 * Make sure it's not being used on another CPU and if the chip
	 * supports it also make sure that there is no (not yet serviced)
	 * interrupt in flight at the hardware level.
	 */
	__synchronize_hardirq(desc, true);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now that it's being freed, so let's make
	 * sure that is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. Though request_mutex is
	 * held across this which prevents __setup_irq() from handing out
	 * the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		/*
		 * There is no interrupt on the fly anymore. Deactivate it
		 * completely.
		 */
		raw_spin_lock_irqsave(&desc->lock, flags);
		irq_domain_deactivate_irq(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}
/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(desc, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 *
 * Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);
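
/*
 * Illustrative sketch (not part of this file): typical driver teardown.
 * The device is quiesced first so a shared line cannot still be asserted
 * by it, then free_irq() waits for running handlers to finish. struct
 * foo_dev and foo_disable_device_irqs() are hypothetical names; guarded
 * with #if 0 so it is never built.
 */
#if 0
static void foo_remove(struct foo_dev *foo)
{
	/* Stop the card from raising the (possibly shared) line first */
	foo_disable_device_irqs(foo);

	/* Returns the devname passed to request_irq(); must not be called
	 * from interrupt context. */
	free_irq(foo->irq, foo);
}
#endif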
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes, it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD, which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non-NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
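
/*
 * Illustrative sketch (not part of this file): the split handler pattern
 * described above. The primary handler runs in hard interrupt context,
 * checks whether its device raised the line, masks it at the device and
 * returns IRQ_WAKE_THREAD; the sleepable work happens in the thread
 * function. All foo_* names are hypothetical; guarded with #if 0.
 */
#if 0
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	if (!foo_irq_pending(foo))
		return IRQ_NONE;		/* not ours on a shared line */

	foo_mask_device_irq(foo);		/* quiesce the device */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	foo_process_events(foo);		/* may sleep, e.g. bus access */
	foo_unmask_device_irq(foo);
	return IRQ_HANDLED;
}

static int foo_probe(struct foo_dev *foo)
{
	/* dev_id is the device structure, as the kernel-doc above suggests */
	return request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
				    IRQF_SHARED, "foo", foo);
}
#endif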
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
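
/*
 * Illustrative sketch (not part of this file): callers of
 * request_any_context_irq() usually only distinguish failure (negative)
 * from success; the IRQC_IS_HARDIRQ/IRQC_IS_NESTED return merely tells
 * them in which context @handler will run. foo_* names are hypothetical;
 * guarded with #if 0.
 */
#if 0
static irqreturn_t foo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
	int ret;

	ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
	if (ret < 0)
		return ret;

	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED here */
	return 0;
}
#endif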
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);
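
/*
 * Illustrative sketch (not part of this file): a per-CPU interrupt has to
 * be enabled on every CPU that should receive it, and enable_percpu_irq()
 * only acts on the calling CPU. One hedged way to do that is to run the
 * enable on each CPU, e.g. via on_each_cpu(). foo_ppi_irq is hypothetical;
 * guarded with #if 0.
 */
#if 0
static unsigned int foo_ppi_irq;

static void foo_enable_local(void *info)
{
	/* IRQ_TYPE_NONE keeps the trigger type already configured */
	enable_percpu_irq(foo_ppi_irq, IRQ_TYPE_NONE);
}

static void foo_enable_everywhere(void)
{
	/* Run foo_enable_local() on every online CPU and wait */
	on_each_cpu(foo_enable_local, NULL, 1);
}
#endif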
/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq: Linux irq number to check for
 *
 * Must be called from a non-migratable context. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);
/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}
/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}
/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);
/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
/**
 * __request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @flags: Interrupt type flags (IRQF_TIMER only)
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt on the local CPU. If the interrupt is supposed to be
 * enabled on other CPUs, it has to be done on each CPU using
 * enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
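
/*
 * Illustrative sketch (not part of this file): the percpu cookie is a
 * per-CPU allocation, and the handler is invoked with the interrupted
 * CPU's instance of it. Enabling still has to be done per CPU with
 * enable_percpu_irq(). struct foo_pcpu_state and the foo_* names are
 * hypothetical; guarded with #if 0.
 */
#if 0
static struct foo_pcpu_state __percpu *foo_state;

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	struct foo_pcpu_state *st = dev_id;	/* this CPU's instance */

	st->count++;
	return IRQ_HANDLED;
}

static int foo_init(unsigned int irq)
{
	int ret;

	foo_state = alloc_percpu(struct foo_pcpu_state);
	if (!foo_state)
		return -ENOMEM;

	ret = __request_percpu_irq(irq, foo_percpu_handler, 0,
				   "foo-percpu", foo_state);
	if (ret) {
		free_percpu(foo_state);
		return ret;
	}

	/* Only enables on the calling CPU; other CPUs do this themselves */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
#endif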
int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
			    bool *state)
{
	struct irq_chip *chip;
	int err = -EINVAL;

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);
	return err;
}
/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	err = __irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
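
/*
 * Illustrative sketch (not part of this file): snapshotting the pending
 * bit of an interrupt that is forwarded to a guest, as a VFIO/KVM-style
 * user might do. Preemption is disabled around the call in case the
 * irqchip uses per-CPU registers; foo_irq_is_pending() is hypothetical.
 * Guarded with #if 0.
 */
#if 0
static bool foo_irq_is_pending(unsigned int irq)
{
	bool pending = false;

	preempt_disable();
	WARN_ON(irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending));
	preempt_enable();

	return pending;
}
#endif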
/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
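
/*
 * Illustrative sketch (not part of this file): the counterpart to the
 * snapshot example above, re-injecting a previously saved pending state
 * when a forwarded interrupt is handed back to the host. The foo_* name
 * is hypothetical; guarded with #if 0.
 */
#if 0
static void foo_irq_restore_pending(unsigned int irq, bool was_pending)
{
	preempt_disable();
	WARN_ON(irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, was_pending));
	preempt_enable();
}
#endif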