/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
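
/*
 * Illustrative sketch (not part of this file): a driver that receives an
 * event channel from another domain typically binds it with one of the
 * bind_evtchn_to_irqhandler*() helpers below and, for the lateeoi model,
 * acknowledges each event from its handler. my_handler/my_dev are
 * placeholder names.
 *
 *	static irqreturn_t my_handler(int irq, void *my_dev)
 *	{
 *		// ... process the event ...
 *		xen_irq_lateeoi(irq, 0);
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler_lateeoi(evtchn, my_handler, 0,
 *						"my-device", my_dev);
 */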

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/atomic.h>
#include <linux/ktime.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

#include "events_internal.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static uint __read_mostly event_loop_timeout = 2;
module_param(event_loop_timeout, uint, 0644);

static uint __read_mostly event_eoi_delay = 10;
module_param(event_eoi_delay, uint, 0644);

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

/*
 * Lock protecting event handling loop against removing event channels.
 * Adding of event channels is no issue as the associated IRQ becomes active
 * only after everything is setup (before request_[threaded_]irq() the handler
 * can't be entered for an event, as the event channel will be unmasked only
 * then).
 */
static DEFINE_RWLOCK(evtchn_rwlock);

/*
 * Lock hierarchy:
 *
 *   irq_mapping_update_lock
 *     evtchn_rwlock
 *       IRQ-desc lock
 *         percpu eoi_list_lock
 *           irq_info->lock
 */

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);
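
/*
 * evtchn_to_irq is a two-level table: the top level is an array of pointers
 * to page-sized second-level arrays of IRQ numbers, allocated on demand in
 * set_evtchn_to_irq(). EVTCHN_ROW/EVTCHN_COL translate an event channel
 * number into indices for that table.
 */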
#define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)

static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_lateeoi_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static DEFINE_PER_CPU(unsigned int, irq_epoch);

static void clear_evtchn_to_irq_row(unsigned row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		WRITE_ONCE(evtchn_to_irq[row][col], -1);
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(row);
	}
}
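
/*
 * Record the IRQ bound to an event channel, allocating the second-level
 * page for this row on first use. Storing -1 into a row that was never
 * allocated is a no-op, since unallocated entries already read as -1.
 */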
static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
{
	unsigned row;
	unsigned col;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
		if (evtchn_to_irq[row] == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(row);
	}

	WRITE_ONCE(evtchn_to_irq[row][col], irq);
	return 0;
}

int get_evtchn_to_irq(unsigned evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
}

/* Get info for IRQ */
struct irq_info *info_for_irq(unsigned irq)
{
	if (irq < nr_legacy_irqs())
		return legacy_info_ptrs[irq];
	else
		return irq_get_chip_data(irq);
}

static void set_info_for_irq(unsigned int irq, struct irq_info *info)
{
	if (irq < nr_legacy_irqs())
		legacy_info_ptrs[irq] = info;
	else
		irq_set_chip_data(irq, info);
}

/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;
	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
	raw_spin_lock_init(&info->lock);

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(info);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     unsigned evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  unsigned evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   unsigned evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
				   unsigned evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	xen_evtchn_port_remove(info->evtchn, info->cpu);
	info->evtchn = 0;
}

/*
 * Accessors for packed IRQ information.
 */
unsigned int evtchn_from_irq(unsigned irq)
{
	const struct irq_info *info = NULL;

	if (likely(irq < nr_irqs))
		info = info_for_irq(irq);
	if (!info)
		return 0;

	return info->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}
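
/*
 * An event channel stays masked as long as any reason bit is set in
 * info->mask_reason (explicit mask, pending EOI, temporary mask while
 * migrating the IRQ). do_mask()/do_unmask() maintain those bits and only
 * touch the real event channel mask when the first bit is set or the
 * last one is cleared.
 */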
static void do_mask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&info->lock, flags);

	if (!info->mask_reason)
		mask_evtchn(info->evtchn);

	info->mask_reason |= reason;

	raw_spin_unlock_irqrestore(&info->lock, flags);
}

static void do_unmask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&info->lock, flags);

	info->mask_reason &= ~reason;

	if (!info->mask_reason)
		unmask_evtchn(info->evtchn);

	raw_spin_unlock_irqrestore(&info->lock, flags);
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = get_evtchn_to_irq(chn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
#endif
	xen_evtchn_port_bind_to_cpu(info, cpu);

	info->cpu = cpu;
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
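
/*
 * "Late EOI" interrupts keep their event channel masked from delivery of an
 * event until the driver signals completion via xen_irq_lateeoi(). Delayed
 * EOIs are queued per cpu on a list sorted by due time and processed by a
 * delayed work item.
 */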
struct lateeoi_work {
	struct delayed_work delayed;
	spinlock_t eoi_list_lock;
	struct list_head eoi_list;
};

static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);

static void lateeoi_list_del(struct irq_info *info)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
	unsigned long flags;

	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
	list_del_init(&info->eoi_list);
	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
}

static void lateeoi_list_add(struct irq_info *info)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
	struct irq_info *elem;
	u64 now = get_jiffies_64();
	unsigned long delay;
	unsigned long flags;

	if (now < info->eoi_time)
		delay = info->eoi_time - now;
	else
		delay = 1;

	spin_lock_irqsave(&eoi->eoi_list_lock, flags);

	if (list_empty(&eoi->eoi_list)) {
		list_add(&info->eoi_list, &eoi->eoi_list);
		mod_delayed_work_on(info->eoi_cpu, system_wq,
				    &eoi->delayed, delay);
	} else {
		list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
			if (elem->eoi_time <= info->eoi_time)
				break;
		}
		list_add(&info->eoi_list, &elem->eoi_list);
	}

	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
}
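
/*
 * A spurious event (the handler found nothing to do) increments a per-irq
 * counter; from the second consecutive spurious event onwards the unmask is
 * delayed, with the delay doubling each time up to one second, to throttle
 * event storms from the other end of the channel.
 */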
static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
{
	evtchn_port_t evtchn;
	unsigned int cpu;
	unsigned int delay = 0;

	evtchn = info->evtchn;
	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
		return;

	if (spurious) {
		if ((1 << info->spurious_cnt) < (HZ << 2))
			info->spurious_cnt++;
		if (info->spurious_cnt > 1) {
			delay = 1 << (info->spurious_cnt - 2);
			if (delay > HZ)
				delay = HZ;
			if (!info->eoi_time)
				info->eoi_cpu = smp_processor_id();
			info->eoi_time = get_jiffies_64() + delay;
		}
	} else {
		info->spurious_cnt = 0;
	}

	cpu = info->eoi_cpu;
	if (info->eoi_time &&
	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
		lateeoi_list_add(info);
		return;
	}

	info->eoi_time = 0;
	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
}

static void xen_irq_lateeoi_worker(struct work_struct *work)
{
	struct lateeoi_work *eoi;
	struct irq_info *info;
	u64 now = get_jiffies_64();
	unsigned long flags;

	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);

	read_lock_irqsave(&evtchn_rwlock, flags);

	while (true) {
		spin_lock(&eoi->eoi_list_lock);

		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
						eoi_list);

		if (info == NULL || now < info->eoi_time) {
			spin_unlock(&eoi->eoi_list_lock);
			break;
		}

		list_del_init(&info->eoi_list);

		spin_unlock(&eoi->eoi_list_lock);

		info->eoi_time = 0;

		xen_irq_lateeoi_locked(info, false);
	}

	if (info)
		mod_delayed_work_on(info->eoi_cpu, system_wq,
				    &eoi->delayed, info->eoi_time - now);

	read_unlock_irqrestore(&evtchn_rwlock, flags);
}

static void xen_cpu_init_eoi(unsigned int cpu)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);

	INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
	spin_lock_init(&eoi->eoi_list_lock);
	INIT_LIST_HEAD(&eoi->eoi_list);
}

void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
	struct irq_info *info;
	unsigned long flags;

	read_lock_irqsave(&evtchn_rwlock, flags);

	info = info_for_irq(irq);

	if (info)
		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);

	read_unlock_irqrestore(&evtchn_rwlock, flags);
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;

#ifdef CONFIG_SMP
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	set_info_for_irq(irq, info);

	INIT_LIST_HEAD(&info->eoi_list);
	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

	if (irq >= 0) {
		for (i = 0; i < nvec; i++)
			xen_irq_init(irq + i);
	}

	return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{
	return xen_allocate_irqs_dynamic(1);
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < nr_legacy_irqs())
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	unsigned long flags;

	if (WARN_ON(!info))
		return;

	write_lock_irqsave(&evtchn_rwlock, flags);

	if (!list_empty(&info->eoi_list))
		lateeoi_list_del(info);

	list_del(&info->list);

	set_info_for_irq(irq, NULL);

	WARN_ON(info->refcnt > 0);

	write_unlock_irqrestore(&evtchn_rwlock, flags);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < nr_legacy_irqs())
		return;

	irq_free_desc(irq);
}

static void xen_evtchn_close(unsigned int port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
}
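
/* Mark the event as no longer being handled and clear its pending bit. */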
static void event_handler_exit(struct irq_info *info)
{
	smp_store_release(&info->is_active, 0);
	clear_evtchn(info->evtchn);
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	int evtchn = info ? info->evtchn : 0;
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	if (!VALID_EVTCHN(evtchn))
		return;

	if (unlikely(irqd_is_setaffinity_pending(data)) &&
	    likely(!irqd_irq_disabled(data))) {
		do_mask(info, EVT_MASK_REASON_TEMPORARY);

		event_handler_exit(info);

		irq_move_masked_irq(data);

		do_unmask(info, EVT_MASK_REASON_TEMPORARY);
	} else
		event_handler_exit(info);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}
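
/*
 * Bind a PIRQ to a fresh event channel (if it does not have one yet), query
 * whether it needs an explicit EOI, and unmask it.
 */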
static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		pr_warn("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc)
		goto err;

	info->evtchn = evtchn;
	bind_evtchn_to_cpu(evtchn, 0);

	rc = xen_evtchn_port_setup(info);
	if (rc)
		goto err;

out:
	do_unmask(info, EVT_MASK_REASON_EXPLICIT);

	eoi_pirq(irq_get_irq_data(irq));

	return 0;

err:
	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
	xen_evtchn_close(evtchn);
	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	unsigned evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	do_mask(info, EVT_MASK_REASON_EXPLICIT);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	enable_dynirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = info_for_irq(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	xen_free_irq(irq);
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up. Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
				      shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
					      handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
					      handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{
	int i, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irqs_dynamic(nvec);
	if (irq < 0)
		goto out;

	for (i = 0; i < nvec; i++) {
		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);

		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
					      i == 0 ? 0 : PIRQ_MSI_GROUP);
		if (ret < 0)
			goto error_irq;
	}

	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	while (nvec--)
		__unbind_from_irq(irq + nvec);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	/*
	 * If trying to remove a vector in a MSI group different
	 * than the first one skip the PIRQ unmap unless this vector
	 * is the first one in the group.
	 */
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
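
/*
 * Common binding path for event channels of type IRQT_EVTCHN: allocate a
 * dynamic IRQ on first use, attach the requested irq_chip with the edge
 * handler, and record the mapping. Binding an already-bound event channel
 * simply returns the existing IRQ.
 */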
static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/* New interdomain events are bound to VCPU 0. */
		bind_evtchn_to_cpu(evtchn, 0);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
	return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
{
	return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_interdomain_evtchn_to_irq_chip(unsigned int remote_domain,
					       evtchn_port_t remote_port,
					       struct irq_chip *chip)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
					       chip);
}

int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
				   evtchn_port_t remote_port)
{
	return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
						   &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);

int bind_interdomain_evtchn_to_irq_lateeoi(unsigned int remote_domain,
					   evtchn_port_t remote_port)
{
	return bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
						   &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
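
/*
 * Scan all event channel ports for one that is already bound to the given
 * VIRQ on the given vcpu. Used when EVTCHNOP_bind_virq returns -EEXIST,
 * i.e. the hypervisor still holds a binding for this VIRQ.
 */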
static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < xen_evtchn_max_channels(); port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
			rc = port;
			break;
		}
	}
	return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported.
 */
unsigned xen_evtchn_nr_channels(void)
{
	return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);

int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		if (percpu)
			irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
						      handle_percpu_irq, "virq");
		else
			irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
						      handle_edge_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						  &bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}

static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname, void *dev_id,
					  struct irq_chip *chip)
{
	int irq, retval;

	irq = bind_evtchn_to_irq_chip(evtchn, chip);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
					      devname, dev_id,
					      &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
				      irq_handler_t handler,
				      unsigned long irqflags,
				      const char *devname, void *dev_id)
{
	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
					      devname, dev_id,
					      &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);

static int bind_interdomain_evtchn_to_irqhandler_chip(
		unsigned int remote_domain, evtchn_port_t remote_port,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id, struct irq_chip *chip)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq_chip(remote_domain, remote_port,
						  chip);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  evtchn_port_t remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
				remote_port, handler, irqflags, devname,
				dev_id, &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler_lateeoi(unsigned int remote_domain,
						  evtchn_port_t remote_port,
						  irq_handler_t handler,
						  unsigned long irqflags,
						  const char *devname,
						  void *dev_id)
{
	return bind_interdomain_evtchn_to_irqhandler_chip(remote_domain,
				remote_port, handler, irqflags, devname,
				dev_id, &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq:irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);
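
/*
 * A refcnt of -1 means the event channel is not reference counted (the
 * normal case for kernel-internal bindings). evtchn_make_refcounted()
 * switches it to refcount mode with an initial count of one, so that
 * evtchn_get()/evtchn_put() can keep the binding alive across users.
 */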
int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = info_for_irq(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = info_for_irq(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
		goto done;

	info->refcnt++;
	err = 0;

done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);

	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
					    NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
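
/*
 * State shared across one invocation of the event handling loop. Once the
 * loop has run longer than event_loop_timeout jiffies, defer_eoi is set and
 * further events are marked so that their (late) EOI is handled by the
 * lateeoi worker instead of unmasking immediately, bounding the time spent
 * in the loop.
 */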
struct evtchn_loop_ctrl {
	ktime_t timeout;
	unsigned count;
	bool defer_eoi;
};

void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
{
	int irq;
	struct irq_info *info;

	irq = get_evtchn_to_irq(port);
	if (irq == -1)
		return;

	/*
	 * Check for timeout every 256 events.
	 * We are setting the timeout value only after the first 256
	 * events in order to not hurt the common case of few loop
	 * iterations. The 256 is basically an arbitrary value.
	 *
	 * In case we are hitting the timeout we need to defer all further
	 * EOIs in order to ensure to leave the event handling loop rather
	 * sooner than later.
	 */
	if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
		ktime_t kt = ktime_get();

		if (!ctrl->timeout) {
			kt = ktime_add_ms(kt,
					  jiffies_to_msecs(event_loop_timeout));
			ctrl->timeout = kt;
		} else if (kt > ctrl->timeout) {
			ctrl->defer_eoi = true;
		}
	}

	info = info_for_irq(irq);
	if (xchg_acquire(&info->is_active, 1))
		return;

	if (ctrl->defer_eoi) {
		info->eoi_cpu = smp_processor_id();
		info->irq_epoch = __this_cpu_read(irq_epoch);
		info->eoi_time = get_jiffies_64() + event_eoi_delay;
	}

	generic_handle_irq(irq);
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
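
/*
 * xed_nesting_count prevents nested upcalls on the same cpu from running
 * the event loop concurrently: a nested invocation only clears
 * evtchn_upcall_pending and returns, and the outermost invocation keeps
 * looping until no more events are pending.
 */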
static void __xen_evtchn_do_upcall(void)
{
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	int cpu = get_cpu();
	unsigned count;
	struct evtchn_loop_ctrl ctrl = { 0 };

	read_lock(&evtchn_rwlock);

	do {
		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

		xen_evtchn_handle_events(cpu, &ctrl);

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	read_unlock(&evtchn_rwlock);

	/*
	 * Increment irq_epoch only now to defer EOIs only for
	 * xen_irq_lateeoi() invocations occurring from inside the loop
	 * above.
	 */
	__this_cpu_inc(irq_epoch);

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
#ifdef CONFIG_X86
	inc_irq_stat(irq_hv_callback_count);
#endif

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	bind_evtchn_to_cpu(evtchn, info->cpu);
	/* This will be deferred until interrupt is processed */
	irq_set_affinity(irq, cpumask_of(info->cpu));

	/* Unmask the event channel. */
	enable_irq(irq);
}
  1333. /* Rebind an evtchn so that it gets delivered to a specific cpu */
  1334. static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
  1335. {
  1336. struct evtchn_bind_vcpu bind_vcpu;
  1337. evtchn_port_t evtchn = info ? info->evtchn : 0;
  1338. if (!VALID_EVTCHN(evtchn))
  1339. return -1;
  1340. if (!xen_support_evtchn_rebind())
  1341. return -1;
  1342. /* Send future instances of this interrupt to other vcpu. */
  1343. bind_vcpu.port = evtchn;
  1344. bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
  1345. /*
  1346. * Mask the event while changing the VCPU binding to prevent
  1347. * it being delivered on an unexpected VCPU.
  1348. */
  1349. do_mask(info, EVT_MASK_REASON_TEMPORARY);
  1350. /*
  1351. * If this fails, it usually just indicates that we're dealing with a
  1352. * virq or IPI channel, which don't actually need to be rebound. Ignore
  1353. * it, but don't do the xenlinux-level rebind in that case.
  1354. */
  1355. if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
  1356. bind_evtchn_to_cpu(evtchn, tcpu);
  1357. do_unmask(info, EVT_MASK_REASON_TEMPORARY);
  1358. return 0;
  1359. }
  1360. static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
  1361. bool force)
  1362. {
  1363. unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
  1364. int ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
  1365. if (!ret)
  1366. irq_data_update_effective_affinity(data, cpumask_of(tcpu));
  1367. return ret;
  1368. }
  1369. /* To be called with desc->lock held. */
  1370. int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
  1371. {
  1372. struct irq_data *d = irq_desc_get_irq_data(desc);
  1373. return set_affinity_irq(d, cpumask_of(tcpu), false);
  1374. }
  1375. EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
  1376. static void enable_dynirq(struct irq_data *data)
  1377. {
  1378. struct irq_info *info = info_for_irq(data->irq);
  1379. evtchn_port_t evtchn = info ? info->evtchn : 0;
  1380. if (VALID_EVTCHN(evtchn))
  1381. do_unmask(info, EVT_MASK_REASON_EXPLICIT);
  1382. }
  1383. static void disable_dynirq(struct irq_data *data)
  1384. {
  1385. struct irq_info *info = info_for_irq(data->irq);
  1386. evtchn_port_t evtchn = info ? info->evtchn : 0;
  1387. if (VALID_EVTCHN(evtchn))
  1388. do_mask(info, EVT_MASK_REASON_EXPLICIT);
  1389. }
  1390. static void ack_dynirq(struct irq_data *data)
  1391. {
  1392. struct irq_info *info = info_for_irq(data->irq);
  1393. evtchn_port_t evtchn = info ? info->evtchn : 0;
  1394. if (!VALID_EVTCHN(evtchn))
  1395. return;
  1396. if (unlikely(irqd_is_setaffinity_pending(data)) &&
  1397. likely(!irqd_irq_disabled(data))) {
  1398. do_mask(info, EVT_MASK_REASON_TEMPORARY);
  1399. event_handler_exit(info);
  1400. irq_move_masked_irq(data);
  1401. do_unmask(info, EVT_MASK_REASON_TEMPORARY);
  1402. } else
  1403. event_handler_exit(info);
  1404. }
  1405. static void mask_ack_dynirq(struct irq_data *data)
  1406. {
  1407. disable_dynirq(data);
  1408. ack_dynirq(data);
  1409. }
static void lateeoi_ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn)) {
		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
		ack_dynirq(data);
	}
}

static void lateeoi_mask_ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn)) {
		do_mask(info, EVT_MASK_REASON_EXPLICIT);
		ack_dynirq(data);
	}
}

static int retrigger_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	do_mask(info, EVT_MASK_REASON_TEMPORARY);
	set_evtchn(evtchn);
	do_unmask(info, EVT_MASK_REASON_TEMPORARY);

	return 1;
}
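/*
 * Suspend/resume support.  Event channel bindings do not survive a
 * suspend, so the helpers below re-establish the PIRQ, VIRQ and IPI
 * bindings from the information cached in struct irq_info.  They are
 * driven by xen_irq_resume() further down.
 */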
static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		   only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}
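/*
 * The helpers below manipulate an event channel's pending state directly.
 * Together with xen_poll_irq*() they support busy-wait style
 * synchronisation (for example the Xen PV spinlock code), where the IRQ
 * is kept disabled and polled for instead of being delivered.
 */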
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		event_handler_exit(info);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
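/*
 * Illustrative use of the poll helpers (a sketch, not code from this file),
 * roughly following the pattern used by callers such as the PV spinlock
 * implementation; 'irq' and 'wanted_condition' are placeholders:
 *
 *	xen_clear_irq_pending(irq);
 *	barrier();
 *	if (!wanted_condition)
 *		xen_poll_irq(irq);	// returns once the evtchn fires
 */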
/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}
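/*
 * irq_chip definitions.  Dynamically bound event channels use
 * xen_dynamic_chip (or xen_lateeoi_chip when delayed EOI handling is
 * requested), physical IRQs routed through Xen use xen_pirq_chip, and
 * per-CPU channels such as VIRQs and IPIs use xen_percpu_chip.
 */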
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_lateeoi_chip __read_mostly = {
	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
	.name			= "xen-dyn-lateeoi",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= lateeoi_ack_dynirq,
	.irq_mask_ack		= lateeoi_mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
			return;
		}
		pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
				xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif
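/*
 * "fifo_events" selects the FIFO-based event channel ABI.  xen_init_IRQ()
 * falls back to the 2-level ABI when FIFO initialisation fails or when the
 * parameter is set to false.
 */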
static bool fifo_events = true;
module_param(fifo_events, bool, 0);

static int xen_evtchn_cpu_prepare(unsigned int cpu)
{
	int ret = 0;

	xen_cpu_init_eoi(cpu);

	if (evtchn_ops->percpu_init)
		ret = evtchn_ops->percpu_init(cpu);

	return ret;
}

static int xen_evtchn_cpu_dead(unsigned int cpu)
{
	int ret = 0;

	if (evtchn_ops->percpu_deinit)
		ret = evtchn_ops->percpu_deinit(cpu);

	return ret;
}
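/*
 * Boot-time initialisation: choose the event channel ABI, register the CPU
 * hotplug callbacks above, allocate the evtchn -> irq translation table and
 * mask every channel until it is explicitly bound.  On x86, PIRQ EOI
 * handling is additionally wired up for PV and HVM domains.
 */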
void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;
	unsigned int evtchn;

	if (fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0)
		xen_evtchn_2l_init();

	xen_cpu_init_eoi(smp_processor_id());

	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
				  "xen/evtchn:prepare",
				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so
		 * that __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}