book3s_xive.c

  1. /*
  2. * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License, version 2, as
  6. * published by the Free Software Foundation.
  7. */
  8. #define pr_fmt(fmt) "xive-kvm: " fmt
  9. #include <linux/kernel.h>
  10. #include <linux/kvm_host.h>
  11. #include <linux/err.h>
  12. #include <linux/gfp.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/delay.h>
  15. #include <linux/percpu.h>
  16. #include <linux/cpumask.h>
  17. #include <linux/uaccess.h>
  18. #include <asm/kvm_book3s.h>
  19. #include <asm/kvm_ppc.h>
  20. #include <asm/hvcall.h>
  21. #include <asm/xics.h>
  22. #include <asm/xive.h>
  23. #include <asm/xive-regs.h>
  24. #include <asm/debug.h>
  25. #include <asm/debugfs.h>
  26. #include <asm/time.h>
  27. #include <asm/opal.h>
  28. #include <linux/debugfs.h>
  29. #include <linux/seq_file.h>
  30. #include "book3s_xive.h"
  31. /*
  32. * Virtual mode variants of the hcalls for use on radix/radix
  33. * with AIL. They require the VCPU's VP to be "pushed"
  34. *
  35. * We still instantiate them here because we use some of the
  36. * generated utility functions as well in this file.
  37. */
  38. #define XIVE_RUNTIME_CHECKS
  39. #define X_PFX xive_vm_
  40. #define X_STATIC static
  41. #define X_STAT_PFX stat_vm_
  42. #define __x_tima xive_tima
  43. #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
  44. #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
  45. #define __x_writeb __raw_writeb
  46. #define __x_readw __raw_readw
  47. #define __x_readq __raw_readq
  48. #define __x_writeq __raw_writeq
  49. #include "book3s_xive_template.c"
  50. /*
  51. * We leave a gap of a couple of interrupts in the queue to
  52. * account for the IPI and additional safety guard.
  53. */
  54. #define XIVE_Q_GAP 2
  55. /*
  56. * This is a simple trigger for a generic XIVE IRQ. This must
  57. * only be called for interrupts that support a trigger page
  58. */
  59. static bool xive_irq_trigger(struct xive_irq_data *xd)
  60. {
  61. /* This should be only for MSIs */
  62. if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
  63. return false;
  64. /* Those interrupts should always have a trigger page */
  65. if (WARN_ON(!xd->trig_mmio))
  66. return false;
  67. out_be64(xd->trig_mmio, 0);
  68. return true;
  69. }
  70. static irqreturn_t xive_esc_irq(int irq, void *data)
  71. {
  72. struct kvm_vcpu *vcpu = data;
  73. vcpu->arch.irq_pending = 1;
  74. smp_mb();
  75. if (vcpu->arch.ceded)
  76. kvmppc_fast_vcpu_kick(vcpu);
  77. /* Since we have the no-EOI flag, the interrupt is effectively
  78. * disabled now. Clearing xive_esc_on means we won't bother
  79. * doing so on the next entry.
  80. *
  81. * This also allows the entry code to know that if a PQ combination
  82. * of 10 is observed while xive_esc_on is true, it means the queue
  83. * contains an unprocessed escalation interrupt. We don't make use of
  84. * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
  85. */
  86. vcpu->arch.xive_esc_on = false;
  87. return IRQ_HANDLED;
  88. }
  89. static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
  90. {
  91. struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  92. struct xive_q *q = &xc->queues[prio];
  93. char *name = NULL;
  94. int rc;
  95. /* Already there ? */
  96. if (xc->esc_virq[prio])
  97. return 0;
  98. /* Hook up the escalation interrupt */
  99. xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
  100. if (!xc->esc_virq[prio]) {
  101. pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
  102. prio, xc->server_num);
  103. return -EIO;
  104. }
  105. if (xc->xive->single_escalation)
  106. name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
  107. vcpu->kvm->arch.lpid, xc->server_num);
  108. else
  109. name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
  110. vcpu->kvm->arch.lpid, xc->server_num, prio);
  111. if (!name) {
  112. pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
  113. prio, xc->server_num);
  114. rc = -ENOMEM;
  115. goto error;
  116. }
  117. pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
  118. rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
  119. IRQF_NO_THREAD, name, vcpu);
  120. if (rc) {
  121. pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
  122. prio, xc->server_num);
  123. goto error;
  124. }
  125. xc->esc_virq_names[prio] = name;
  126. /* In single escalation mode, we grab the ESB MMIO of the
  127. * interrupt and mask it. Also populate the VCPU v/raddr
  128. * of the ESB page for use by asm entry/exit code. Finally
  129. * set the XIVE_IRQ_NO_EOI flag which will prevent the
  130. * core code from performing an EOI on the escalation
  131. * interrupt, thus leaving it effectively masked after
  132. * it fires once.
  133. */
  134. if (xc->xive->single_escalation) {
  135. struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
  136. struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
  137. xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
  138. vcpu->arch.xive_esc_raddr = xd->eoi_page;
  139. vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
  140. xd->flags |= XIVE_IRQ_NO_EOI;
  141. }
  142. return 0;
  143. error:
  144. irq_dispose_mapping(xc->esc_virq[prio]);
  145. xc->esc_virq[prio] = 0;
  146. kfree(name);
  147. return rc;
  148. }
  149. static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
  150. {
  151. struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  152. struct kvmppc_xive *xive = xc->xive;
  153. struct xive_q *q = &xc->queues[prio];
  154. void *qpage;
  155. int rc;
  156. if (WARN_ON(q->qpage))
  157. return 0;
  158. /* Allocate the queue and retrieve infos on current node for now */
  159. qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
  160. if (!qpage) {
  161. pr_err("Failed to allocate queue %d for VCPU %d\n",
  162. prio, xc->server_num);
  163. return -ENOMEM;
  164. }
  165. memset(qpage, 0, 1 << xive->q_order);
  166. /*
  167. * Reconfigure the queue. This will set q->qpage only once the
  168. * queue is fully configured. This is a requirement for prio 0
  169. * as we will stop doing EOIs for every IPI as soon as we observe
  170. * qpage being non-NULL, and instead will only EOI when we receive
  171. * corresponding queue 0 entries
  172. */
  173. rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
  174. xive->q_order, true);
  175. if (rc)
  176. pr_err("Failed to configure queue %d for VCPU %d\n",
  177. prio, xc->server_num);
  178. return rc;
  179. }
  180. /* Called with kvm_lock held */
  181. static int xive_check_provisioning(struct kvm *kvm, u8 prio)
  182. {
  183. struct kvmppc_xive *xive = kvm->arch.xive;
  184. struct kvm_vcpu *vcpu;
  185. int i, rc;
  186. lockdep_assert_held(&kvm->lock);
  187. /* Already provisioned ? */
  188. if (xive->qmap & (1 << prio))
  189. return 0;
  190. pr_devel("Provisioning prio... %d\n", prio);
  191. /* Provision each VCPU and enable escalations if needed */
  192. kvm_for_each_vcpu(i, vcpu, kvm) {
  193. if (!vcpu->arch.xive_vcpu)
  194. continue;
  195. rc = xive_provision_queue(vcpu, prio);
  196. if (rc == 0 && !xive->single_escalation)
  197. xive_attach_escalation(vcpu, prio);
  198. if (rc)
  199. return rc;
  200. }
  201. /* Order previous stores and mark it as provisioned */
  202. mb();
  203. xive->qmap |= (1 << prio);
  204. return 0;
  205. }
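/*
 * Minimal illustrative sketch: xive->qmap is a bitmap with one bit per
 * priority whose queues have been provisioned on every VCPU, which is
 * what xive_check_provisioning() above tests and then sets. The helper
 * name below is hypothetical, not an existing API.
 */
static inline bool xive_prio_provisioned_sketch(struct kvmppc_xive *xive,
						u8 prio)
{
	return !!(xive->qmap & (1 << prio));
}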
  206. static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
  207. {
  208. struct kvm_vcpu *vcpu;
  209. struct kvmppc_xive_vcpu *xc;
  210. struct xive_q *q;
  211. /* Locate target server */
  212. vcpu = kvmppc_xive_find_server(kvm, server);
  213. if (!vcpu) {
  214. pr_warn("%s: Can't find server %d\n", __func__, server);
  215. return;
  216. }
  217. xc = vcpu->arch.xive_vcpu;
  218. if (WARN_ON(!xc))
  219. return;
  220. q = &xc->queues[prio];
  221. atomic_inc(&q->pending_count);
  222. }
  223. static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
  224. {
  225. struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  226. struct xive_q *q;
  227. u32 max;
  228. if (WARN_ON(!xc))
  229. return -ENXIO;
  230. if (!xc->valid)
  231. return -ENXIO;
  232. q = &xc->queues[prio];
  233. if (WARN_ON(!q->qpage))
  234. return -ENXIO;
  235. /* Calculate max number of interrupts in that queue. */
  236. max = (q->msk + 1) - XIVE_Q_GAP;
  237. return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
  238. }
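/*
 * Minimal illustrative sketch of the capacity rule used above: a queue
 * with index mask q->msk holds q->msk + 1 entries, of which XIVE_Q_GAP
 * are kept free for the IPI and as a safety margin, e.g. 1024 - 2 =
 * 1022 usable slots for a 1024-entry queue. The helper name is
 * hypothetical.
 */
static inline u32 xive_q_capacity_sketch(struct xive_q *q)
{
	return (q->msk + 1) - XIVE_Q_GAP;
}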
  239. static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
  240. {
  241. struct kvm_vcpu *vcpu;
  242. int i, rc;
  243. /* Locate target server */
  244. vcpu = kvmppc_xive_find_server(kvm, *server);
  245. if (!vcpu) {
  246. pr_devel("Can't find server %d\n", *server);
  247. return -EINVAL;
  248. }
  249. pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
  250. /* Try pick it */
  251. rc = xive_try_pick_queue(vcpu, prio);
  252. if (rc == 0)
  253. return rc;
  254. pr_devel(" .. failed, looking up candidate...\n");
  255. /* Failed, pick another VCPU */
  256. kvm_for_each_vcpu(i, vcpu, kvm) {
  257. if (!vcpu->arch.xive_vcpu)
  258. continue;
  259. rc = xive_try_pick_queue(vcpu, prio);
  260. if (rc == 0) {
  261. *server = vcpu->arch.xive_vcpu->server_num;
  262. pr_devel(" found on 0x%x/%d\n", *server, prio);
  263. return rc;
  264. }
  265. }
  266. pr_devel(" no available target !\n");
  267. /* No available target ! */
  268. return -EBUSY;
  269. }
  270. static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
  271. {
  272. return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
  273. }
  274. static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
  275. struct kvmppc_xive_src_block *sb,
  276. struct kvmppc_xive_irq_state *state)
  277. {
  278. struct xive_irq_data *xd;
  279. u32 hw_num;
  280. u8 old_prio;
  281. u64 val;
  282. /*
  283. * Take the lock, set masked, try again if racing
  284. * with H_EOI
  285. */
  286. for (;;) {
  287. arch_spin_lock(&sb->lock);
  288. old_prio = state->guest_priority;
  289. state->guest_priority = MASKED;
  290. mb();
  291. if (!state->in_eoi)
  292. break;
  293. state->guest_priority = old_prio;
  294. arch_spin_unlock(&sb->lock);
  295. }
  296. /* No change ? Bail */
  297. if (old_prio == MASKED)
  298. return old_prio;
  299. /* Get the right irq */
  300. kvmppc_xive_select_irq(state, &hw_num, &xd);
  301. /*
  302. * If the interrupt is marked as needing masking via
  303. * firmware, we do it here. Firmware masking however
  304. * is "lossy", it won't return the old p and q bits
  305. * and won't set the interrupt to a state where it will
  306. * record queued ones. If this is an issue we should do
  307. * lazy masking instead.
  308. *
  309. * For now, we work around this in unmask by forcing
  310. * an interrupt whenever we unmask a non-LSI via FW
  311. * (if ever).
  312. */
  313. if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
  314. xive_native_configure_irq(hw_num,
  315. xive_vp(xive, state->act_server),
  316. MASKED, state->number);
  317. /* set old_p so we can track if an H_EOI was done */
  318. state->old_p = true;
  319. state->old_q = false;
  320. } else {
  321. /* Set PQ to 10, return old P and old Q and remember them */
  322. val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
  323. state->old_p = !!(val & 2);
  324. state->old_q = !!(val & 1);
  325. /*
  326. * Synchronize hardware to ensure the queues are updated
  327. * when masking
  328. */
  329. xive_native_sync_source(hw_num);
  330. }
  331. return old_prio;
  332. }
  333. static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
  334. struct kvmppc_xive_irq_state *state)
  335. {
  336. /*
  337. * Take the lock try again if racing with H_EOI
  338. */
  339. for (;;) {
  340. arch_spin_lock(&sb->lock);
  341. if (!state->in_eoi)
  342. break;
  343. arch_spin_unlock(&sb->lock);
  344. }
  345. }
  346. static void xive_finish_unmask(struct kvmppc_xive *xive,
  347. struct kvmppc_xive_src_block *sb,
  348. struct kvmppc_xive_irq_state *state,
  349. u8 prio)
  350. {
  351. struct xive_irq_data *xd;
  352. u32 hw_num;
  353. /* If we aren't changing a thing, move on */
  354. if (state->guest_priority != MASKED)
  355. goto bail;
  356. /* Get the right irq */
  357. kvmppc_xive_select_irq(state, &hw_num, &xd);
  358. /*
  359. * See comment in xive_lock_and_mask() concerning masking
  360. * via firmware.
  361. */
  362. if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
  363. xive_native_configure_irq(hw_num,
  364. xive_vp(xive, state->act_server),
  365. state->act_priority, state->number);
  366. /* If an EOI is needed, do it here */
  367. if (!state->old_p)
  368. xive_vm_source_eoi(hw_num, xd);
  369. /* If this is not an LSI, force a trigger */
  370. if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
  371. xive_irq_trigger(xd);
  372. goto bail;
  373. }
  374. /* Old Q set, set PQ to 11 */
  375. if (state->old_q)
  376. xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
  377. /*
  378. * If not old P, then perform an "effective" EOI,
  379. * on the source. This will handle the cases where
  380. * FW EOI is needed.
  381. */
  382. if (!state->old_p)
  383. xive_vm_source_eoi(hw_num, xd);
  384. /* Synchronize ordering and mark unmasked */
  385. mb();
  386. bail:
  387. state->guest_priority = prio;
  388. }
  389. /*
  390. * Target an interrupt to a given server/prio, this will fallback
  391. * to another server if necessary and perform the HW targetting
  392. * updates as needed
  393. *
  394. * NOTE: Must be called with the state lock held
  395. */
  396. static int xive_target_interrupt(struct kvm *kvm,
  397. struct kvmppc_xive_irq_state *state,
  398. u32 server, u8 prio)
  399. {
  400. struct kvmppc_xive *xive = kvm->arch.xive;
  401. u32 hw_num;
  402. int rc;
  403. /*
  404. * This will return a tentative server and actual
  405. * priority. The count for that new target will have
  406. * already been incremented.
  407. */
  408. rc = xive_select_target(kvm, &server, prio);
  409. /*
  410. * We failed to find a target ? Not much we can do
  411. * at least until we support the GIQ.
  412. */
  413. if (rc)
  414. return rc;
  415. /*
  416. * Increment the old queue pending count if there
  417. * was one so that the old queue count gets adjusted later
  418. * when observed to be empty.
  419. */
  420. if (state->act_priority != MASKED)
  421. xive_inc_q_pending(kvm,
  422. state->act_server,
  423. state->act_priority);
  424. /*
  425. * Update state and HW
  426. */
  427. state->act_priority = prio;
  428. state->act_server = server;
  429. /* Get the right irq */
  430. kvmppc_xive_select_irq(state, &hw_num, NULL);
  431. return xive_native_configure_irq(hw_num,
  432. xive_vp(xive, server),
  433. prio, state->number);
  434. }
  435. /*
  436. * Targetting rules: In order to avoid losing track of
  437. * pending interrupts across mask and unmask, which would
  438. * allow queue overflows, we implement the following rules:
  439. *
  440. * - Unless it was never enabled (or we run out of capacity)
  441. * an interrupt is always targetted at a valid server/queue
  442. * pair even when "masked" by the guest. This pair tends to
  443. * be the last one used but it can be changed under some
  444. * circumstances. That allows us to separate targetting
  445. * from masking, we only handle accounting during (re)targetting,
  446. * this also allows us to let an interrupt drain into its target
  447. * queue after masking, avoiding complex schemes to remove
  448. * interrupts out of remote processor queues.
  449. *
  450. * - When masking, we set PQ to 10 and save the previous value
  451. * of P and Q.
  452. *
  453. * - When unmasking, if saved Q was set, we set PQ to 11
  454. * otherwise we leave PQ to the HW state which will be either
  455. * 10 if nothing happened or 11 if the interrupt fired while
  456. * masked. Effectively we are OR'ing the previous Q into the
  457. * HW Q.
  458. *
  459. * Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
  460. * which will unmask the interrupt and shoot a new one if Q was
  461. * set.
  462. *
  463. * Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
  464. * effectively meaning an H_EOI from the guest is still expected
  465. * for that interrupt).
  466. *
  467. * - If H_EOI occurs while masked, we clear the saved P.
  468. *
  469. * - When changing target, we account on the new target and
  470. * increment a separate "pending" counter on the old one.
  471. * This pending counter will be used to decrement the old
  472. * target's count when its queue has been observed empty.
  473. */
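/*
 * Minimal illustrative sketch of the unmask half of the rules above,
 * assuming the source was previously masked with its P/Q bits saved in
 * old_p/old_q. The real logic lives in xive_finish_unmask(), which also
 * handles firmware-masked sources and locking; the helper name is
 * hypothetical.
 */
static inline void xive_unmask_pq_sketch(struct kvmppc_xive_irq_state *state,
					 u32 hw_num, struct xive_irq_data *xd)
{
	/* Saved Q set: OR it back into the HW Q bit (PQ <- 11) */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/* Saved P clear: effective EOI, which re-fires the source if Q was set */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);
}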
  474. int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
  475. u32 priority)
  476. {
  477. struct kvmppc_xive *xive = kvm->arch.xive;
  478. struct kvmppc_xive_src_block *sb;
  479. struct kvmppc_xive_irq_state *state;
  480. u8 new_act_prio;
  481. int rc = 0;
  482. u16 idx;
  483. if (!xive)
  484. return -ENODEV;
  485. pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
  486. irq, server, priority);
  487. /* First, check provisioning of queues */
  488. if (priority != MASKED)
  489. rc = xive_check_provisioning(xive->kvm,
  490. xive_prio_from_guest(priority));
  491. if (rc) {
  492. pr_devel(" provisioning failure %d !\n", rc);
  493. return rc;
  494. }
  495. sb = kvmppc_xive_find_source(xive, irq, &idx);
  496. if (!sb)
  497. return -EINVAL;
  498. state = &sb->irq_state[idx];
  499. /*
  500. * We first handle masking/unmasking since the locking
  501. * might need to be retried due to EOIs, we'll handle
  502. * targetting changes later. These functions will return
  503. * with the SB lock held.
  504. *
  505. * xive_lock_and_mask() will also set state->guest_priority
  506. * but won't otherwise change other fields of the state.
  507. *
  508. * xive_lock_for_unmask will not actually unmask, this will
  509. * be done later by xive_finish_unmask() once the targetting
  510. * has been done, so we don't try to unmask an interrupt
  511. * that hasn't yet been targetted.
  512. */
  513. if (priority == MASKED)
  514. xive_lock_and_mask(xive, sb, state);
  515. else
  516. xive_lock_for_unmask(sb, state);
  517. /*
  518. * Then we handle targetting.
  519. *
  520. * First calculate a new "actual priority"
  521. */
  522. new_act_prio = state->act_priority;
  523. if (priority != MASKED)
  524. new_act_prio = xive_prio_from_guest(priority);
  525. pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
  526. new_act_prio, state->act_server, state->act_priority);
  527. /*
  528. * Then check if we actually need to change anything,
  529. *
  530. * The condition for re-targetting the interrupt is that
  531. * we have a valid new priority (new_act_prio is not 0xff)
  532. * and either the server or the priority changed.
  533. *
  534. * Note: If act_priority was ff and the new priority is
  535. * also ff, we don't do anything and leave the interrupt
  536. * untargetted. An attempt of doing an int_on on an
  537. * untargetted interrupt will fail. If that is a problem
  538. * we could initialize interrupts with valid default
  539. */
  540. if (new_act_prio != MASKED &&
  541. (state->act_server != server ||
  542. state->act_priority != new_act_prio))
  543. rc = xive_target_interrupt(kvm, state, server, new_act_prio);
  544. /*
  545. * Perform the final unmasking of the interrupt source
  546. * if necessary
  547. */
  548. if (priority != MASKED)
  549. xive_finish_unmask(xive, sb, state, priority);
  550. /*
  551. * Finally Update saved_priority to match. Only int_on/off
  552. * set this field to a different value.
  553. */
  554. state->saved_priority = priority;
  555. arch_spin_unlock(&sb->lock);
  556. return rc;
  557. }
  558. int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
  559. u32 *priority)
  560. {
  561. struct kvmppc_xive *xive = kvm->arch.xive;
  562. struct kvmppc_xive_src_block *sb;
  563. struct kvmppc_xive_irq_state *state;
  564. u16 idx;
  565. if (!xive)
  566. return -ENODEV;
  567. sb = kvmppc_xive_find_source(xive, irq, &idx);
  568. if (!sb)
  569. return -EINVAL;
  570. state = &sb->irq_state[idx];
  571. arch_spin_lock(&sb->lock);
  572. *server = state->act_server;
  573. *priority = state->guest_priority;
  574. arch_spin_unlock(&sb->lock);
  575. return 0;
  576. }
  577. int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
  578. {
  579. struct kvmppc_xive *xive = kvm->arch.xive;
  580. struct kvmppc_xive_src_block *sb;
  581. struct kvmppc_xive_irq_state *state;
  582. u16 idx;
  583. if (!xive)
  584. return -ENODEV;
  585. sb = kvmppc_xive_find_source(xive, irq, &idx);
  586. if (!sb)
  587. return -EINVAL;
  588. state = &sb->irq_state[idx];
  589. pr_devel("int_on(irq=0x%x)\n", irq);
  590. /*
  591. * Check if interrupt was not targetted
  592. */
  593. if (state->act_priority == MASKED) {
  594. pr_devel("int_on on untargetted interrupt\n");
  595. return -EINVAL;
  596. }
  597. /* If saved_priority is 0xff, do nothing */
  598. if (state->saved_priority == MASKED)
  599. return 0;
  600. /*
  601. * Lock and unmask it.
  602. */
  603. xive_lock_for_unmask(sb, state);
  604. xive_finish_unmask(xive, sb, state, state->saved_priority);
  605. arch_spin_unlock(&sb->lock);
  606. return 0;
  607. }
  608. int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
  609. {
  610. struct kvmppc_xive *xive = kvm->arch.xive;
  611. struct kvmppc_xive_src_block *sb;
  612. struct kvmppc_xive_irq_state *state;
  613. u16 idx;
  614. if (!xive)
  615. return -ENODEV;
  616. sb = kvmppc_xive_find_source(xive, irq, &idx);
  617. if (!sb)
  618. return -EINVAL;
  619. state = &sb->irq_state[idx];
  620. pr_devel("int_off(irq=0x%x)\n", irq);
  621. /*
  622. * Lock and mask
  623. */
  624. state->saved_priority = xive_lock_and_mask(xive, sb, state);
  625. arch_spin_unlock(&sb->lock);
  626. return 0;
  627. }
  628. static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
  629. {
  630. struct kvmppc_xive_src_block *sb;
  631. struct kvmppc_xive_irq_state *state;
  632. u16 idx;
  633. sb = kvmppc_xive_find_source(xive, irq, &idx);
  634. if (!sb)
  635. return false;
  636. state = &sb->irq_state[idx];
  637. if (!state->valid)
  638. return false;
  639. /*
  640. * Trigger the IPI. This assumes we never restore a pass-through
  641. * interrupt which should be safe enough
  642. */
  643. xive_irq_trigger(&state->ipi_data);
  644. return true;
  645. }
  646. u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
  647. {
  648. struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  649. if (!xc)
  650. return 0;
  651. /* Return the per-cpu state for state saving/migration */
  652. return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
  653. (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
  654. (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
  655. }
  656. int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
  657. {
  658. struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  659. struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
  660. u8 cppr, mfrr;
  661. u32 xisr;
  662. if (!xc || !xive)
  663. return -ENOENT;
  664. /* Grab individual state fields. We don't use pending_pri */
  665. cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
  666. xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
  667. KVM_REG_PPC_ICP_XISR_MASK;
  668. mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
  669. pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
  670. xc->server_num, cppr, mfrr, xisr);
  671. /*
  672. * We can't update the state of a "pushed" VCPU, but that
  673. * shouldn't happen.
  674. */
  675. if (WARN_ON(vcpu->arch.xive_pushed))
  676. return -EIO;
  677. /* Update VCPU HW saved state */
  678. vcpu->arch.xive_saved_state.cppr = cppr;
  679. xc->hw_cppr = xc->cppr = cppr;
  680. /*
  681. * Update MFRR state. If it's not 0xff, we mark the VCPU as
  682. * having a pending MFRR change, which will re-evaluate the
  683. * target. The VCPU will thus potentially get a spurious
  684. * interrupt but that's not a big deal.
  685. */
  686. xc->mfrr = mfrr;
  687. if (mfrr < cppr)
  688. xive_irq_trigger(&xc->vp_ipi_data);
  689. /*
  690. * Now saved XIRR is "interesting". It means there's something in
  691. * the legacy "1 element" queue... for an IPI we simply ignore it,
  692. * as the MFRR restore will handle that. For anything else we need
  693. * to force a resend of the source.
  694. * However the source may not have been setup yet. If that's the
  695. * case, we keep that info and increment a counter in the xive to
  696. * tell subsequent xive_set_source() to go look.
  697. */
  698. if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
  699. xc->delayed_irq = xisr;
  700. xive->delayed_irqs++;
  701. pr_devel(" xisr restore delayed\n");
  702. }
  703. return 0;
  704. }
  705. int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
  706. struct irq_desc *host_desc)
  707. {
  708. struct kvmppc_xive *xive = kvm->arch.xive;
  709. struct kvmppc_xive_src_block *sb;
  710. struct kvmppc_xive_irq_state *state;
  711. struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
  712. unsigned int host_irq = irq_desc_get_irq(host_desc);
  713. unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
  714. u16 idx;
  715. u8 prio;
  716. int rc;
  717. if (!xive)
  718. return -ENODEV;
  719. pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",guest_irq, hw_irq);
  720. sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
  721. if (!sb)
  722. return -EINVAL;
  723. state = &sb->irq_state[idx];
  724. /*
  725. * Mark the passed-through interrupt as going to a VCPU,
  726. * this will prevent further EOIs and similar operations
  727. * from the XIVE code. It will also mask the interrupt
  728. * to either PQ=10 or 11 state, the latter if the interrupt
  729. * is pending. This will allow us to unmask or retrigger it
  730. * after routing it to the guest with a simple EOI.
  731. *
  732. * The "state" argument is a "token", all it needs is to be
  733. * non-NULL to switch to passed-through or NULL for the
  734. * other way around. We may not yet have an actual VCPU
  735. * target here and we don't really care.
  736. */
  737. rc = irq_set_vcpu_affinity(host_irq, state);
  738. if (rc) {
  739. pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
  740. return rc;
  741. }
  742. /*
  743. * Mask and read state of IPI. We need to know if its P bit
  744. * is set as that means it's potentially already using a
  745. * queue entry in the target
  746. */
  747. prio = xive_lock_and_mask(xive, sb, state);
  748. pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
  749. state->old_p, state->old_q);
  750. /* Turn the IPI hard off */
  751. xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
  752. /* Grab info about irq */
  753. state->pt_number = hw_irq;
  754. state->pt_data = irq_data_get_irq_handler_data(host_data);
  755. /*
  756. * Configure the IRQ to match the existing configuration of
  757. * the IPI if it was already targetted. Otherwise this will
  758. * mask the interrupt in a lossy way (act_priority is 0xff)
  759. * which is fine for a never started interrupt.
  760. */
  761. xive_native_configure_irq(hw_irq,
  762. xive_vp(xive, state->act_server),
  763. state->act_priority, state->number);
  764. /*
  765. * We do an EOI to enable the interrupt (and retrigger if needed)
  766. * if the guest has the interrupt unmasked and the P bit was *not*
  767. * set in the IPI. If it was set, we know a slot may still be in
  768. * use in the target queue thus we have to wait for a guest
  769. * originated EOI
  770. */
  771. if (prio != MASKED && !state->old_p)
  772. xive_vm_source_eoi(hw_irq, state->pt_data);
  773. /* Clear old_p/old_q as they are no longer relevant */
  774. state->old_p = state->old_q = false;
  775. /* Restore guest prio (unlocks EOI) */
  776. mb();
  777. state->guest_priority = prio;
  778. arch_spin_unlock(&sb->lock);
  779. return 0;
  780. }
  781. EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
  782. int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
  783. struct irq_desc *host_desc)
  784. {
  785. struct kvmppc_xive *xive = kvm->arch.xive;
  786. struct kvmppc_xive_src_block *sb;
  787. struct kvmppc_xive_irq_state *state;
  788. unsigned int host_irq = irq_desc_get_irq(host_desc);
  789. u16 idx;
  790. u8 prio;
  791. int rc;
  792. if (!xive)
  793. return -ENODEV;
  794. pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
  795. sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
  796. if (!sb)
  797. return -EINVAL;
  798. state = &sb->irq_state[idx];
  799. /*
  800. * Mask and read state of IRQ. We need to know if its P bit
  801. * is set as that means it's potentially already using a
  802. * queue entry in the target
  803. */
  804. prio = xive_lock_and_mask(xive, sb, state);
  805. pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
  806. state->old_p, state->old_q);
  807. /*
  808. * If old_p is set, the interrupt is pending, we switch it to
  809. * PQ=11. This will force a resend in the host so the interrupt
  810. * isn't lost to whatever host driver may pick it up
  811. */
  812. if (state->old_p)
  813. xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
  814. /* Release the passed-through interrupt to the host */
  815. rc = irq_set_vcpu_affinity(host_irq, NULL);
  816. if (rc) {
  817. pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
  818. return rc;
  819. }
  820. /* Forget about the IRQ */
  821. state->pt_number = 0;
  822. state->pt_data = NULL;
  823. /* Reconfigure the IPI */
  824. xive_native_configure_irq(state->ipi_number,
  825. xive_vp(xive, state->act_server),
  826. state->act_priority, state->number);
  827. /*
  828. * If old_p is set (we have a queue entry potentially
  829. * occupied) or the interrupt is masked, we set the IPI
  830. * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
  831. */
  832. if (prio == MASKED || state->old_p)
  833. xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
  834. else
  835. xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
  836. /* Restore guest prio (unlocks EOI) */
  837. mb();
  838. state->guest_priority = prio;
  839. arch_spin_unlock(&sb->lock);
  840. return 0;
  841. }
  842. EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
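/*
 * Minimal illustrative sketch of how a pass-through user would pair the
 * two exports above around the lifetime of a mapping. Error handling
 * and the way the host interrupt is obtained are simplified; the
 * function name is hypothetical.
 */
static int __maybe_unused xive_passthrough_sketch(struct kvm *kvm,
						  unsigned long guest_irq,
						  unsigned int host_irq)
{
	struct irq_desc *desc = irq_to_desc(host_irq);
	int rc;

	if (!desc)
		return -EINVAL;

	/* Route the hardware source directly at the guest interrupt */
	rc = kvmppc_xive_set_mapped(kvm, guest_irq, desc);
	if (rc)
		return rc;

	/* ... the interrupt is now delivered to the guest ... */

	/* Revert to the internal IPI backing the guest interrupt */
	return kvmppc_xive_clr_mapped(kvm, guest_irq, desc);
}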
  843. static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
  844. {
  845. struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  846. struct kvm *kvm = vcpu->kvm;
  847. struct kvmppc_xive *xive = kvm->arch.xive;
  848. int i, j;
  849. for (i = 0; i <= xive->max_sbid; i++) {
  850. struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
  851. if (!sb)
  852. continue;
  853. for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
  854. struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
  855. if (!state->valid)
  856. continue;
  857. if (state->act_priority == MASKED)
  858. continue;
  859. if (state->act_server != xc->server_num)
  860. continue;
  861. /* Clean it up */
  862. arch_spin_lock(&sb->lock);
  863. state->act_priority = MASKED;
  864. xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
  865. xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
  866. if (state->pt_number) {
  867. xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
  868. xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
  869. }
  870. arch_spin_unlock(&sb->lock);
  871. }
  872. }
  873. }
  874. void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
  875. {
  876. struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  877. struct kvmppc_xive *xive = xc->xive;
  878. int i;
  879. pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
  880. /* Ensure no interrupt is still routed to that VP */
  881. xc->valid = false;
  882. kvmppc_xive_disable_vcpu_interrupts(vcpu);
  883. /* Mask the VP IPI */
  884. xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
  885. /* Free escalations */
  886. for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
  887. if (xc->esc_virq[i]) {
  888. free_irq(xc->esc_virq[i], vcpu);
  889. irq_dispose_mapping(xc->esc_virq[i]);
  890. kfree(xc->esc_virq_names[i]);
  891. }
  892. }
  893. /* Disable the VP */
  894. xive_native_disable_vp(xc->vp_id);
  895. /* Free the queues */
  896. for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
  897. struct xive_q *q = &xc->queues[i];
  898. xive_native_disable_queue(xc->vp_id, q, i);
  899. if (q->qpage) {
  900. free_pages((unsigned long)q->qpage,
  901. xive->q_page_order);
  902. q->qpage = NULL;
  903. }
  904. }
  905. /* Free the IPI */
  906. if (xc->vp_ipi) {
  907. xive_cleanup_irq_data(&xc->vp_ipi_data);
  908. xive_native_free_irq(xc->vp_ipi);
  909. }
  910. /* Free the VP */
  911. kfree(xc);
  912. }
  913. int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
  914. struct kvm_vcpu *vcpu, u32 cpu)
  915. {
  916. struct kvmppc_xive *xive = dev->private;
  917. struct kvmppc_xive_vcpu *xc;
  918. int i, r = -EBUSY;
  919. pr_devel("connect_vcpu(cpu=%d)\n", cpu);
  920. if (dev->ops != &kvm_xive_ops) {
  921. pr_devel("Wrong ops !\n");
  922. return -EPERM;
  923. }
  924. if (xive->kvm != vcpu->kvm)
  925. return -EPERM;
  926. if (vcpu->arch.irq_type)
  927. return -EBUSY;
  928. if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
  929. pr_devel("Duplicate !\n");
  930. return -EEXIST;
  931. }
  932. if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
  933. pr_devel("Out of bounds !\n");
  934. return -EINVAL;
  935. }
  936. xc = kzalloc(sizeof(*xc), GFP_KERNEL);
  937. if (!xc)
  938. return -ENOMEM;
  939. /* We need to synchronize with queue provisioning */
  940. mutex_lock(&vcpu->kvm->lock);
  941. vcpu->arch.xive_vcpu = xc;
  942. xc->xive = xive;
  943. xc->vcpu = vcpu;
  944. xc->server_num = cpu;
  945. xc->vp_id = xive_vp(xive, cpu);
  946. xc->mfrr = 0xff;
  947. xc->valid = true;
  948. r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
  949. if (r)
  950. goto bail;
  951. /* Configure VCPU fields for use by assembly push/pull */
  952. vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
  953. vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
  954. /* Allocate IPI */
  955. xc->vp_ipi = xive_native_alloc_irq();
  956. if (!xc->vp_ipi) {
  957. pr_err("Failed to allocate xive irq for VCPU IPI\n");
  958. r = -EIO;
  959. goto bail;
  960. }
  961. pr_devel(" IPI=0x%x\n", xc->vp_ipi);
  962. r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
  963. if (r)
  964. goto bail;
  965. /*
  966. * Enable the VP first as the single escalation mode will
  967. * affect escalation interrupts numbering
  968. */
  969. r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
  970. if (r) {
  971. pr_err("Failed to enable VP in OPAL, err %d\n", r);
  972. goto bail;
  973. }
  974. /*
  975. * Initialize queues. Initially we set them all for no queueing
  976. * and we enable escalation for queue 0 only which we'll use for
  977. * our mfrr change notifications. If the VCPU is hot-plugged, we
  978. * do, however, handle provisioning based on the existing "map"
  979. * of enabled queues.
  980. */
  981. for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
  982. struct xive_q *q = &xc->queues[i];
  983. /* Single escalation, no queue 7 */
  984. if (i == 7 && xive->single_escalation)
  985. break;
  986. /* Is queue already enabled ? Provision it */
  987. if (xive->qmap & (1 << i)) {
  988. r = xive_provision_queue(vcpu, i);
  989. if (r == 0 && !xive->single_escalation)
  990. xive_attach_escalation(vcpu, i);
  991. if (r)
  992. goto bail;
  993. } else {
  994. r = xive_native_configure_queue(xc->vp_id,
  995. q, i, NULL, 0, true);
  996. if (r) {
  997. pr_err("Failed to configure queue %d for VCPU %d\n",
  998. i, cpu);
  999. goto bail;
  1000. }
  1001. }
  1002. }
  1003. /* If not done above, attach priority 0 escalation */
  1004. r = xive_attach_escalation(vcpu, 0);
  1005. if (r)
  1006. goto bail;
  1007. /* Route the IPI */
  1008. r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
  1009. if (!r)
  1010. xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
  1011. bail:
  1012. mutex_unlock(&vcpu->kvm->lock);
  1013. if (r) {
  1014. kvmppc_xive_cleanup_vcpu(vcpu);
  1015. return r;
  1016. }
  1017. vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
  1018. return 0;
  1019. }
  1020. /*
  1021. * Scanning of queues before/after migration save
  1022. */
  1023. static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
  1024. {
  1025. struct kvmppc_xive_src_block *sb;
  1026. struct kvmppc_xive_irq_state *state;
  1027. u16 idx;
  1028. sb = kvmppc_xive_find_source(xive, irq, &idx);
  1029. if (!sb)
  1030. return;
  1031. state = &sb->irq_state[idx];
  1032. /* Some sanity checking */
  1033. if (!state->valid) {
  1034. pr_err("invalid irq 0x%x in cpu queue!\n", irq);
  1035. return;
  1036. }
  1037. /*
  1038. * If the interrupt is in a queue it should have P set.
  1039. * We warn so that it gets reported. A backtrace isn't useful
  1040. * so no need to use a WARN_ON.
  1041. */
  1042. if (!state->saved_p)
  1043. pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
  1044. /* Set flag */
  1045. state->in_queue = true;
  1046. }
  1047. static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
  1048. struct kvmppc_xive_src_block *sb,
  1049. u32 irq)
  1050. {
  1051. struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
  1052. if (!state->valid)
  1053. return;
  1054. /* Mask and save state, this will also sync HW queues */
  1055. state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
  1056. /* Transfer P and Q */
  1057. state->saved_p = state->old_p;
  1058. state->saved_q = state->old_q;
  1059. /* Unlock */
  1060. arch_spin_unlock(&sb->lock);
  1061. }
  1062. static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
  1063. struct kvmppc_xive_src_block *sb,
  1064. u32 irq)
  1065. {
  1066. struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
  1067. if (!state->valid)
  1068. return;
  1069. /*
  1070. * Lock / exclude EOI (not technically necessary if the
  1071. * guest isn't running concurrently). If this becomes a
  1072. * performance issue we can probably remove the lock.
  1073. */
  1074. xive_lock_for_unmask(sb, state);
  1075. /* Restore mask/prio if it wasn't masked */
  1076. if (state->saved_scan_prio != MASKED)
  1077. xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
  1078. /* Unlock */
  1079. arch_spin_unlock(&sb->lock);
  1080. }
  1081. static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
  1082. {
  1083. u32 idx = q->idx;
  1084. u32 toggle = q->toggle;
  1085. u32 irq;
  1086. do {
  1087. irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
  1088. if (irq > XICS_IPI)
  1089. xive_pre_save_set_queued(xive, irq);
  1090. } while(irq);
  1091. }
  1092. static void xive_pre_save_scan(struct kvmppc_xive *xive)
  1093. {
  1094. struct kvm_vcpu *vcpu = NULL;
  1095. int i, j;
  1096. /*
  1097. * See comment in xive_get_source() about how this
  1098. * works. Collect a stable state for all interrupts
  1099. */
  1100. for (i = 0; i <= xive->max_sbid; i++) {
  1101. struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
  1102. if (!sb)
  1103. continue;
  1104. for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
  1105. xive_pre_save_mask_irq(xive, sb, j);
  1106. }
  1107. /* Then scan the queues and update the "in_queue" flag */
  1108. kvm_for_each_vcpu(i, vcpu, xive->kvm) {
  1109. struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  1110. if (!xc)
  1111. continue;
  1112. for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
  1113. if (xc->queues[j].qpage)
  1114. xive_pre_save_queue(xive, &xc->queues[j]);
  1115. }
  1116. }
  1117. /* Finally restore interrupt states */
  1118. for (i = 0; i <= xive->max_sbid; i++) {
  1119. struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
  1120. if (!sb)
  1121. continue;
  1122. for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
  1123. xive_pre_save_unmask_irq(xive, sb, j);
  1124. }
  1125. }
  1126. static void xive_post_save_scan(struct kvmppc_xive *xive)
  1127. {
  1128. u32 i, j;
  1129. /* Clear all the in_queue flags */
  1130. for (i = 0; i <= xive->max_sbid; i++) {
  1131. struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
  1132. if (!sb)
  1133. continue;
  1134. for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
  1135. sb->irq_state[j].in_queue = false;
  1136. }
  1137. /* Next get_source() will do a new scan */
  1138. xive->saved_src_count = 0;
  1139. }
  1140. /*
  1141. * This returns the source configuration and state to user space.
  1142. */
  1143. static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
  1144. {
  1145. struct kvmppc_xive_src_block *sb;
  1146. struct kvmppc_xive_irq_state *state;
  1147. u64 __user *ubufp = (u64 __user *) addr;
  1148. u64 val, prio;
  1149. u16 idx;
  1150. sb = kvmppc_xive_find_source(xive, irq, &idx);
  1151. if (!sb)
  1152. return -ENOENT;
  1153. state = &sb->irq_state[idx];
  1154. if (!state->valid)
  1155. return -ENOENT;
  1156. pr_devel("get_source(%ld)...\n", irq);
  1157. /*
  1158. * So to properly save the state into something that looks like a
  1159. * XICS migration stream we cannot treat interrupts individually.
  1160. *
  1161. * We need, instead, mask them all (& save their previous PQ state)
  1162. * to get a stable state in the HW, then sync them to ensure that
  1163. * any interrupt that had already fired hits its queue, and finally
  1164. * scan all the queues to collect which interrupts are still present
  1165. * in the queues, so we can set the "pending" flag on them and
  1166. * they can be resent on restore.
  1167. *
  1168. * So we do it all when the "first" interrupt gets saved, all the
  1169. * state is collected at that point, the rest of xive_get_source()
  1170. * will merely collect and convert that state to the expected
  1171. * userspace bit mask.
  1172. */
  1173. if (xive->saved_src_count == 0)
  1174. xive_pre_save_scan(xive);
  1175. xive->saved_src_count++;
  1176. /* Convert saved state into something compatible with xics */
  1177. val = state->act_server;
  1178. prio = state->saved_scan_prio;
  1179. if (prio == MASKED) {
  1180. val |= KVM_XICS_MASKED;
  1181. prio = state->saved_priority;
  1182. }
  1183. val |= prio << KVM_XICS_PRIORITY_SHIFT;
  1184. if (state->lsi) {
  1185. val |= KVM_XICS_LEVEL_SENSITIVE;
  1186. if (state->saved_p)
  1187. val |= KVM_XICS_PENDING;
  1188. } else {
  1189. if (state->saved_p)
  1190. val |= KVM_XICS_PRESENTED;
  1191. if (state->saved_q)
  1192. val |= KVM_XICS_QUEUED;
  1193. /*
  1194. * We mark it pending (which will attempt a re-delivery)
  1195. * if we are in a queue *or* we were masked and had
  1196. * Q set which is equivalent to the XICS "masked pending"
  1197. * state
  1198. */
  1199. if (state->in_queue || (prio == MASKED && state->saved_q))
  1200. val |= KVM_XICS_PENDING;
  1201. }
  1202. /*
  1203. * If that was the last interrupt saved, reset the
  1204. * in_queue flags
  1205. */
  1206. if (xive->saved_src_count == xive->src_count)
  1207. xive_post_save_scan(xive);
  1208. /* Copy the result to userspace */
  1209. if (put_user(val, ubufp))
  1210. return -EFAULT;
  1211. return 0;
  1212. }
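/*
 * Minimal illustrative sketch of the layout produced above: the target
 * server sits in the low bits (KVM_XICS_DESTINATION_MASK), the priority
 * at KVM_XICS_PRIORITY_SHIFT, and the MASKED/LEVEL_SENSITIVE/PENDING/
 * PRESENTED/QUEUED flags above that. The decoder name is hypothetical.
 */
static inline void xive_decode_source_sketch(u64 val, u32 *server, u8 *prio,
					     bool *pending)
{
	*server = val & KVM_XICS_DESTINATION_MASK;
	*prio = (val >> KVM_XICS_PRIORITY_SHIFT) & 0xff;
	*pending = !!(val & KVM_XICS_PENDING);
}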
  1213. static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
  1214. int irq)
  1215. {
  1216. struct kvm *kvm = xive->kvm;
  1217. struct kvmppc_xive_src_block *sb;
  1218. int i, bid;
  1219. bid = irq >> KVMPPC_XICS_ICS_SHIFT;
  1220. mutex_lock(&kvm->lock);
  1221. /* block already exists - somebody else got here first */
  1222. if (xive->src_blocks[bid])
  1223. goto out;
  1224. /* Create the ICS */
  1225. sb = kzalloc(sizeof(*sb), GFP_KERNEL);
  1226. if (!sb)
  1227. goto out;
  1228. sb->id = bid;
  1229. for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
  1230. sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
  1231. sb->irq_state[i].guest_priority = MASKED;
  1232. sb->irq_state[i].saved_priority = MASKED;
  1233. sb->irq_state[i].act_priority = MASKED;
  1234. }
  1235. smp_wmb();
  1236. xive->src_blocks[bid] = sb;
  1237. if (bid > xive->max_sbid)
  1238. xive->max_sbid = bid;
  1239. out:
  1240. mutex_unlock(&kvm->lock);
  1241. return xive->src_blocks[bid];
  1242. }
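/*
 * Minimal illustrative sketch of the interrupt number layout used by
 * xive_create_src_block() above and kvmppc_xive_find_source(): the
 * block id is the number shifted down by KVMPPC_XICS_ICS_SHIFT and the
 * index is the remainder, assuming KVMPPC_XICS_IRQ_PER_ICS is
 * 1 << KVMPPC_XICS_ICS_SHIFT. Helper names are hypothetical.
 */
static inline u32 xive_irq_to_block_sketch(u32 irq)
{
	return irq >> KVMPPC_XICS_ICS_SHIFT;
}

static inline u16 xive_irq_to_index_sketch(u32 irq)
{
	return irq & (KVMPPC_XICS_IRQ_PER_ICS - 1);
}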
  1243. static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
  1244. {
  1245. struct kvm *kvm = xive->kvm;
  1246. struct kvm_vcpu *vcpu = NULL;
  1247. int i;
  1248. kvm_for_each_vcpu(i, vcpu, kvm) {
  1249. struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
  1250. if (!xc)
  1251. continue;
  1252. if (xc->delayed_irq == irq) {
  1253. xc->delayed_irq = 0;
  1254. xive->delayed_irqs--;
  1255. return true;
  1256. }
  1257. }
  1258. return false;
  1259. }
  1260. static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
  1261. {
  1262. struct kvmppc_xive_src_block *sb;
  1263. struct kvmppc_xive_irq_state *state;
  1264. u64 __user *ubufp = (u64 __user *) addr;
  1265. u16 idx;
  1266. u64 val;
  1267. u8 act_prio, guest_prio;
  1268. u32 server;
  1269. int rc = 0;
  1270. if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
  1271. return -ENOENT;
  1272. pr_devel("set_source(irq=0x%lx)\n", irq);
  1273. /* Find the source */
  1274. sb = kvmppc_xive_find_source(xive, irq, &idx);
  1275. if (!sb) {
  1276. pr_devel("No source, creating source block...\n");
  1277. sb = xive_create_src_block(xive, irq);
  1278. if (!sb) {
  1279. pr_devel("Failed to create block...\n");
  1280. return -ENOMEM;
  1281. }
  1282. }
  1283. state = &sb->irq_state[idx];
  1284. /* Read user passed data */
  1285. if (get_user(val, ubufp)) {
  1286. pr_devel("fault getting user info !\n");
  1287. return -EFAULT;
  1288. }
  1289. server = val & KVM_XICS_DESTINATION_MASK;
  1290. guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
  1291. pr_devel(" val=0x016%llx (server=0x%x, guest_prio=%d)\n",
  1292. val, server, guest_prio);
  1293. /*
  1294. * If the source doesn't already have an IPI, allocate
  1295. * one and get the corresponding data
  1296. */
  1297. if (!state->ipi_number) {
  1298. state->ipi_number = xive_native_alloc_irq();
  1299. if (state->ipi_number == 0) {
  1300. pr_devel("Failed to allocate IPI !\n");
  1301. return -ENOMEM;
  1302. }
  1303. xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
  1304. pr_devel(" src_ipi=0x%x\n", state->ipi_number);
  1305. }
  1306. /*
  1307. * We use lock_and_mask() to set us in the right masked
  1308. * state. We will override that state from the saved state
  1309. * further down, but this will handle the cases of interrupts
  1310. * that need FW masking. We set the initial guest_priority to
  1311. * 0 before calling it to ensure it actually performs the masking.
  1312. */
  1313. state->guest_priority = 0;
  1314. xive_lock_and_mask(xive, sb, state);
  1315. /*
  1316. * Now, we select a target if we have one. If we don't we
  1317. * leave the interrupt untargetted. It means that an interrupt
  1318. * can become "untargetted" across migration if it was masked
  1319. * by set_xive() but there is little we can do about it.
  1320. */
  1321. /* First convert prio and mark interrupt as untargetted */
  1322. act_prio = xive_prio_from_guest(guest_prio);
  1323. state->act_priority = MASKED;
  1324. /*
  1325. * We need to drop the lock due to the mutex below. Hopefully
  1326. * nothing is touching that interrupt yet since it hasn't been
  1327. * advertized to a running guest yet
  1328. */
  1329. arch_spin_unlock(&sb->lock);
  1330. /* If we have a priority target the interrupt */
  1331. if (act_prio != MASKED) {
  1332. /* First, check provisioning of queues */
  1333. mutex_lock(&xive->kvm->lock);
  1334. rc = xive_check_provisioning(xive->kvm, act_prio);
  1335. mutex_unlock(&xive->kvm->lock);
  1336. /* Target interrupt */
  1337. if (rc == 0)
  1338. rc = xive_target_interrupt(xive->kvm, state,
  1339. server, act_prio);
  1340. /*
  1341. * If provisioning or targetting failed, leave it
  1342. * alone and masked. It will remain disabled until
  1343. * the guest re-targets it.
  1344. */
  1345. }
  1346. /*
  1347. * Find out if this was a delayed irq stashed in an ICP,
  1348. * in which case, treat it as pending
  1349. */
  1350. if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
  1351. val |= KVM_XICS_PENDING;
  1352. pr_devel(" Found delayed ! forcing PENDING !\n");
  1353. }
  1354. /* Cleanup the SW state */
  1355. state->old_p = false;
  1356. state->old_q = false;
  1357. state->lsi = false;
  1358. state->asserted = false;
  1359. /* Restore LSI state */
  1360. if (val & KVM_XICS_LEVEL_SENSITIVE) {
  1361. state->lsi = true;
  1362. if (val & KVM_XICS_PENDING)
  1363. state->asserted = true;
  1364. pr_devel(" LSI ! Asserted=%d\n", state->asserted);
  1365. }
  1366. /*
  1367. * Restore P and Q. If the interrupt was pending, we
  1368. * force Q and !P, which will trigger a resend.
  1369. *
  1370. * That means that a guest that had both an interrupt
  1371. * pending (queued) and Q set will restore with only
  1372. * one instance of that interrupt instead of 2, but that
  1373. * is perfectly fine as coalescing interrupts that haven't
  1374. * been presented yet is always allowed.
  1375. */
  1376. if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
  1377. state->old_p = true;
  1378. if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
  1379. state->old_q = true;
  1380. pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
  1381. /*
  1382. * If the interrupt was unmasked, update guest priority and
  1383. * perform the appropriate state transition and do a
  1384. * re-trigger if necessary.
  1385. */
  1386. if (val & KVM_XICS_MASKED) {
  1387. pr_devel(" masked, saving prio\n");
  1388. state->guest_priority = MASKED;
  1389. state->saved_priority = guest_prio;
  1390. } else {
  1391. pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
  1392. xive_finish_unmask(xive, sb, state, guest_prio);
  1393. state->saved_priority = guest_prio;
  1394. }
  1395. /* Increment the number of valid sources and mark this one valid */
  1396. if (!state->valid)
  1397. xive->src_count++;
  1398. state->valid = true;
  1399. return 0;
  1400. }
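
/*
 * Host-side injection of a guest interrupt source (used by the in-kernel
 * irqchip paths such as irqfd / KVM_IRQ_LINE routing). For an LSI, a
 * non-zero level asserts the software latch and level 0 deasserts it;
 * for an MSI, any "set" level fires a single trigger via the backing
 * IPI. Triggers are refused on passed-through sources, which are driven
 * directly by hardware.
 */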
int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly .... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}
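
/*
 * Device attribute accessors. The XIVE-for-XICS device reuses the XICS
 * attribute group KVM_DEV_XICS_GRP_SOURCES: attr->attr carries the guest
 * interrupt number and attr->addr points at the 64-bit source state word
 * (server, priority and the KVM_XICS_* flags).
 *
 * Rough userspace sketch (hypothetical fd and variable names) of how a
 * VMM could restore one source through this path, assuming the device fd
 * was obtained from KVM_CREATE_DEVICE:
 *
 *	u64 state = ((u64)server << KVM_XICS_DESTINATION_SHIFT) |
 *		    ((u64)prio << KVM_XICS_PRIORITY_SHIFT) |
 *		    KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq_number,
 *		.addr  = (u64)(uintptr_t)&state,
 *	};
 *	ioctl(xive_device_fd, KVM_SET_DEVICE_ATTR, &attr);
 */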
static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}
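
/*
 * Quiesce a hardware interrupt before giving it back: set its ESB to
 * PQ=01 (masked) so it can no longer fire, then deconfigure it in the
 * XIVE IC so it is no longer routed to any event queue.
 */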
static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
}
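
/*
 * Tear down every valid source in a block: mask and deconfigure the
 * backing IPI, release its queued data and return it to the XIVE
 * allocator. Passed-through sources are masked and deconfigured as well,
 * but their hardware IRQ data stays owned by the host driver.
 */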
static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_cleanup_irq_data(&state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too but keep IRQ hw data */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}
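
/*
 * Device "destroy" callback: detach the device from the VM, free every
 * source block and its interrupts, release the VP block, and finally
 * free the device structure itself.
 */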
static void kvmppc_xive_free(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	int i;

	debugfs_remove(xive->dentry);

	if (kvm)
		kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	kfree(xive);
	kfree(dev);
}
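
/*
 * Device "create" callback: allocate the per-VM XIVE structure, pick the
 * host's default event queue size and reserve a block of hardware VPs,
 * one per possible vCPU (KVM_MAX_VCPUS).
 */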
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	pr_devel("Creating xive for partition\n");

	xive = kzalloc(sizeof(*xive), GFP_KERNEL);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xive)
		ret = -EEXIST;
	else
		kvm->arch.xive = xive;

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* Allocate a bunch of VPs */
	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
	pr_devel("VP_Base=%x\n", xive->vp_base);

	if (xive->vp_base == XIVE_INVALID_VP)
		ret = -ENOMEM;

	xive->single_escalation = xive_native_has_single_escalation();

	if (ret) {
		kfree(xive);
		return ret;
	}

	return 0;
}
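
/*
 * Debugfs dump: per-vCPU presentation state (CPPR, MFRR, pending bits),
 * the head of each provisioned event queue, the escalation interrupt PQ
 * bits, and a summary of real-mode vs virtual-mode hcall counts.
 */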
static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int i;

		if (!xc)
			continue;

		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);

		for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
			struct xive_q *q = &xc->queues[i];
			u32 i0, i1, idx;

			if (!q->qpage && !xc->esc_virq[i])
				continue;

			seq_printf(m, " [q%d]: ", i);

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1);
			}
			if (xc->esc_virq[i]) {
				struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
				struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
				u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

				seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
					   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
					   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
					   xc->esc_virq[i], pq, xd->eoi_page);
				seq_printf(m, "\n");
			}
		}

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_printf(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	return 0;
}

static int xive_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xive_debug_show, inode->i_private);
}

static const struct file_operations xive_debug_fops = {
	.open = xive_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
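
/*
 * Expose the dump above under powerpc's debugfs root as a read-only
 * "kvm-xive-<pointer>" file, one per VM instance.
 */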
static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}
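
/*
 * Ops for the "kvm-xive" in-kernel device type. Userspace instantiates
 * it with KVM_CREATE_DEVICE and then drives it through the attribute
 * handlers above.
 */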
struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.destroy = kvmppc_xive_free,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};
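
/*
 * Install/remove the virtual-mode XICS hcall handlers. The built-in HV
 * code calls through these pointers when an hcall cannot (or should not)
 * be completed in real mode.
 */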
void kvmppc_xive_init_module(void)
{
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;
}