book3s_xive_native.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2019, IBM Corporation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/irqdomain.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	/*
	 * The KVM XIVE native device does not use the XIVE_ESB_SET_PQ_10
	 * load operation, so there is no need to enforce load-after-store
	 * ordering.
	 */

	val = in_be64(xd->eoi_mmio + offset);
	return (u8)val;
}
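
/*
 * Disable the EQ for the given priority in the XIVE IC and drop the
 * reference on the page backing the queue, if one was set up.
 */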
static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];

	xive_native_disable_queue(xc->vp_id, q, prio);
	if (q->qpage) {
		put_page(virt_to_page(q->qpage));
		q->qpage = NULL;
	}
}
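
/*
 * Wrapper around xive_native_configure_queue() which also releases the
 * reference held on the previous queue page once the new configuration
 * has been applied.
 */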
static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
					      u8 prio, __be32 *qpage,
					      u32 order, bool can_escalate)
{
	int rc;
	__be32 *qpage_prev = q->qpage;

	rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
					 can_escalate);
	if (rc)
		return rc;

	if (qpage_prev)
		put_page(virt_to_page(qpage_prev));

	return rc;
}

void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	int i;

	if (!kvmppc_xive_enabled(vcpu))
		return;

	if (!xc)
		return;

	pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Free escalations */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		/* Free the escalation irq */
		if (xc->esc_virq[i]) {
			if (kvmppc_xive_has_single_escalation(xc->xive))
				xive_cleanup_single_escalation(vcpu, xc->esc_virq[i]);
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
			xc->esc_virq[i] = 0;
		}
	}

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Clear the cam word so guest entry won't try to push context */
	vcpu->arch.xive_cam_word = 0;

	/* Free the queues */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		kvmppc_xive_native_cleanup_queue(vcpu, i);
	}

	/* Free the VP */
	kfree(xc);

	/* Cleanup the vcpu */
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
	vcpu->arch.xive_vcpu = NULL;
}
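
/*
 * Connect a vcpu to the XIVE native device: compute and enable its VP
 * in OPAL and initialize the TIMA fields used by the assembly
 * push/pull code.
 */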
int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 server_num)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc = NULL;
	int rc;
	u32 vp_id;

	pr_devel("native_connect_vcpu(server=%d)\n", server_num);

	if (dev->ops != &kvm_xive_native_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	mutex_lock(&xive->lock);

	rc = kvmppc_xive_compute_vp_id(xive, server_num, &vp_id);
	if (rc)
		goto bail;

	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc) {
		rc = -ENOMEM;
		goto bail;
	}

	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = server_num;
	xc->vp_id = vp_id;
	xc->valid = true;
	vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;

	rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (rc) {
		pr_err("Failed to get VP info from OPAL: %d\n", rc);
		goto bail;
	}

	if (!kvmppc_xive_check_save_restore(vcpu)) {
		pr_err("inconsistent save-restore setup for VCPU %d\n", server_num);
		rc = -EIO;
		goto bail;
	}

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupt numbering
	 */
	rc = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
	if (rc) {
		pr_err("Failed to enable VP in OPAL: %d\n", rc);
		goto bail;
	}

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* TODO: reset all queues to a clean state ? */
bail:
	mutex_unlock(&xive->lock);
	if (rc)
		kvmppc_xive_native_cleanup_vcpu(vcpu);

	return rc;
}

/*
 * Device passthrough support
 */
static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	pgoff_t esb_pgoff = KVM_XIVE_ESB_PAGE_OFFSET + irq * 2;

	if (irq >= KVMPPC_XIVE_NR_IRQS)
		return -EINVAL;

	/*
	 * Clear the ESB pages of the IRQ number being mapped (or
	 * unmapped) into the guest and let the VM fault handler
	 * repopulate with the appropriate ESB pages (device or IC)
	 */
	pr_debug("clearing esb pages for girq 0x%lx\n", irq);
	mutex_lock(&xive->mapping_lock);
	if (xive->mapping)
		unmap_mapping_range(xive->mapping,
				    esb_pgoff << PAGE_SHIFT,
				    2ull << PAGE_SHIFT, 1);
	mutex_unlock(&xive->mapping_lock);
	return 0;
}

static struct kvmppc_xive_ops kvmppc_xive_native_ops = {
	.reset_mapped = kvmppc_xive_native_reset_mapped,
};
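
/*
 * Fault handler for the ESB mapping of the device: inserts the trigger
 * or the EOI/management page of the source backing the faulting offset.
 */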
static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct kvm_device *dev = vma->vm_file->private_data;
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct xive_irq_data *xd;
	u32 hw_num;
	u16 src;
	u64 page;
	unsigned long irq;
	u64 page_offset;

	/*
	 * Linux/KVM uses a two-page ESB setting, one for trigger and
	 * one for EOI
	 */
	page_offset = vmf->pgoff - vma->vm_pgoff;
	irq = page_offset / 2;

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel("%s: source %lx not found !\n", __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	state = &sb->irq_state[src];

	/* Some sanity checking */
	if (!state->valid) {
		pr_devel("%s: source %lx invalid !\n", __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	kvmppc_xive_select_irq(state, &hw_num, &xd);

	arch_spin_lock(&sb->lock);

	/*
	 * first/even page is for trigger
	 * second/odd page is for EOI and management.
	 */
	page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
	arch_spin_unlock(&sb->lock);

	if (WARN_ON(!page)) {
		pr_err("%s: accessing invalid ESB page for source %lx !\n",
		       __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct xive_native_esb_vmops = {
	.fault = xive_native_esb_fault,
};

static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	switch (vmf->pgoff - vma->vm_pgoff) {
	case 0: /* HW - forbid access */
	case 1: /* HV - forbid access */
		return VM_FAULT_SIGBUS;
	case 2: /* OS */
		vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
		return VM_FAULT_NOPAGE;
	case 3: /* USER - TODO */
	default:
		return VM_FAULT_SIGBUS;
	}
}

static const struct vm_operations_struct xive_native_tima_vmops = {
	.fault = xive_native_tima_fault,
};
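
/*
 * mmap handler for the device: exposes the TIMA pages at
 * KVM_XIVE_TIMA_PAGE_OFFSET and the source ESB pages at
 * KVM_XIVE_ESB_PAGE_OFFSET.
 */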
static int kvmppc_xive_native_mmap(struct kvm_device *dev,
				   struct vm_area_struct *vma)
{
	struct kvmppc_xive *xive = dev->private;

	/* We only allow mappings at fixed offset for now */
	if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
		if (vma_pages(vma) > 4)
			return -EINVAL;
		vma->vm_ops = &xive_native_tima_vmops;
	} else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
		if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
			return -EINVAL;
		vma->vm_ops = &xive_native_esb_vmops;
	} else {
		return -EINVAL;
	}

	vm_flags_set(vma, VM_IO | VM_PFNMAP);
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	/*
	 * Grab the KVM device file address_space to be able to clear
	 * the ESB pages mapping when a device is passed-through into
	 * the guest.
	 */
	xive->mapping = vma->vm_file->f_mapping;
	return 0;
}
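
/*
 * KVM_DEV_XIVE_GRP_SOURCE: create or update a source, allocating its
 * IPI on first use, restoring the LSI state passed by userspace and
 * leaving the source masked.
 */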
static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
					 u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val;
	u16 idx;
	int rc;

	pr_devel("%s irq=0x%lx\n", __func__, irq);

	if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS)
		return -E2BIG;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_debug("No source, creating source block...\n");
		sb = kvmppc_xive_create_src_block(xive, irq);
		if (!sb) {
			pr_err("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	if (get_user(val, ubufp)) {
		pr_err("fault getting user info !\n");
		return -EFAULT;
	}

	arch_spin_lock(&sb->lock);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_err("Failed to allocate IRQ !\n");
			rc = -ENXIO;
			goto unlock;
		}
		xive_native_populate_irq_data(state->ipi_number,
					      &state->ipi_data);
		pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__,
			 state->ipi_number, irq);
	}

	/* Restore LSI state */
	if (val & KVM_XIVE_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XIVE_LEVEL_ASSERTED)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	/* Mask IRQ to start with */
	state->act_server = 0;
	state->act_priority = MASKED;
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	rc = 0;

unlock:
	arch_spin_unlock(&sb->lock);

	return rc;
}
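
/*
 * Apply a new target/priority/EISN configuration to a source, or mask
 * it when the priority is MASKED or the masked flag is set.
 */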
static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
					struct kvmppc_xive_src_block *sb,
					struct kvmppc_xive_irq_state *state,
					u32 server, u8 priority, bool masked,
					u32 eisn)
{
	struct kvm *kvm = xive->kvm;
	u32 hw_num;
	int rc = 0;

	arch_spin_lock(&sb->lock);

	if (state->act_server == server && state->act_priority == priority &&
	    state->eisn == eisn)
		goto unlock;

	pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
		 priority, server, masked, state->act_server,
		 state->act_priority);

	kvmppc_xive_select_irq(state, &hw_num, NULL);

	if (priority != MASKED && !masked) {
		rc = kvmppc_xive_select_target(kvm, &server, priority);
		if (rc)
			goto unlock;

		state->act_priority = priority;
		state->act_server = server;
		state->eisn = eisn;

		rc = xive_native_configure_irq(hw_num,
					       kvmppc_xive_vp(xive, server),
					       priority, eisn);
	} else {
		state->act_priority = MASKED;
		state->act_server = 0;
		state->eisn = 0;

		rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
	}

unlock:
	arch_spin_unlock(&sb->lock);
	return rc;
}
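
/*
 * KVM_DEV_XIVE_GRP_SOURCE_CONFIG: decode the 64-bit configuration word
 * passed by userspace and apply it to the source.
 */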
static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive,
						long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 src;
	u64 kvm_cfg;
	u32 server;
	u8 priority;
	bool masked;
	u32 eisn;

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[src];

	if (!state->valid)
		return -EINVAL;

	if (get_user(kvm_cfg, ubufp))
		return -EFAULT;

	pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg);

	priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >>
		KVM_XIVE_SOURCE_PRIORITY_SHIFT;
	server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >>
		KVM_XIVE_SOURCE_SERVER_SHIFT;
	masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >>
		KVM_XIVE_SOURCE_MASKED_SHIFT;
	eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >>
		KVM_XIVE_SOURCE_EISN_SHIFT;

	if (priority != xive_prio_from_guest(priority)) {
		pr_err("invalid priority for queue %d for VCPU %d\n",
		       priority, server);
		return -EINVAL;
	}

	return kvmppc_xive_native_update_source_config(xive, sb, state, server,
						       priority, masked, eisn);
}

static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
					  long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct xive_irq_data *xd;
	u32 hw_num;
	u16 src;
	int rc = 0;

	pr_devel("%s irq=0x%lx", __func__, irq);

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[src];

	rc = -EINVAL;

	arch_spin_lock(&sb->lock);

	if (state->valid) {
		kvmppc_xive_select_irq(state, &hw_num, &xd);
		xive_native_sync_source(hw_num);
		rc = 0;
	}

	arch_spin_unlock(&sb->lock);
	return rc;
}

static int xive_native_validate_queue_size(u32 qshift)
{
	/*
	 * We only support 64K pages for the moment. This is also
	 * advertised in the DT property "ibm,xive-eq-sizes"
	 */
	switch (qshift) {
	case 0: /* EQ reset */
	case 16:
		return 0;
	case 12:
	case 21:
	case 24:
	default:
		return -EINVAL;
	}
}
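
/*
 * KVM_DEV_XIVE_GRP_EQ_CONFIG (set): configure or reset the event queue
 * of a priority/server pair from the struct kvm_ppc_xive_eq provided
 * by userspace.
 */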
static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
					       long eq_idx, u64 addr)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	void __user *ubufp = (void __user *) addr;
	u32 server;
	u8 priority;
	struct kvm_ppc_xive_eq kvm_eq;
	int rc;
	__be32 *qaddr = NULL;
	struct page *page;
	struct xive_q *q;
	gfn_t gfn;
	unsigned long page_size;
	int srcu_idx;

	/*
	 * Demangle priority/server tuple from the EQ identifier
	 */
	priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
		KVM_XIVE_EQ_PRIORITY_SHIFT;
	server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
		KVM_XIVE_EQ_SERVER_SHIFT;

	if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
		return -EFAULT;

	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_err("Can't find server %d\n", server);
		return -ENOENT;
	}
	xc = vcpu->arch.xive_vcpu;

	if (priority != xive_prio_from_guest(priority)) {
		pr_err("Trying to restore invalid queue %d for VCPU %d\n",
		       priority, server);
		return -EINVAL;
	}
	q = &xc->queues[priority];

	pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
		 __func__, server, priority, kvm_eq.flags,
		 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

	/* reset queue and disable queueing */
	if (!kvm_eq.qshift) {
		q->guest_qaddr = 0;
		q->guest_qshift = 0;

		rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
							NULL, 0, true);
		if (rc) {
			pr_err("Failed to reset queue %d for VCPU %d: %d\n",
			       priority, xc->server_num, rc);
			return rc;
		}

		return 0;
	}

	/*
	 * sPAPR specifies an "Unconditional Notify (n) flag" for the
	 * H_INT_SET_QUEUE_CONFIG hcall which forces notification
	 * without using the coalescing mechanisms provided by the
	 * XIVE END ESBs. This is required on KVM as notification
	 * using the END ESBs is not supported.
	 */
	if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
		pr_err("invalid flags %d\n", kvm_eq.flags);
		return -EINVAL;
	}

	rc = xive_native_validate_queue_size(kvm_eq.qshift);
	if (rc) {
		pr_err("invalid queue size %d\n", kvm_eq.qshift);
		return rc;
	}

	if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
		pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
		       1ull << kvm_eq.qshift);
		return -EINVAL;
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	gfn = gpa_to_gfn(kvm_eq.qaddr);

	page_size = kvm_host_page_size(vcpu, gfn);
	if (1ull << kvm_eq.qshift > page_size) {
		srcu_read_unlock(&kvm->srcu, srcu_idx);
		pr_warn("Incompatible host page size %lx!\n", page_size);
		return -EINVAL;
	}

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		srcu_read_unlock(&kvm->srcu, srcu_idx);
		pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
		return -EINVAL;
	}

	qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	/*
	 * Back up the queue page guest address so that the EQ page
	 * can be marked dirty for migration.
	 */
	q->guest_qaddr = kvm_eq.qaddr;
	q->guest_qshift = kvm_eq.qshift;

	/*
	 * Unconditional Notification is forced by default at the
	 * OPAL level because the use of END ESBs is not supported by
	 * Linux.
	 */
	rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
					(__be32 *) qaddr, kvm_eq.qshift, true);
	if (rc) {
		pr_err("Failed to configure queue %d for VCPU %d: %d\n",
		       priority, xc->server_num, rc);
		put_page(page);
		return rc;
	}

	/*
	 * Only restore the queue state when needed. When doing the
	 * H_INT_SET_SOURCE_CONFIG hcall, it should not.
	 */
	if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) {
		rc = xive_native_set_queue_state(xc->vp_id, priority,
						 kvm_eq.qtoggle,
						 kvm_eq.qindex);
		if (rc)
			goto error;
	}

	rc = kvmppc_xive_attach_escalation(vcpu, priority,
					   kvmppc_xive_has_single_escalation(xive));
error:
	if (rc)
		kvmppc_xive_native_cleanup_queue(vcpu, priority);
	return rc;
}
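
/*
 * KVM_DEV_XIVE_GRP_EQ_CONFIG (get): return the current event queue
 * configuration and state of a priority/server pair to userspace.
 */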
static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive,
					       long eq_idx, u64 addr)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;
	void __user *ubufp = (u64 __user *) addr;
	u32 server;
	u8 priority;
	struct kvm_ppc_xive_eq kvm_eq;
	u64 qaddr;
	u64 qshift;
	u64 qeoi_page;
	u32 escalate_irq;
	u64 qflags;
	int rc;

	/*
	 * Demangle priority/server tuple from the EQ identifier
	 */
	priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
		KVM_XIVE_EQ_PRIORITY_SHIFT;
	server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
		KVM_XIVE_EQ_SERVER_SHIFT;

	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_err("Can't find server %d\n", server);
		return -ENOENT;
	}
	xc = vcpu->arch.xive_vcpu;

	if (priority != xive_prio_from_guest(priority)) {
		pr_err("invalid priority for queue %d for VCPU %d\n",
		       priority, server);
		return -EINVAL;
	}
	q = &xc->queues[priority];

	memset(&kvm_eq, 0, sizeof(kvm_eq));

	if (!q->qpage)
		return 0;

	rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift,
					&qeoi_page, &escalate_irq, &qflags);
	if (rc)
		return rc;

	kvm_eq.flags = 0;
	if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
		kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;

	kvm_eq.qshift = q->guest_qshift;
	kvm_eq.qaddr = q->guest_qaddr;

	rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle,
					 &kvm_eq.qindex);
	if (rc)
		return rc;

	pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
		 __func__, server, priority, kvm_eq.flags,
		 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

	if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq)))
		return -EFAULT;

	return 0;
}

static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		if (state->act_priority == MASKED)
			continue;

		state->eisn = 0;
		state->act_server = 0;
		state->act_priority = MASKED;
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
		xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
		if (state->pt_number) {
			xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->pt_number,
						  0, MASKED, 0);
		}
	}
}
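
/*
 * KVM_DEV_XIVE_RESET: mask all sources and, for each vcpu, free the
 * escalation interrupts and release the queues.
 */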
static int kvmppc_xive_reset(struct kvmppc_xive *xive)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	pr_devel("%s\n", __func__);

	mutex_lock(&xive->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int prio;

		if (!xc)
			continue;

		kvmppc_xive_disable_vcpu_interrupts(vcpu);

		for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {

			/* Single escalation, no queue 7 */
			if (prio == 7 && kvmppc_xive_has_single_escalation(xive))
				break;

			if (xc->esc_virq[prio]) {
				free_irq(xc->esc_virq[prio], vcpu);
				irq_dispose_mapping(xc->esc_virq[prio]);
				kfree(xc->esc_virq_names[prio]);
				xc->esc_virq[prio] = 0;
			}

			kvmppc_xive_native_cleanup_queue(vcpu, prio);
		}
	}

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_reset_sources(sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	mutex_unlock(&xive->lock);

	return 0;
}

static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block *sb)
{
	int j;

	for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
		struct xive_irq_data *xd;
		u32 hw_num;

		if (!state->valid)
			continue;

		/*
		 * The struct kvmppc_xive_irq_state reflects the state
		 * of the EAS configuration and not the state of the
		 * source. The source is masked by setting the PQ bits
		 * to '-Q', which is what is being done before calling
		 * the KVM_DEV_XIVE_EQ_SYNC control.
		 *
		 * If a source EAS is configured, OPAL syncs the XIVE
		 * IC of the source and the XIVE IC of the previous
		 * target if any.
		 *
		 * So it should be fine ignoring MASKED sources as
		 * they have been synced already.
		 */
		if (state->act_priority == MASKED)
			continue;

		kvmppc_xive_select_irq(state, &hw_num, &xd);
		xive_native_sync_source(hw_num);
		xive_native_sync_queue(hw_num);
	}
}

static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	unsigned int prio;
	int srcu_idx;

	if (!xc)
		return -ENOENT;

	for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];

		if (!q->qpage)
			continue;

		/* Mark EQ page dirty for migration */
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	}
	return 0;
}
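
/*
 * KVM_DEV_XIVE_EQ_SYNC: sync all non-masked sources and their queues
 * in the XIVE IC, then mark every EQ page dirty for migration.
 */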
static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	pr_devel("%s\n", __func__);

	mutex_lock(&xive->lock);
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_native_sync_sources(sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvmppc_xive_native_vcpu_eq_sync(vcpu);
	}
	mutex_unlock(&xive->lock);

	return 0;
}

static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
				       struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	switch (attr->group) {
	case KVM_DEV_XIVE_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XIVE_RESET:
			return kvmppc_xive_reset(xive);
		case KVM_DEV_XIVE_EQ_SYNC:
			return kvmppc_xive_native_eq_sync(xive);
		case KVM_DEV_XIVE_NR_SERVERS:
			return kvmppc_xive_set_nr_servers(xive, attr->addr);
		}
		break;
	case KVM_DEV_XIVE_GRP_SOURCE:
		return kvmppc_xive_native_set_source(xive, attr->attr,
						     attr->addr);
	case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
		return kvmppc_xive_native_set_source_config(xive, attr->attr,
							    attr->addr);
	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
		return kvmppc_xive_native_set_queue_config(xive, attr->attr,
							   attr->addr);
	case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
		return kvmppc_xive_native_sync_source(xive, attr->attr,
						      attr->addr);
	}
	return -ENXIO;
}

static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
				       struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	switch (attr->group) {
	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
		return kvmppc_xive_native_get_queue_config(xive, attr->attr,
							   attr->addr);
	}
	return -ENXIO;
}

static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
				       struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XIVE_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XIVE_RESET:
		case KVM_DEV_XIVE_EQ_SYNC:
		case KVM_DEV_XIVE_NR_SERVERS:
			return 0;
		}
		break;
	case KVM_DEV_XIVE_GRP_SOURCE:
	case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
	case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
		if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ &&
		    attr->attr < KVMPPC_XIVE_NR_IRQS)
			return 0;
		break;
	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
		return 0;
	}
	return -ENXIO;
}

/*
 * Called when device fd is closed. kvm->lock is held.
 */
static void kvmppc_xive_native_release(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	pr_devel("Releasing xive native device\n");

	/*
	 * Clear the KVM device file address_space which is used to
	 * unmap the ESB pages when a device is passed-through.
	 */
	mutex_lock(&xive->mapping_lock);
	xive->mapping = NULL;
	mutex_unlock(&xive->mapping_lock);

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd or mmap referring to
	 * the device. Therefore none of the device attribute set/get,
	 * mmap, or page fault functions can be executing concurrently,
	 * and similarly, the connect_vcpu and set/clr_mapped functions
	 * cannot be executing either.
	 */

	debugfs_remove(xive->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xive_native_[gs]et_vp) can be in progress.
		 * Holding the vcpu->mutex also means that the vcpu cannot
		 * be executing the KVM_RUN ioctl, and therefore it cannot
		 * be executing the XIVE push or pull code or accessing
		 * the XIVE MMIO regions.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	/*
	 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
	 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
	 * against xive code getting called during vcpu execution or
	 * set/get one_reg operations.
	 */
	kvm->arch.xive = NULL;

	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	/*
	 * A reference of the kvmppc_xive pointer is now kept under
	 * the xive_devices struct of the machine for reuse. It is
	 * freed when the VM is destroyed for now until we fix all the
	 * execution paths.
	 */
	kfree(dev);
}

/*
 * Create a XIVE device. kvm->lock is held.
 */
static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xive native device\n");

	if (kvm->arch.xive)
		return -EEXIST;

	xive = kvmppc_xive_get_device(kvm, type);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;
	mutex_init(&xive->mapping_lock);
	mutex_init(&xive->lock);

	/* VP allocation is delayed to the first call to connect_vcpu */
	xive->vp_base = XIVE_INVALID_VP;
	/* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
	 * on a POWER9 system.
	 */
	xive->nr_servers = KVM_MAX_VCPUS;

	if (xive_native_has_single_escalation())
		xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;

	if (xive_native_has_save_restore())
		xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE;

	xive->ops = &kvmppc_xive_native_ops;

	kvm->arch.xive = xive;
	return 0;
}

/*
 * Interrupt Pending Buffer (IPB) offset
 */
#define TM_IPB_SHIFT 40
#define TM_IPB_MASK  (((u64) 0xFF) << TM_IPB_SHIFT)
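
/*
 * one_reg get: capture the VP state as the saved thread context word
 * (w01) merged with the IPB backup kept by OPAL in the NVT.
 */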
int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u64 opal_state;
	int rc;

	if (!kvmppc_xive_enabled(vcpu))
		return -EPERM;

	if (!xc)
		return -ENOENT;

	/* Thread context registers. We only care about IPB and CPPR */
	val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01;

	/* Get the VP state from OPAL */
	rc = xive_native_get_vp_state(xc->vp_id, &opal_state);
	if (rc)
		return rc;

	/*
	 * Capture the backup of IPB register in the NVT structure and
	 * merge it in our KVM VP state.
	 */
	val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK);

	pr_devel("%s NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n",
		 __func__,
		 vcpu->arch.xive_saved_state.nsr,
		 vcpu->arch.xive_saved_state.cppr,
		 vcpu->arch.xive_saved_state.ipb,
		 vcpu->arch.xive_saved_state.pipr,
		 vcpu->arch.xive_saved_state.w01,
		 (u32) vcpu->arch.xive_cam_word, opal_state);

	return 0;
}

int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;

	pr_devel("%s w01=%016llx vp=%016llx\n", __func__,
		 val->xive_timaval[0], val->xive_timaval[1]);

	if (!kvmppc_xive_enabled(vcpu))
		return -EPERM;

	if (!xc || !xive)
		return -ENOENT;

	/* We can't update the state of a "pushed" VCPU */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EBUSY;

	/*
	 * Restore the thread context registers. IPB and CPPR should
	 * be the only ones that matter.
	 */
	vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0];

	/*
	 * There is no need to restore the XIVE internal state (IPB
	 * stored in the NVT) as the IPB register was merged in KVM VP
	 * state when captured.
	 */
	return 0;
}

bool kvmppc_xive_native_supported(void)
{
	return xive_native_has_queue_state_support();
}

static int xive_native_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	if (!kvm)
		return 0;

	seq_puts(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		seq_printf(m, "VCPU %d: VP=%#x/%02x\n"
			   " NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x\n",
			   xc->server_num, xc->vp_id, xc->vp_chip_id,
			   vcpu->arch.xive_saved_state.nsr,
			   vcpu->arch.xive_saved_state.cppr,
			   vcpu->arch.xive_saved_state.ipb,
			   vcpu->arch.xive_saved_state.pipr,
			   be64_to_cpu(vcpu->arch.xive_saved_state.w01),
			   be32_to_cpu(vcpu->arch.xive_cam_word));

		kvmppc_xive_debug_show_queues(m, vcpu);
	}

	seq_puts(m, "=========\nSources\n=========\n");

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_debug_show_sources(m, sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xive_native_debug);

static void xive_native_debugfs_init(struct kvmppc_xive *xive)
{
	xive->dentry = debugfs_create_file("xive", 0444, xive->kvm->debugfs_dentry,
					   xive, &xive_native_debug_fops);

	pr_debug("%s: created\n", __func__);
}

static void kvmppc_xive_native_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;

	/* Register some debug interfaces */
	xive_native_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_native_ops = {
	.name = "kvm-xive-native",
	.create = kvmppc_xive_native_create,
	.init = kvmppc_xive_native_init,
	.release = kvmppc_xive_native_release,
	.set_attr = kvmppc_xive_native_set_attr,
	.get_attr = kvmppc_xive_native_get_attr,
	.has_attr = kvmppc_xive_native_has_attr,
	.mmap = kvmppc_xive_native_mmap,
};
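
/*
 * Illustrative sketch (not part of the original file): how userspace,
 * e.g. a VMM such as QEMU, is expected to drive this device through
 * the generic KVM device API. Error handling is omitted.
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XIVE };
 *	struct kvm_device_attr reset = {
 *		.group = KVM_DEV_XIVE_GRP_CTRL,
 *		.attr  = KVM_DEV_XIVE_RESET,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// cd.fd is the device fd
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &reset);
 *
 * Sources and event queues are then configured with the
 * KVM_DEV_XIVE_GRP_SOURCE* and KVM_DEV_XIVE_GRP_EQ_CONFIG groups, and
 * the TIMA and ESB pages are mmap()'ed at the fixed offsets handled by
 * kvmppc_xive_native_mmap() above.
 */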