// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

/* #define EXIT_DEBUG */

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, num_2M_pages),
	STATS_DESC_ICOUNTER(VM, num_1G_pages)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, sum_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, light_exits),
	STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, isi_exits),
	STATS_DESC_COUNTER(VCPU, dsi_exits),
	STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
	STATS_DESC_COUNTER(VCPU, dec_exits),
	STATS_DESC_COUNTER(VCPU, ext_intr_exits),
	STATS_DESC_COUNTER(VCPU, halt_successful_wait),
	STATS_DESC_COUNTER(VCPU, dbell_exits),
	STATS_DESC_COUNTER(VCPU, gdbell_exits),
	STATS_DESC_COUNTER(VCPU, ld),
	STATS_DESC_COUNTER(VCPU, st),
	STATS_DESC_COUNTER(VCPU, pf_storage),
	STATS_DESC_COUNTER(VCPU, pf_instruc),
	STATS_DESC_COUNTER(VCPU, sp_storage),
	STATS_DESC_COUNTER(VCPU, sp_instruc),
	STATS_DESC_COUNTER(VCPU, queue_intr),
	STATS_DESC_COUNTER(VCPU, ld_slow),
	STATS_DESC_COUNTER(VCPU, st_slow),
	STATS_DESC_COUNTER(VCPU, pthru_all),
	STATS_DESC_COUNTER(VCPU, pthru_host),
	STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

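/*
 * Keep the int_pending flag in the guest's shared (paravirt) area up to
 * date.  Only PR KVM uses this hint, so there is nothing to do for HV
 * guests.
 */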
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

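/*
 * Interrupt delivery is held off while the guest is in a critical section,
 * i.e. while the value returned by kvmppc_get_critical() matches r1 and the
 * guest is in supervisor mode.  HV guests never use this convention.
 */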
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}

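/*
 * Map an exception vector offset (e.g. 0x300 for data storage) to its
 * internal BOOK3S_IRQPRIO_* delivery priority.
 */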
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break;
	default:    prio = BOOK3S_IRQPRIO_MAX; break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
}
EXPORT_SYMBOL(kvmppc_core_queue_syscall);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, srr1_flags);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, srr1_flags);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, srr1_flags);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * sorts of guest.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
	 * which ends up doing a smp_send_reschedule(), which will
	 * pull the guest all the way out to the host, meaning that
	 * we will call kvmppc_core_prepare_to_enter() before entering
	 * the guest again, and that will handle the external_oneshot
	 * flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
				    ulong dar, ulong dsisr)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, dsisr);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);

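/*
 * Try to deliver the exception that corresponds to the given priority.
 * Decrementer and external interrupts are only delivered when MSR_EE is set
 * and the guest is not in a critical section.  Returns 1 if the interrupt
 * was injected, 0 otherwise.
 */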
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL:
		/*
		 * External interrupts get cleared by userspace
		 * except when set by the KVM_INTERRUPT ioctl with
		 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
		 */
		if (vcpu->arch.external_oneshot) {
			vcpu->arch.external_oneshot = 0;
			return true;
		}
		return false;
	}

	return true;
}

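/*
 * Called before entering the guest: deliver what can be delivered from the
 * pending exception bitmap, highest priority first, then refresh the shared
 * int_pending hint for PR guests.
 */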
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

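/*
 * Translate a guest physical address to a host pfn.  Accesses that hit the
 * guest's magic (shared) page are redirected to the host page backing
 * vcpu->arch.shared; everything else goes through the memslots.
 */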
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

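/*
 * Translate an effective address through the guest MMU when relocation is on
 * (MSR_IR for instruction fetches, MSR_DR for data accesses); otherwise fall
 * back to a 1:1 real-mode mapping clamped by KVM_PAM.
 */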
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

/*
 * Returns prefixed instructions with the prefix in the high 32 bits
 * of *inst and suffix in the low 32 bits.  This is the same convention
 * as used in HEIR, vcpu->arch.last_inst and vcpu->arch.emul_inst.
 * Like vcpu->arch.last_inst but unlike vcpu->arch.emul_inst, each
 * half of the value needs byte-swapping if the guest endianness is
 * different from the host endianness.
 */
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
			  enum instruction_fetch_type type, unsigned long *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;
	u32 iw;

	if (type == INST_SC)
		pc -= 4;
	r = kvmppc_ld(vcpu, &pc, sizeof(u32), &iw, false);
	if (r != EMULATE_DONE)
		return EMULATE_AGAIN;
	/*
	 * If [H]SRR1 indicates that the instruction that caused the
	 * current interrupt is a prefixed instruction, get the suffix.
	 */
	if (kvmppc_get_msr(vcpu) & SRR1_PREFIXED) {
		u32 suffix;
		pc += 4;
		r = kvmppc_ld(vcpu, &pc, sizeof(u32), &suffix, false);
		if (r != EMULATE_DONE)
			return EMULATE_AGAIN;
		*inst = ((u64)iw << 32) | suffix;
	} else {
		*inst = iw;
	}
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = kvmppc_get_pid(vcpu);
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

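/*
 * ONE_REG accessors: the PR or HV backend gets first shot via kvm_ops; any
 * register id it returns -EINVAL for is handled generically below.
 */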
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
				val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_get_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, kvmppc_get_tar(vcpu));
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, kvmppc_get_ebbhr(vcpu));
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, kvmppc_get_ebbrr(vcpu));
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, kvmppc_get_bescr(vcpu));
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, kvmppc_get_ic(vcpu));
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
				kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_set_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			kvmppc_set_fpscr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_TAR:
			kvmppc_set_tar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_EBBHR:
			kvmppc_set_ebbhr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_EBBRR:
			kvmppc_set_ebbrr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_BESCR:
			kvmppc_set_bescr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_IC:
			kvmppc_set_ic(vcpu, set_reg_val(id, *val));
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

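/*
 * The vcpu and VM operations below are thin wrappers that dispatch to the
 * PR or HV implementation through kvm->arch.kvm_ops.
 */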
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm->arch.kvm_ops->free_memslot(slot);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      const struct kvm_memory_slot *old,
				      struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, old, new, change);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, old, new, change);
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->age_gfn(kvm, range);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

#ifdef CONFIG_KVM_XICS
	/*
	 * Free the XIVE and XICS devices which are not directly freed by the
	 * device 'release' method
	 */
	kfree(kvm->arch.xive_devices.native);
	kvm->arch.xive_devices.native = NULL;
	kfree(kvm->arch.xive_devices.xics_on_xive);
	kvm->arch.xive_devices.xics_on_xive = NULL;
	kfree(kvm->arch.xics_device);
	kvm->arch.xics_device = NULL;
#endif /* CONFIG_KVM_XICS */
}

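/*
 * H_LOGICAL_CI_LOAD / H_LOGICAL_CI_STORE: emulate cache-inhibited loads and
 * stores by forwarding them to in-kernel MMIO devices on the KVM MMIO bus.
 * Anything that cannot be satisfied here returns H_TOO_HARD so the
 * hypercall can be completed elsewhere (typically in userspace).
 */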
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;
	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;
	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;
	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;
	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;
	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;
	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;
	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;
	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xics_on_xive())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}
#endif /* CONFIG_KVM_XICS */

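/*
 * Module init: register with generic KVM, set up the PR backend when built
 * with the 32-bit handler, then register the in-kernel interrupt controller
 * device types (XIVE native and XICS-on-XIVE when the host runs XIVE,
 * otherwise the emulated XICS).
 */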
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
		if (kvmppc_xive_native_supported())
			kvm_register_device_ops(&kvm_xive_native_ops,
						KVM_DEV_TYPE_XIVE);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif

	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif