/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

#define KVM_MAGIC_PAGE          (-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ            0x80000000
#define KVM_INST_STW            0x90000000
#define KVM_INST_LD             0xe8000000
#define KVM_INST_STD            0xf8000000
#define KVM_INST_NOP            0x60000000
#define KVM_INST_B              0x48000000
#define KVM_INST_B_MASK         0x03ffffff
#define KVM_INST_B_MAX          0x01ffffff
#define KVM_INST_LI             0x38000000

#define KVM_MASK_RT             0x03e00000
#define KVM_RT_30               0x03c00000
#define KVM_MASK_RB             0x0000f800
#define KVM_INST_MFMSR          0x7c0000a6

#define SPR_FROM                0
#define SPR_TO                  0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
                                    (((sprn) & 0x1f) << 16) | \
                                    (((sprn) & 0x3e0) << 6) | \
                                    (moveto))

#define KVM_INST_MFSPR(sprn)    KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)    KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC        0x7c00046c
#define KVM_INST_MTMSRD_L0      0x7c000164
#define KVM_INST_MTMSRD_L1      0x7c010164
#define KVM_INST_MTMSR          0x7c000124
#define KVM_INST_WRTEE          0x7c000106
#define KVM_INST_WRTEEI_0       0x7c000146
#define KVM_INST_WRTEEI_1       0x7c008146
#define KVM_INST_MTSRIN         0x7c0001e4

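/*
 * The host maps a "magic page" holding struct kvm_vcpu_arch_shared at
 * the top of the guest effective address space (KVM_MAGIC_PAGE, i.e.
 * -4096). The code below scans the kernel text and rewrites privileged
 * instructions into plain loads/stores against that shared page, or
 * into branches to small emulation templates, so that common guest
 * operations no longer have to trap into the hypervisor.
 */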
static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

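/*
 * Replace one instruction word in place. The icache flush is needed so
 * the CPU cannot keep executing a stale copy of the old instruction.
 */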
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
        *inst = new_inst;
        flush_icache_range((ulong)inst, (ulong)inst + 4);
}

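/*
 * Helpers that rewrite an instruction into a load from / store to a
 * magic page variable. On 64-bit kernels, ld/std access the full
 * 64-bit shared field; on 32-bit (big-endian) kernels, the _ld/_std
 * variants add 4 to the displacement so the generated lwz/stw hits
 * the least significant word of the 64-bit field.
 */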
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
        kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
        kvm_patch_ins(inst, KVM_INST_NOP);
}

static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
        /* On relocatable kernels interrupt handlers and our code
           can be in different regions, so we don't patch them */
        if ((ulong)inst < (ulong)&__end_interrupts)
                return;
#endif
        kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

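/*
 * kvm_tmp is a simple bump allocator: chunks are carved off the front
 * of the buffer and never freed individually. Whatever remains unused
 * after patching is returned to the page allocator by kvm_free_tmp().
 */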
static u32 *kvm_alloc(int len)
{
        u32 *p;

        if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
                printk(KERN_ERR "KVM: No more space (%d + %d)\n",
                                kvm_tmp_index, len);
                kvm_patching_worked = false;
                return NULL;
        }

        p = (void*)&kvm_tmp[kvm_tmp_index];
        kvm_tmp_index += len;

        return p;
}

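/*
 * The kvm_emulate_* arrays are instruction templates, defined in
 * assembly, that get copied into kvm_tmp and fixed up at run time.
 * The *_offs symbols are word offsets into a template marking where
 * the branch back, the register operand and the original instruction
 * must be patched in; *_len is the template length in words.
 */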
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
        p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;

        switch (get_rt(rt)) {
        case 30:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
                                 magic_var(scratch2), KVM_RT_30);
                break;
        case 31:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
                                 magic_var(scratch1), KVM_RT_30);
                break;
        default:
                p[kvm_emulate_mtmsrd_reg_offs] |= rt;
                break;
        }

        p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

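/*
 * The remaining kvm_patch_ins_* functions follow the same pattern as
 * kvm_patch_ins_mtmsrd(): copy the matching template into kvm_tmp,
 * wire up the branch back to the instruction after the patch site,
 * splice in the register operand (going through a scratch field in
 * the magic page when the operand is r30 or r31, which the templates
 * themselves clobber), and finally replace the original instruction
 * with a branch into the template.
 */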
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
        p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

        /* Make clobbered registers work too */
        switch (get_rt(rt)) {
        case 30:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
                                 magic_var(scratch2), KVM_RT_30);
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
                                 magic_var(scratch2), KVM_RT_30);
                break;
        case 31:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
                                 magic_var(scratch1), KVM_RT_30);
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
                                 magic_var(scratch1), KVM_RT_30);
                break;
        default:
                p[kvm_emulate_mtmsr_reg1_offs] |= rt;
                p[kvm_emulate_mtmsr_reg2_offs] |= rt;
                break;
        }

        p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_wrtee_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
        p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

        if (imm_one) {
                p[kvm_emulate_wrtee_reg_offs] =
                        KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
        } else {
                /* Make clobbered registers work too */
                switch (get_rt(rt)) {
                case 30:
                        kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
                                         magic_var(scratch2), KVM_RT_30);
                        break;
                case 31:
                        kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
                                         magic_var(scratch1), KVM_RT_30);
                        break;
                default:
                        p[kvm_emulate_wrtee_reg_offs] |= rt;
                        break;
                }
        }

        p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
        p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
        p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
        p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
        p[kvm_emulate_mtsrin_reg2_offs] |= rt;
        p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#endif

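/*
 * Ask the host, via an ePAPR hypercall, to map the magic page at
 * KVM_MAGIC_PAGE on the calling CPU. out[0] returns the feature bits
 * the host supports for the mapping.
 */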
static void kvm_map_magic_page(void *data)
{
        u32 *features = data;

        ulong in[8] = {0};
        ulong out[8];

        in[0] = KVM_MAGIC_PAGE;
        in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

        epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

        *features = out[0];
}

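/*
 * Check one instruction word for patchability. Candidates are compared
 * against the opcode templates with the RT field (and, for mtsrin, the
 * RB field) masked out, so a single case matches the instruction
 * regardless of which register it operates on.
 */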
static void kvm_check_ins(u32 *inst, u32 features)
{
        u32 _inst = *inst;
        u32 inst_no_rt = _inst & ~KVM_MASK_RT;
        u32 inst_rt = _inst & KVM_MASK_RT;

        switch (inst_no_rt) {
        /* Loads */
        case KVM_INST_MFMSR:
                kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG0):
                kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG1):
                kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG2):
                kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG3):
                kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SRR0):
                kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SRR1):
                kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
                break;
#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_DEAR):
#else
        case KVM_INST_MFSPR(SPRN_DAR):
#endif
                kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_DSISR):
                kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
                break;

#ifdef CONFIG_PPC_BOOK3E_MMU
        case KVM_INST_MFSPR(SPRN_MAS0):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS1):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS2):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS3):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS4):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS6):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_MAS7):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
                break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

        case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
                break;
        case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
                break;

#ifdef CONFIG_BOOKE
        case KVM_INST_MFSPR(SPRN_ESR):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
                break;
#endif

        case KVM_INST_MFSPR(SPRN_PIR):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
                break;

        /* Stores */
        case KVM_INST_MTSPR(SPRN_SPRG0):
                kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG1):
                kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG2):
                kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG3):
                kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SRR0):
                kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SRR1):
                kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
                break;
#ifdef CONFIG_BOOKE
        case KVM_INST_MTSPR(SPRN_DEAR):
#else
        case KVM_INST_MTSPR(SPRN_DAR):
#endif
                kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_DSISR):
                kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
                break;

#ifdef CONFIG_PPC_BOOK3E_MMU
        case KVM_INST_MTSPR(SPRN_MAS0):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS1):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS2):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS3):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS4):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS6):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_MAS7):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
                break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

        case KVM_INST_MTSPR(SPRN_SPRG4):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG5):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG6):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
                break;
        case KVM_INST_MTSPR(SPRN_SPRG7):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
                break;

#ifdef CONFIG_BOOKE
        case KVM_INST_MTSPR(SPRN_ESR):
                if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
                        kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
                break;
#endif

        /* Nops */
        case KVM_INST_TLBSYNC:
                kvm_patch_ins_nop(inst);
                break;

        /* Rewrites */
        case KVM_INST_MTMSRD_L1:
                kvm_patch_ins_mtmsrd(inst, inst_rt);
                break;
        case KVM_INST_MTMSR:
        case KVM_INST_MTMSRD_L0:
                kvm_patch_ins_mtmsr(inst, inst_rt);
                break;
#ifdef CONFIG_BOOKE
        case KVM_INST_WRTEE:
                kvm_patch_ins_wrtee(inst, inst_rt, 0);
                break;
#endif
        }

        switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
        case KVM_INST_MTSRIN:
                if (features & KVM_MAGIC_FEAT_SR) {
                        u32 inst_rb = _inst & KVM_MASK_RB;
                        kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
                }
                break;
#endif
        }

        switch (_inst) {
#ifdef CONFIG_BOOKE
        case KVM_INST_WRTEEI_0:
                kvm_patch_ins_wrteei_0(inst);
                break;

        case KVM_INST_WRTEEI_1:
                kvm_patch_ins_wrtee(inst, 0, 1);
                break;
#endif
        }
}

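/*
 * kvm_template_start/end bracket the emulation templates within the
 * kernel image; the scan below skips that range so the patcher never
 * rewrites its own template code.
 */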
extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

static void kvm_use_magic_page(void)
{
        u32 *p;
        u32 *start, *end;
        u32 features;

        /* Tell the host to map the magic page to -4096 on all CPUs */
        on_each_cpu(kvm_map_magic_page, &features, 1);

        /*
         * Quick self-test to see if the mapping works; a non-zero
         * return means the read faulted.
         */
        if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
                kvm_patching_worked = false;
                return;
        }

        /* Now loop through all code and find instructions */
        start = (void*)_stext;
        end = (void*)_etext;

        /*
         * Being interrupted in the middle of patching would
         * be bad for SPRG4-7, which KVM can't keep in sync
         * with emulated accesses because reads don't trap.
         */
        local_irq_disable();

        for (p = start; p < end; p++) {
                /* Avoid patching the template code */
                if (p >= kvm_template_start && p < kvm_template_end) {
                        p = kvm_template_end - 1;
                        continue;
                }
                kvm_check_ins(p, features);
        }

        local_irq_enable();

        printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
               kvm_patching_worked ? "worked" : "failed");
}

static __init void kvm_free_tmp(void)
{
        /*
         * Inform kmemleak about the hole in the .bss section since the
         * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
         */
        kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
                           ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
        free_reserved_area(&kvm_tmp[kvm_tmp_index],
                           &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

static int __init kvm_guest_init(void)
{
        if (!kvm_para_available())
                goto free_tmp;

        if (!epapr_paravirt_enabled)
                goto free_tmp;

        if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
                kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
        /* Enable napping */
        powersave_nap = 1;
#endif

free_tmp:
        kvm_free_tmp();

        return 0;
}
postcore_initcall(kvm_guest_init);