book3s_emulate.c 25 KB

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915
#define OP_31_XOP_TBEGIN	654
#define OP_31_XOP_TABORT	910
#define OP_31_XOP_TRECLAIM	942
#define OP_31_XOP_TRCHKPT	1006

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};
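
/*
 * Check whether the guest may access an SPR of the given privilege level:
 * PAPR guests are limited to supervisor SPRs, and problem-state guests to
 * their own small SPR set.
 */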
static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
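
/*
 * Copy the live register state (GPRs, FP/VEC state and the relevant SPRs)
 * into the vcpu's transactional checkpoint area, and back again.
 */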
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}
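
/*
 * Emulate treclaim.: reclaim the checkpointed state into the live registers,
 * record the failure cause in TEXASR/TFIAR if it has not been recorded yet
 * (Failure Summary clear), and take the guest out of transactional state.
 */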
static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;
	uint64_t texasr;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	preempt_disable();
	tm_enable();
	texasr = mfspr(SPRN_TEXASR);
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	/* failure recording depends on Failure Summary bit */
	if (!(texasr & TEXASR_FS)) {
		texasr &= ~TEXASR_FC;
		texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;

		texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			texasr |= TEXASR_HV;

		vcpu->arch.texasr = texasr;
		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	/*
	 * treclaim needs to quit to non-transactional state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);

	preempt_enable();

	if (vcpu->arch.shadow_fscr & FSCR_TAR)
		mtspr(SPRN_TAR, vcpu->arch.tar);
}
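
/*
 * Emulate trechkpt.: take the current register state as the checkpointed
 * state, reload it into the TM checkpoint registers and move the guest into
 * transactional-suspended state.
 */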
static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	/*
	 * need to flush FP/VEC/VSX to vcpu save area before
	 * copy.
	 */
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_copyto_vcpu_tm(vcpu);
	kvmppc_save_tm_sprs(vcpu);

	/*
	 * As a result of trecheckpoint, set TS to suspended.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	guest_msr |= MSR_TS_S;
	kvmppc_set_msr(vcpu, guest_msr);
	kvmppc_restore_tm_pr(vcpu);
	preempt_enable();
}

/* Emulate tabort. in guest privileged state */
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
	/* Currently we only emulate tabort.; the other tabort variants
	 * are not emulated since there is no kernel usage of them at
	 * present.
	 */
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	uint64_t org_texasr;

	preempt_disable();
	tm_enable();
	org_texasr = mfspr(SPRN_TEXASR);
	tm_abort(ra_val);

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/* failure recording depends on the Failure Summary bit,
	 * and tabort is treated as a nop in non-transactional
	 * state.
	 */
	if (!(org_texasr & TEXASR_FS) &&
	    MSR_TM_ACTIVE(guest_msr)) {
		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (guest_msr & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (guest_msr & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
	}
	tm_disable();
	preempt_enable();
}

#endif
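
/*
 * Emulate a privileged or trapping instruction on behalf of the PR guest.
 * The major opcode and extended opcode are decoded and dispatched to the
 * appropriate handler; anything left unhandled falls through to the paired
 * single emulation as a last resort.
 */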
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up in
			 * illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * add rules to fit in ISA specification regarding TM
			 * state transition in TM disabled/Suspended state,
			 * and target TM state is TM inactive(00) state. (the
			 * change should be suppressed).
			 */
			if (((cur_msr & MSR_TM) == 0) &&
			    ((srr1 & MSR_TM) == 0) &&
			    MSR_TM_SUSPENDED(cur_msr) &&
			    !MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, srr1);
			*advance = 0;
			break;
		}
		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
		case OP_31_XOP_TABORT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* only emulate for the privileged guest, since a
			 * problem-state guest can run with TM enabled and we
			 * don't expect to trap here for that case.
			 */
			WARN_ON(guest_msr & MSR_PR);

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			kvmppc_emulate_tabort(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRCHKPT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long texasr;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupt based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Intr */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			tm_enable();
			texasr = mfspr(SPRN_TEXASR);
			tm_disable();

			if (MSR_TM_ACTIVE(guest_msr) ||
			    !(texasr & (TEXASR_FS))) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			kvmppc_emulate_trchkpt(vcpu);
			break;
		}
#endif
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}
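
/*
 * Decode a 32-bit BAT SPR write into the upper or lower half of a shadow BAT
 * entry, keeping the combined 64-bit raw value in sync.
 */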
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}
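
/* Map an IBAT/DBAT SPR number to the corresponding shadow BAT entry. */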
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}
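
/*
 * Emulate a guest mtspr. SPRs we shadow are written into the vcpu/book3s
 * state; writes to unrecognized SPRs are logged and may raise a privileged
 * or illegal instruction program interrupt depending on the SPR number and
 * MSR[PR].
 */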
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
		    !((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
			    (sprn == SPRN_TFHAR))) {
			/* it is illegal to mtspr() TM regs in
			 * other than non-transactional state, with
			 * the exception of TFHAR in suspend state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
#endif
		break;
	unprivileged:
	default:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}
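
/*
 * Emulate a guest mfspr. Values come from the shadow state kept in the
 * vcpu/book3s structures; SPRs we do not implement read as zero, and truly
 * unknown ones may raise a program interrupt, mirroring the mtspr path above.
 */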
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
#endif
		*spr_val = 0;
		break;
	default:
	unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}
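
/* Build the DSISR value reported to the guest for an alignment interrupt. */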
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}
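
/*
 * Compute the effective address (DAR) reported for an alignment interrupt:
 * on 64-bit Book3S the hardware fault_dar is already valid, on 32-bit it is
 * reconstructed from the instruction.
 */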
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, and so can we
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}