/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif
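
/*
 * Build the MSR value to load when an interrupt is delivered: start from
 * intr_msr and carry over the transactional-state bits, moving an active
 * transaction to suspended state on delivery.
 */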
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
	unsigned long msr = vcpu->arch.intr_msr;
	unsigned long cur_msr = kvmppc_get_msr(vcpu);

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(cur_msr))
		msr |= MSR_TS_S;
	else
		msr |= cur_msr & MSR_TS_MASK;

	kvmppc_set_msr(vcpu, msr);
}
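
/*
 * Search the guest SLB for an entry covering eaddr, comparing against the
 * 256M or 1T ESID depending on each entry's segment size.  Returns NULL
 * (after optionally dumping the SLB when DEBUG_MMU is set) if none matches.
 */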
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
				struct kvm_vcpu *vcpu,
				gva_t eaddr)
{
	int i;
	u64 esid = GET_ESID(eaddr);
	u64 esid_1t = GET_ESID_1T(eaddr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		u64 cmp_esid = esid;

		if (!vcpu->arch.slb[i].valid)
			continue;

		if (vcpu->arch.slb[i].tb)
			cmp_esid = esid_1t;

		if (vcpu->arch.slb[i].esid == cmp_esid)
			return &vcpu->arch.slb[i];
	}

	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
		eaddr, esid, esid_1t);
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (vcpu->arch.slb[i].vsid)
			dprintk("  %d: %c%c%c %llx %llx\n", i,
				vcpu->arch.slb[i].valid ? 'v' : ' ',
				vcpu->arch.slb[i].large ? 'l' : ' ',
				vcpu->arch.slb[i].tb ? 't' : ' ',
				vcpu->arch.slb[i].esid,
				vcpu->arch.slb[i].vsid);
	}

	return NULL;
}
static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}
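
/*
 * Combine the in-segment offset of eaddr with the segment's VSID to form
 * the virtual page number that is fed into the hashed page table hash.
 */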
static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
	eaddr &= kvmppc_slb_offset_mask(slb);

	return (eaddr >> VPN_SHIFT) |
		((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}
static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	struct kvmppc_slb *slb;

	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slb)
		return 0;

	return kvmppc_slb_calc_vpn(slb, eaddr);
}
static int mmu_pagesize(int mmu_pg)
{
	switch (mmu_pg) {
	case MMU_PAGE_64K:
		return 16;
	case MMU_PAGE_16M:
		return 24;
	}
	return 12;
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return mmu_pagesize(slbe->base_page_size);
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}
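
/*
 * Compute the host virtual address of the PTEG that may hold the mapping
 * for eaddr: hash the VPN (inverting the hash for the secondary PTEG),
 * mask to the hashed page table size encoded in SDR1, and add the result
 * to the HTAB origin.  For PAPR guests SDR1 already holds an HVA;
 * otherwise the guest physical address is converted via the memslots.
 */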
static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
				struct kvmppc_slb *slbe, gva_t eaddr,
				bool second)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u64 hash, pteg, htabsize;
	u32 ssize;
	hva_t r;
	u64 vpn;

	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
	if (second)
		hash = ~hash;
	hash &= ((1ULL << 39ULL) - 1ULL);
	hash &= htabsize;
	hash <<= 7ULL;

	pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg |= hash;

	dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
		kvmppc_mmu_book3s_64_get_page(slbe, eaddr),
		vcpu_book3s->sdr1, pteg, slbe->vsid);

	/* When running a PAPR guest, SDR1 contains a HVA address instead
	   of a GPA */
	if (vcpu->arch.papr_enabled)
		r = pteg;
	else
		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);

	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}
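
/*
 * Build the abbreviated virtual page number (AVPN) for this address,
 * aligned so it can be masked with HPTE_V_AVPN and compared against the
 * first doubleword of each HPTE.
 */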
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

	if (p < 16)
		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
	else
		avpn <<= p - 16;

	return avpn;
}
/*
 * Return page size encoded in the second word of a HPTE, or
 * -1 for an invalid encoding for the base page size indicated by
 * the SLB entry.  This doesn't handle mixed pagesize segments yet.
 */
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
	switch (slbe->base_page_size) {
	case MMU_PAGE_64K:
		if ((r & 0xf000) == 0x1000)
			return MMU_PAGE_64K;
		break;
	case MMU_PAGE_16M:
		if ((r & 0xff000) == 0)
			return MMU_PAGE_16M;
		break;
	}
	return -1;
}
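
/*
 * Translate a guest effective address to a guest real address by walking
 * the guest hashed page table: find the SLB entry, locate the PTEG from
 * the hash, scan it for a matching HPTE (retrying with the secondary
 * hash), then derive the real address, permissions and WIMG bits, and
 * update the referenced/changed bits in the guest HPTE.
 */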
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, bool data,
				      bool iswrite)
{
	struct kvmppc_slb *slbe;
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
	u64 v, r;
	u64 v_val, v_mask;
	u64 eaddr_mask;
	int i;
	u8 pp, key = 0;
	bool found = false;
	bool second = false;
	int pgsize;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Magic page override */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		gpte->eaddr = eaddr;
		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
		gpte->raddr = vcpu->arch.magic_page_pa | (eaddr & 0xfff);
		gpte->raddr &= KVM_PAM;
		gpte->may_execute = true;
		gpte->may_read = true;
		gpte->may_write = true;
		gpte->page_size = MMU_PAGE_4K;
		gpte->wimg = HPTE_R_M;

		return 0;
	}
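
	/*
	 * Normal translation: look up the SLB entry for this address and
	 * build the doubleword-0 value (and mask) that a matching HPTE
	 * must carry.
	 */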
	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slbe)
		goto no_seg_found;

	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
	v_val = avpn & HPTE_V_AVPN;

	if (slbe->tb)
		v_val |= SLB_VSID_B_1T;
	if (slbe->large)
		v_val |= HPTE_V_LARGE;
	v_val |= HPTE_V_VALID;

	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
		HPTE_V_SECONDARY;

	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);

do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
	if (kvm_is_error_hva(ptegp))
		goto no_page_found;

	if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk_ratelimited(KERN_ERR
			"KVM: Can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

	if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
		key = 4;
	else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
		key = 4;
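
	/* Scan the eight HPTEs (16 doublewords) of this PTEG for a match. */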
	for (i = 0; i < 16; i += 2) {
		u64 pte0 = be64_to_cpu(pteg[i]);
		u64 pte1 = be64_to_cpu(pteg[i + 1]);

		/* Check all relevant fields of 1st dword */
		if ((pte0 & v_mask) == v_val) {
			/* If large page bit is set, check pgsize encoding */
			if (slbe->large &&
			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
				pgsize = decode_pagesize(slbe, pte1);
				if (pgsize < 0)
					continue;
			}
			found = true;
			break;
		}
	}

	if (!found) {
		if (second)
			goto no_page_found;
		v_val |= HPTE_V_SECONDARY;
		second = true;
		goto do_second;
	}
	v = be64_to_cpu(pteg[i]);
	r = be64_to_cpu(pteg[i + 1]);
	pp = (r & HPTE_R_PP) | key;
	if (r & HPTE_R_PP0)
		pp |= 8;

	gpte->eaddr = eaddr;
	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

	eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
	gpte->page_size = pgsize;
	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
	if (unlikely(vcpu->arch.disable_kernel_nx) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR))
		gpte->may_execute = true;
	gpte->may_read = false;
	gpte->may_write = false;
	gpte->wimg = r & HPTE_R_WIMG;
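
	/*
	 * Decode the PP bits (combined with the protection key): values
	 * 0, 1, 2 and 6 grant read/write, values 3, 5, 7 and 10 grant
	 * read-only, and anything else leaves the page inaccessible.
	 */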
	switch (pp) {
	case 0:
	case 1:
	case 2:
	case 6:
		gpte->may_write = true;
		/* fall through */
	case 3:
	case 5:
	case 7:
	case 10:
		gpte->may_read = true;
		break;
	}

	dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
		"-> 0x%lx\n",
		eaddr, avpn, gpte->vpage, gpte->raddr);

	/* Update PTE R and C bits, so the guest's swapper knows we used the
	 * page */
	if (gpte->may_read && !(r & HPTE_R_R)) {
		/*
		 * Set the accessed flag.
		 * We have to write this back with a single byte write
		 * because another vcpu may be accessing this on
		 * non-PAPR platforms such as mac99, and this is
		 * what real hardware does.
		 */
		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
		r |= HPTE_R_R;
		put_user(r >> 8, addr + 6);
	}
	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
		/* Set the dirty flag -- also with a single byte write */
		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
		r |= HPTE_R_C;
		put_user(r, addr + 7);
	}

	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

	if (!gpte->may_read || (iswrite && !gpte->may_write))
		return -EPERM;
	return 0;

no_page_found:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	return -ENOENT;

no_seg_found:
	dprintk("KVM MMU: Trigger segment fault\n");
	return -EINVAL;
}
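
/*
 * Emulate the slbmte instruction: decode the RS/RB register images into a
 * kvmppc_slb entry and eagerly map the new segment in the shadow MMU.
 */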
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
	u64 esid, esid_1t;
	int slb_nr;
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

	esid = GET_ESID(rb);
	esid_1t = GET_ESID_1T(rb);
	slb_nr = rb & 0xfff;

	if (slb_nr >= vcpu->arch.slb_nr)
		return;

	slbe = &vcpu->arch.slb[slb_nr];

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
	slbe->esid  = slbe->tb ? esid_1t : esid;
	slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

	slbe->base_page_size = MMU_PAGE_4K;
	if (slbe->large) {
		if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
			switch (rs & SLB_VSID_LP) {
			case SLB_VSID_LP_00:
				slbe->base_page_size = MMU_PAGE_16M;
				break;
			case SLB_VSID_LP_01:
				slbe->base_page_size = MMU_PAGE_64K;
				break;
			}
		} else
			slbe->base_page_size = MMU_PAGE_16M;
	}

	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
	slbe->origv = rs;

	/* Map the new segment */
	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}
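
/*
 * Emulate slbmfee/slbmfev: read back the ESID and VSID doublewords stored
 * when the entry was written with slbmte.
 */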
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->origv;
}
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
	struct kvmppc_slb *slbe;
	u64 seg_size;

	dprintk("KVM MMU: slbie(0x%llx)\n", ea);

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
	if (!slbe)
		return;

	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

	slbe->valid = false;
	slbe->orige = 0;
	slbe->origv = 0;

	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}
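
/*
 * Emulate slbia: invalidate every SLB entry except entry 0, then
 * re-establish the mapping for the current PC if instruction translation
 * is on.
 */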
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	dprintk("KVM MMU: slbia()\n");

	for (i = 1; i < vcpu->arch.slb_nr; i++) {
		vcpu->arch.slb[i].valid = false;
		vcpu->arch.slb[i].orige = 0;
		vcpu->arch.slb[i].origv = 0;
	}

	if (kvmppc_get_msr(vcpu) & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}
static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

	/*
	 * According to Book3 2.01 mtsrin is implemented as:
	 *
	 * The SLB entry specified by (RB)32:35 is loaded from register
	 * RS, as follows.
	 *
	 * SLBE Bit	Source			SLB Field
	 *
	 * 0:31		0x0000_0000		ESID-0:31
	 * 32:35	(RB)32:35		ESID-32:35
	 * 36		0b1			V
	 * 37:61	0x00_0000 || 0b0	VSID-0:24
	 * 62:88	(RS)37:63		VSID-25:51
	 * 89:91	(RS)33:35		Ks Kp N
	 * 92		(RS)36			L ((RS)36 must be 0b0)
	 * 93		0b0			C
	 */

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}
static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
				       bool large)
{
	u64 mask = 0xFFFFFFFFFULL;
	long i;
	struct kvm_vcpu *v;

	dprintk("KVM MMU: tlbie(0x%lx)\n", va);

	/*
	 * The tlbie instruction changed behaviour starting with
	 * POWER6.  POWER6 and later don't have the large page flag
	 * in the instruction but in the RB value, along with bits
	 * indicating page and segment sizes.
	 */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
		/* POWER6 or later */
		if (va & 1) {		/* L bit */
			if ((va & 0xf000) == 0x1000)
				mask = 0xFFFFFFFF0ULL;	/* 64k page */
			else
				mask = 0xFFFFFF000ULL;	/* 16M page */
		}
	} else {
		/* older processors, e.g. PPC970 */
		if (large)
			mask = 0xFFFFFF000ULL;
	}
	/* flush this VA on all vcpus */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}
#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
	ulong mp_ea = vcpu->arch.magic_page_ea;

	return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
		(mp_ea >> SID_SHIFT) == esid;
}
#endif
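
/*
 * Map a guest ESID to the VSID used for the shadow (host-side) mappings.
 * With translation off, a VSID_REAL-tagged value is synthesized from the
 * ESID; with translation on, the VSID comes from the guest SLB entry.
 * Extra flag bits record 1T segments, 64k segments and problem state so
 * that distinct shadow mappings are kept apart.
 */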
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	struct kvmppc_slb *slb = NULL;
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;
	int pagesize = MMU_PAGE_64K;
	u64 msr = kvmppc_get_msr(vcpu);

	if (msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb) {
			gvsid = slb->vsid;
			pagesize = slb->base_page_size;
			if (slb->tb) {
				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
				gvsid |= VSID_1T;
			}
		}
	}

	switch (msr & (MSR_DR|MSR_IR)) {
	case 0:
		gvsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		gvsid |= VSID_REAL_IR;
		break;
	case MSR_DR:
		gvsid |= VSID_REAL_DR;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;

		break;
	default:
		BUG();
		break;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Mark this as a 64k segment if the host is using
	 * 64k pages, the host MMU supports 64k pages and
	 * the guest segment page size is >= 64k,
	 * but not if this segment contains the magic page.
	 */
	if (pagesize >= MMU_PAGE_64K &&
	    mmu_psize_defs[MMU_PAGE_64K].shift &&
	    !segment_contains_magic_page(vcpu, esid))
		gvsid |= VSID_64K;
#endif

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	*vsid = gvsid;
	return 0;

no_slb:
	/* Catch magic page case */
	if (unlikely(mp_ea) &&
	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		*vsid = VSID_REAL | esid;
		return 0;
	}

	return -EINVAL;
}
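
/*
 * Report whether the emulated HID5 dcbz-32 bit is set, i.e. whether dcbz
 * should clear 32 bytes rather than a full cache line.
 */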
static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return (to_book3s(vcpu)->hid[5] & 0x80);
}
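
/*
 * Hook up the Book3S-64 MMU emulation callbacks.  There is no mfsrin
 * implementation here; mtsrin is emulated on top of slbmte above, and the
 * SLB hflag tells the rest of KVM that this vcpu has an SLB.
 */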
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mfsrin = NULL;
	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}