slb_low.S

/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>
#include <asm/feature-fixups.h>

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *	     VSID will be stored
 *	rx = scratch register (clobbered)
 *	rf = flags
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, rf, size)				\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
/*									\
 * PowerMac can take SLB faults before the feature fixups have been	\
 * applied, so make the 65-bit variant the default side of the fixup.	\
 */									\
BEGIN_MMU_FTR_SECTION							\
	srdi	rx,rt,VSID_BITS_65_##size;				\
	clrldi	rt,rt,(64-VSID_BITS_65_##size);				\
	add	rt,rt,rx;						\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_65_##size;				\
	add	rt,rt,rx;						\
	rldimi	rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_65_##size)); \
MMU_FTR_SECTION_ELSE							\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx;						\
	rldimi	rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_##size)); \
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
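
/*
 * For reference, a rough C sketch of the 68-bit-VA branch above (the
 * 65-bit branch is the same but with the VSID_BITS_65_* constants);
 * here VSID_MODULUS is assumed to be (1ul << VSID_BITS) - 1:
 *
 *	unsigned long vsid_scramble(unsigned long protovsid)
 *	{
 *		unsigned long v = protovsid * VSID_MULTIPLIER;
 *
 *		v = (v >> VSID_BITS) + (v & VSID_MODULUS); // fold high part down
 *		v += (v + 1) >> VSID_BITS;                 // 2^VSID_BITS - 1 wraps to 0
 *		return v & VSID_MODULUS;                   // keep the low VSID_BITS only
 *	}
 *
 * The rldimi at the end of each branch then merges this VSID into the
 * flags word at SLB_VSID_SHIFT.
 */
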
/* void slb_allocate(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 *	r3 is preserved.
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate)
	/*
	 * Check if the address falls within the range of the first context,
	 * or if we may need to handle multi-context.  For the first context
	 * we allocate the SLB entry via the fast path below.  For larger
	 * addresses we branch out to C code and see if additional contexts
	 * have been allocated.
	 * The test here is:
	 *	(ea & ~REGION_MASK) >= (1ull << MAX_EA_BITS_PER_CONTEXT)
	 */
	rldicr. r9,r3,4,(63 - MAX_EA_BITS_PER_CONTEXT - 4)
	bne-	8f
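
	/*
	 * In C terms the rldicr./bne- pair above is roughly:
	 *
	 *	if ((ea & ~REGION_MASK) >> MAX_EA_BITS_PER_CONTEXT)
	 *		goto fail;	// label 8 below; handled outside this fast path
	 *
	 * i.e. any EA bit set at or above MAX_EA_BITS_PER_CONTEXT (ignoring
	 * the top region nibble) takes the slow/failure exit.
	 */
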
	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* Check if hitting the linear mapping or some other kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
.globl slb_miss_kernel_load_linear
slb_miss_kernel_load_linear:
	li	r11,0
	/*
	 * context = (ea >> 60) - (0xc - 1)
	 * r9 = region id.
	 */
	subi	r9,r9,KERNEL_REGION_CONTEXT_OFFSET

BEGIN_FTR_SECTION
	b	.Lslb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load_1T

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	cmpldi	cr0,r9,0xf
	bne	1f
	/* Check virtual memmap region. To be patched at kernel boot */
.globl slb_miss_kernel_load_vmemmap
slb_miss_kernel_load_vmemmap:
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/*
	 * r10 contains the ESID, which is the original faulting EA shifted
	 * right by 28 bits. We need to compare that with (H_VMALLOC_END >> 28)
	 * which is 0xd00038000. That can't be used as an immediate, even if we
	 * ignored the 0xd, so we have to load it into a register, and we only
	 * have one register free. So we must load all of (H_VMALLOC_END >> 28)
	 * into a register and compare ESID against that.
	 */
	lis	r11,(H_VMALLOC_END >> 32)@h	// r11 = 0xffffffffd0000000
	ori	r11,r11,(H_VMALLOC_END >> 32)@l	// r11 = 0xffffffffd0003800
	// Rotate left 4, then mask with 0xffffffff0
	rldic	r11,r11,4,28			// r11 = 0xd00038000
	cmpld	r10,r11				// if r10 >= r11
	bge	5f				// goto io_mapping
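
	/*
	 * A rough C view of this boundary check (SID_SHIFT is 28 here):
	 *
	 *	if ((ea >> SID_SHIFT) >= (H_VMALLOC_END >> SID_SHIFT))
	 *		// IO mapping: encoding patched at boot (label 5)
	 *	else
	 *		// vmalloc: encoding from the PACA, which may be demoted
	 */
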
	/*
	 * vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines.
	 */
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
.globl slb_miss_kernel_load_io
slb_miss_kernel_load_io:
	li	r11,0
6:
	/*
	 * context = (ea >> 60) - (0xc - 1)
	 * r9 = region id.
	 */
	subi	r9,r9,KERNEL_REGION_CONTEXT_OFFSET

BEGIN_FTR_SECTION
	b	.Lslb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load_1T

0:	/*
	 * For userspace addresses, make sure this is region 0.
	 */
	cmpdi	r9, 0
	bne-	8f
	/*
	 * For user space, make sure we are within the allowed address limit.
	 */
	ld	r11,PACA_SLB_ADDR_LIMIT(r13)
	cmpld	r3,r11
	bge-	8f

	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
#ifdef CONFIG_PPC_MM_SLICES
	/* r10 holds the esid */
	cmpldi	r10,16
	/* below SLICE_LOW_TOP */
	blt	5f
	/*
	 * Handle hpsizes,
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	b	6f

5:
	/*
	 * Handle lpsizes
	 * r9 is get_paca()->context.low_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,1		/* index */
	addi	r9,r11,PACALOWSLICESPSIZE
	lbzx	r9,r13,r9		/* r9 is lpsizes[r11] */
	rldicl	r11,r10,0,63		/* r11 = r10 & 0x1 */
6:
	sldi	r11,r11,2		/* index * 4 */

	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp
	 */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */
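
	/*
	 * A rough C sketch of the CONFIG_PPC_MM_SLICES path above (the
	 * low/high arrays stand in for the PACALOWSLICESPSIZE and
	 * PACAHIGHSLICEPSIZE copies of the context; names are approximate):
	 *
	 *	if (esid < (SLICE_LOW_TOP >> SID_SHIFT)) {
	 *		byte  = low_slices_psize[esid >> 1];
	 *		shift = (esid & 1) * 4;
	 *	} else {
	 *		byte  = high_slices_psize[esid >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1)];
	 *		shift = ((esid >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 1) * 4;
	 *	}
	 *	psize = (byte >> shift) & 0xf;
	 *	flags = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 */
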
	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
	bge	.Lslb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	.Lslb_finish_load

8:	/* invalid EA - return an error indication */
	crset	4*cr0+eq		/* indicate failure */
	blr

/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
.Lslb_finish_load:
	rldimi	r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,r11,256M)
	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin.  Previously we tried to find a
	 * free slot first but that took too long.  Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
	mr	r9,r3

	/* slb_finish_load_1T continues here. r9=EA with non-ESID bits clear */
7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
.globl slb_compare_rr_to_size
slb_compare_rr_to_size:
	cmpldi	r10,0
	blt+	4f
	li	r10,SLB_NUM_BOLTED
4:
	std	r10,PACASTABRR(r13)
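
	/*
	 * Roughly, in C (the cmpldi immediate above is patched at boot with
	 * the real SLB size of the CPU):
	 *
	 *	slot = paca->stab_rr + 1;
	 *	if (slot >= slb_size)
	 *		slot = SLB_NUM_BOLTED;	// never evict a bolted entry
	 *	paca->stab_rr = slot;
	 */
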
3:
	rldimi	r9,r10,0,36		/* r9  = EA[0:35] | entry */
	oris	r10,r9,SLB_ESID_V@h	/* r10 = r9 | SLB_ESID_V */

	/* r9 = ESID data, r11 = VSID data */
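
	/*
	 * The two words handed to slbmte below are, in rough C terms
	 * (vsid_scramble() as sketched above, ESID_MASK covering EA[0:35]):
	 *
	 *	vsid      = vsid_scramble((context << ESID_BITS) | esid);
	 *	vsid_data = (vsid << SLB_VSID_SHIFT) | flags;
	 *	esid_data = (ea & ESID_MASK) | slot | SLB_ESID_V;
	 *
	 * The 1T variant below additionally inserts the segment-size field
	 * into vsid_data and uses the _1T shifts and masks.
	 */
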
	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r9,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r9,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r9,2		/* r11 = offset * sizeof(u32) */
	srdi	r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r9,r9,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r9,SLB_CACHE_ENTRIES+1
2:
	sth	r9,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr
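
	/*
	 * The cache update above is roughly this C; the assumption here is
	 * that an overflowed slb_cache_ptr (greater than SLB_CACHE_ENTRIES)
	 * tells the context-switch code to flush the whole SLB rather than
	 * just the cached entries:
	 *
	 *	if (paca->slb_cache_ptr < SLB_CACHE_ENTRIES)
	 *		paca->slb_cache[paca->slb_cache_ptr++] = esid_data >> SID_SHIFT;
	 *	else
	 *		paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	 */
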
/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
.Lslb_finish_load_1T:
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
	rldimi	r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,r11,1T)

	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r9,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b
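
	/*
	 * i.e. for 1T segments the proto-VSID fed to the scramble is roughly
	 *
	 *	(context << ESID_BITS_1T) | (ea >> SID_SHIFT_1T)
	 *
	 * with MMU_SEGSIZE_1T inserted into the VSID word at
	 * SLB_VSID_SSIZE_SHIFT before the shared tail at label 7 runs.
	 */
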
_ASM_NOKPROBE_SYMBOL(slb_allocate)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_linear)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_io)
_ASM_NOKPROBE_SYMBOL(slb_compare_rr_to_size)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_vmemmap)
#endif