/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
#warning Limited user VSID range means pagetable space is wasted
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * vmemmap is the starting address of the virtual address space where
 * struct pages are allocated for all possible PFNs present on the system,
 * including holes and bad memory (hence sparse). These virtual struct
 * pages are stored in sequence in this virtual address space irrespective
 * of whether the corresponding PFN is valid or not. This achieves a
 * constant relationship between the address of a struct page and its PFN.
 *
 * During boot, or during a memory hotplug operation when a new memory
 * section is added, physical memory allocation (including hash table
 * bolting) is performed for the set of struct pages which are part of
 * the memory section. This saves memory by not allocating struct pages
 * for PFNs which are not valid.
 *
 *  -----------------------------------------------
 *  | PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES |
 *  -----------------------------------------------
 *
 *           f000000000000000                  c000000000000000
 * vmemmap  +--------------+                  +--------------+
 *  +       |  page struct | +--------------> |  page struct |
 *  |       +--------------+                  +--------------+
 *  |       |  page struct | +--------------> |  page struct |
 *  |       +--------------+                  +--------------+
 *  |       |  page struct |          +-----> |  page struct |
 *  |       +--------------+          |       +--------------+
 *  |       |  page struct |          |  +--> |  page struct |
 *  |       +--------------+          |  |    +--------------+
 *  |       |  page struct |          |  |
 *  |       +--------------+          |  |
 *  |       |  page struct |          |  |
 *  |       +--------------+          |  |
 *  |       |  page struct |          |  |
 *  |       +--------------+          |  |
 *  |       |  page struct |          |  |
 *  |       +--------------+          |  |
 *  |       |  page struct | +--------+  |
 *  |       +--------------+             |
 *  |       |  page struct | +-----------+
 *  |       +--------------+
 *  |       |  page struct | No mapping
 *  |       +--------------+
 *  |       |  page struct | No mapping
 *  v       +--------------+
 *
 *  -----------------------------------------
 *  | RELATION BETWEEN STRUCT PAGES AND PFNS|
 *  -----------------------------------------
 *
 * vmemmap  +--------------+                 +---------------+
 *  +       |  page struct | +-------------> |      PFN      |
 *  |       +--------------+                 +---------------+
 *  |       |  page struct | +-------------> |      PFN      |
 *  |       +--------------+                 +---------------+
 *  |       |  page struct | +-------------> |      PFN      |
 *  |       +--------------+                 +---------------+
 *  |       |  page struct | +-------------> |      PFN      |
 *  |       +--------------+                 +---------------+
 *  |       |              |
 *  |       +--------------+
 *  |       |              |
 *  |       +--------------+
 *  |       |              |
 *  |       +--------------+                 +---------------+
 *  |       |  page struct | +-------------> |      PFN      |
 *  |       +--------------+                 +---------------+
 *  |       |              |
 *  |       +--------------+
 *  |       |              |
 *  |       +--------------+                 +---------------+
 *  |       |  page struct | +-------------> |      PFN      |
 *  |       +--------------+                 +---------------+
 *  |       |  page struct | +-------------> |      PFN      |
 *  v       +--------------+                 +---------------+
 */
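
/*
 * Illustrative note, not part of the original file: because the vmemmap
 * array is flat, the pfn <-> struct page conversion is pure pointer
 * arithmetic. This is roughly what the generic SPARSEMEM_VMEMMAP helpers
 * in include/asm-generic/memory_model.h expand to:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */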

/*
 * On hash-based CPUs, the vmemmap is bolted in the hash table.
 */
int __meminit hash__vmemmap_create_mapping(unsigned long start,
					   unsigned long page_size,
					   unsigned long phys)
{
	int rc = htab_bolt_mapping(start, start + page_size, phys,
				   pgprot_val(PAGE_KERNEL),
				   mmu_vmemmap_psize, mmu_kernel_ssize);
	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, start + page_size,
					      mmu_vmemmap_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void hash__vmemmap_remove_mapping(unsigned long start,
				  unsigned long page_size)
{
	int rc = htab_remove_mapping(start, start + page_size,
				     mmu_vmemmap_psize,
				     mmu_kernel_ssize);
	BUG_ON((rc < 0) && (rc != -ENOENT));
	WARN_ON(rc == -ENOENT);
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}

	smp_wmb();
	return 0;
}
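
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * mappings typically reach hash__map_kernel_page() through the generic
 * ioremap path, one page at a time, e.g.:
 *
 *	void __iomem *regs = ioremap(phys_addr, size);
 *	...
 *	iounmap(regs);
 */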

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					pmd_t *pmdp, unsigned long clr,
					unsigned long set)
{
	__be64 old_be, tmp;
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	/*
	 * Atomically update the PMD: spin while H_PAGE_BUSY is set, then
	 * clear the bits in 'clr' and set the bits in 'set', retrying if
	 * the reservation taken by ldarx is lost before stdcx. succeeds.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		andc	%1,%0,%4 \n\
		or	%1,%1,%7\n\
		stdcx.	%1,0,%3 \n\
		bne-	1b"
	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );

	old = be64_to_cpu(old_be);

	trace_hugepage_update(addr, old, clr, set);
	if (old & H_PAGE_HASHPTE)
		hpte_do_hugepage_flush(mm, addr, pmdp, old);
	return old;
}
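
/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * typical caller clears or sets PTE bits under the PMD lock, e.g.
 * write-protecting a huge page:
 *
 *	old = hash__pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
 */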

pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));

	pmd = *pmdp;
	pmd_clear(pmdp);
	/*
	 * Wait for all pending hash_page to finish. This is needed
	 * in case of subpage collapse. When we collapse normal pages
	 * to hugepage, we first clear the pmd, then invalidate all
	 * the PTE entries. The assumption here is that any low level
	 * page fault will see a none pmd and take the slow path that
	 * will wait on mmap_sem. But we could very well be in a
	 * hash_page with local ptep pointer value. Such a hash page
	 * can result in adding new HPTE entries for normal subpages.
	 * That means we could be modifying the page content as we
	 * copy them to a huge page. So wait for parallel hash_page
	 * to finish before invalidating HPTE entries. We can do this
	 * by sending an IPI to all the cpus and executing a dummy
	 * function there.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	/*
	 * Now invalidate the hpte entries in the range
	 * covered by pmd. This makes sure we take a
	 * fault and will find the pmd as none, which will
	 * result in a major fault which takes mmap_sem and
	 * hence wait for collapse to complete. Without this
	 * the __collapse_huge_page_copy can result in copying
	 * the old content.
	 */
	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
	return pmd;
}
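
/*
 * Context note (an assumption, not part of the original file): the
 * function above is reached from the generic khugepaged collapse path,
 * which calls pmdp_collapse_flush() before __collapse_huge_page_copy()
 * copies the subpages into the new huge page.
 */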

/*
 * We want to put the pgtable in pmd and use pgtable for tracking
 * the base page size hptes
 */
void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));
	/*
	 * we store the pgtable in the second half of PMD
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	*pgtable_slot = pgtable;
	/*
	 * expose the deposited pgtable to other cpus.
	 * before we set the hugepage PTE at pmd level
	 * hash fault code looks at the deposited pgtable
	 * to store hash index values.
	 */
	smp_wmb();
}
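
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * the generic THP code pairs the deposit with installing the huge PMD,
 * all under the PMD lock, roughly:
 *
 *	ptl = pmd_lock(mm, pmd);
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 *	set_pmd_at(mm, haddr, pmd, entry);
 *	spin_unlock(ptl);
 */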

pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;
	pgtable_t *pgtable_slot;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Once we withdraw, mark the entry NULL.
	 */
	*pgtable_slot = NULL;
	/*
	 * We store HPTE information in the deposited PTE fragment.
	 * Zero out the content on withdraw.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	return pgtable;
}

/*
 * A linux hugepage PMD was changed and the corresponding hash table entries
 * need to be flushed.
 */
void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
			    pmd_t *pmdp, unsigned long old_pmd)
{
	int ssize;
	unsigned int psize;
	unsigned long vsid;
	unsigned long flags = 0;

	/* get the base page size, vsid and segment size */
#ifdef CONFIG_DEBUG_VM
	psize = get_slice_psize(mm, addr);
	BUG_ON(psize == MMU_PAGE_16M);
#endif
	if (old_pmd & H_PAGE_COMBO)
		psize = MMU_PAGE_4K;
	else
		psize = MMU_PAGE_64K;

	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_user_vsid(&mm->context, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	if (mm_is_thread_local(mm))
		flags |= HPTE_LOCAL_UPDATE;

	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
}

pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	pgtable_t pgtable;
	unsigned long old;
	pgtable_t *pgtable_slot;

	/* Clear every bit: the PMD becomes none while we keep the old value. */
	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * We have pmd == none and we are holding page_table_lock.
	 * So we can safely go and clear the pgtable hash
	 * index info.
	 */
	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
	pgtable = *pgtable_slot;
	/*
	 * Zero out the old valid and hash index details; the hash fault
	 * path looks at them.
	 */
	memset(pgtable, 0, PTE_FRAG_SIZE);
	/*
	 * Serialize against find_current_mm_pte variants, which do lock-less
	 * lookups in page tables with local interrupts disabled. For huge pages
	 * they cast pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}
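
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * the generic huge-page zap path uses this helper via
 * pmdp_huge_get_and_clear_full() and then withdraws the deposited page
 * table, roughly:
 *
 *	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
 *						tlb->fullmm);
 *	pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
 *	pte_free(tlb->mm, pgtable);
 */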

int hash__has_transparent_hugepage(void)
{
	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return 0;
	/*
	 * We support THP only if PMD_SIZE is 16MB.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
		return 0;
	/*
	 * We need to make sure that we support 16MB hugepage in a segment
	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
	 * of 64K.
	 */

	/*
	 * If we have 64K HPTE, we will be using that by default. A penc
	 * (page-size encoding) value of -1 means the 16M-on-64K combination
	 * is not supported by the hardware.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
		return 0;
	/*
	 * Ok we only have 4K HPTE
	 */
	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
		return 0;

	return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_STRICT_KERNEL_RWX
static bool hash__change_memory_range(unsigned long start, unsigned long end,
				      unsigned long newpp)
{
	unsigned long idx;
	unsigned int step, shift;

	shift = mmu_psize_defs[mmu_linear_psize].shift;
	step = 1 << shift;

	start = ALIGN_DOWN(start, step);
	end = ALIGN(end, step); // aligns up

	if (start >= end)
		return false;

	pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
		 start, end, newpp, step);

	for (idx = start; idx < end; idx += step)
		/* Not sure if we can do much with the return value */
		mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
						 mmu_kernel_ssize);

	return true;
}

void hash__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
}

void hash__mark_initmem_nx(void)
{
	unsigned long start, end, pp;

	start = (unsigned long)__init_begin;
	end = (unsigned long)__init_end;

	pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));

	WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif