hugetlbpage.c

// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>

#if 0 /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* Return the 4K subpage that @address falls into within the huge page */
        page = &pte_page(*pte)[(address >> PAGE_SHIFT) % (HPAGE_SIZE / PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}
#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, either a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry. Otherwise, it returns 0.
 *
 * A normal, non-huge present pmd has exactly _PAGE_PRESENT set under this
 * mask, so it returns 0; a present huge pmd also has _PAGE_PSE set, and a
 * non-present hugetlb entry has _PAGE_PRESENT clear, so both return 1.
 */
int pmd_huge(pmd_t pmd)
{
        return !pmd_none(pmd) &&
                (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
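
/*
 * pud_huge() returns 1 if @pud is a 1GB (PUD level) hugetlb entry, i.e. the
 * PSE bit is set in the entry.
 */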
int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = get_mmap_base(1);

        /*
         * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
         * in the full address space.
         */
        info.high_limit = in_compat_syscall() ?
                task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);
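
        /*
         * Keep only the in-huge-page offset bits in the alignment mask, so
         * vm_unmapped_area() returns an address aligned to the huge page
         * size (e.g. 0x1ff000 for a 2MB hstate, giving 2MB alignment).
         */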
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = get_mmap_base(0);

        /*
         * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
         * in the full address space.
         */
        if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE_LOW;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;

        addr = mpx_unmapped_area_check(addr, len, flags);
        if (IS_ERR_VALUE(addr))
                return addr;

        if (len > TASK_SIZE)
                return -ENOMEM;

        /* No address checking. See comment at mmap_address_hint_valid() */
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr &= huge_page_mask(h);
                if (!mmap_address_hint_valid(addr, len))
                        goto get_unmapped_area;

                vma = find_vma(mm, addr);
                if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }

get_unmapped_area:
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
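
/*
 * Illustrative sketch (userspace, not part of this file): a MAP_HUGETLB
 * mapping with a NULL hint reaches hugetlb_get_unmapped_area() above to pick
 * the address, assuming huge pages have been reserved beforehand (e.g. via
 * the boot parameters below, or /proc/sys/vm/nr_hugepages):
 *
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *	if (p == MAP_FAILED)
 *		perror("mmap");	// e.g. ENOMEM when no huge pages are free
 */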

#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);

        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                hugetlb_bad_size();
                printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
                        ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
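
/*
 * Example boot command line (illustrative): "hugepagesz=1G hugepages=4"
 * selects the 1GB hstate here; the hugepages= count itself is parsed in
 * mm/hugetlb.c.
 */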

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
        /* With compaction or CMA we can allocate gigantic pages at runtime */
        if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        return 0;
}
arch_initcall(gigantic_pages_init);
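
/*
 * With the hstate registered, gigantic pages can then be allocated at
 * runtime, e.g. (illustrative):
 *	echo 4 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
 */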
#endif
#endif