espfix_64.c

// SPDX-License-Identifier: GPL-2.0-only
/* ----------------------------------------------------------------------- *
 *
 *   Copyright 2014 Intel Corporation; author: H. Peter Anvin
 *
 * ----------------------------------------------------------------------- */

/*
 * The IRET instruction, when returning to a 16-bit segment, only
 * restores the bottom 16 bits of the user space stack pointer. This
 * causes some 16-bit software to break, but it also leaks kernel state
 * to user space.
 *
 * We work around this by creating percpu "ministacks", each of which
 * is mapped 2^16 times 64K apart. When we detect that the return SS is
 * on the LDT, we copy the IRET frame to the ministack and use the
 * relevant alias to return to userspace. The ministacks are mapped
 * readonly, so if the IRET faults we promote #GP to #DF, which is an
 * IST vector and thus has its own stack; we then do the fixup in the
 * #DF handler.
 *
 * This file sets up the ministacks and the related page tables. The
 * actual ministack invocation is in entry_64.S.
 */

#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/pgtable.h>

#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/espfix.h>

/*
 * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
 * it up to a cache line to avoid unnecessary sharing.
 */
#define ESPFIX_STACK_SIZE	(8*8UL)
#define ESPFIX_STACKS_PER_PAGE	(PAGE_SIZE/ESPFIX_STACK_SIZE)

/* There is address space for how many espfix pages? */
#define ESPFIX_PAGE_SPACE	(1UL << (P4D_SHIFT-PAGE_SHIFT-16))

#define ESPFIX_MAX_CPUS		(ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
# error "Need more virtual address space for the ESPFIX hack"
#endif
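
/*
 * A worked example of the capacity math above, assuming 4 KiB pages
 * and 4-level paging (PAGE_SHIFT == 12, P4D_SHIFT == 39): each page
 * holds 4096/64 = 64 ministacks, the region has room for
 * 1UL << (39 - 12 - 16) = 2048 espfix pages, and therefore up to
 * 64 * 2048 = 131072 CPUs.
 */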

#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)

/* This contains the *bottom* address of the espfix stack */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);

/* Initialization mutex - should this be a spinlock? */
static DEFINE_MUTEX(espfix_init_mutex);

/* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
#define ESPFIX_MAX_PAGES	DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
static void *espfix_pages[ESPFIX_MAX_PAGES];

static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
	__aligned(PAGE_SIZE);

static unsigned int page_random, slot_random;

/*
 * This returns the bottom address of the espfix stack for a specific
 * CPU. The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in
 * which case we have to account for some amount of padding at the end
 * of each page.
 */
static inline unsigned long espfix_base_addr(unsigned int cpu)
{
	unsigned long page, slot;
	unsigned long addr;

	page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
	slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
	addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
	addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
	addr += ESPFIX_BASE_ADDR;
	return addr;
}
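
/*
 * A worked example of the swizzle above, assuming 4 KiB pages and no
 * randomization (page_random == slot_random == 0): cpu 1024 gets
 * page = 1024/64 = 16 and slot = 0, so addr starts out as 16 << 12 =
 * 0x10000; the bits above the low 16 are then shifted up by another
 * 16, giving ESPFIX_BASE_ADDR + 0x100000000. Note that bits 31:16 of
 * the result are always zero: the entry code can OR any 16-bit value
 * into them and, because the page is aliased at every 64K step, still
 * land on a valid mapping of the same ministack.
 */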

#define PTE_STRIDE		(65536/PAGE_SIZE)
#define ESPFIX_PTE_CLONES	(PTRS_PER_PTE/PTE_STRIDE)
#define ESPFIX_PMD_CLONES	PTRS_PER_PMD
#define ESPFIX_PUD_CLONES	(65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
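
/*
 * With 4 KiB pages, PTE_STRIDE is 16, so one PTE page provides
 * 512/16 = 32 aliases; cloning it into all 512 PMD entries and the
 * PMD page into 65536/(32*512) = 4 PUD entries yields
 * 32 * 512 * 4 = 65536 aliases of each ministack page, 64K apart.
 */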

#define PGTABLE_PROT		((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)
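
/*
 * Scatter the ministacks within the espfix region so that their exact
 * kernel virtual addresses are not predictable.
 */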
static void init_espfix_random(void)
{
	unsigned long rand = get_random_long();

	slot_random = rand % ESPFIX_STACKS_PER_PAGE;
	page_random = (rand / ESPFIX_STACKS_PER_PAGE)
		& (ESPFIX_PAGE_SPACE - 1);
}

void __init init_espfix_bsp(void)
{
	pgd_t *pgd;
	p4d_t *p4d;

	/* FRED systems always restore the full value of %rsp */
	if (cpu_feature_enabled(X86_FEATURE_FRED))
		return;

	/* Install the espfix pud into the kernel page directory */
	pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
	p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
	p4d_populate(&init_mm, p4d, espfix_pud_page);

	/* Randomize the locations */
	init_espfix_random();

	/* The rest is the same as for any other processor */
	init_espfix_ap(0);
}
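
/*
 * Set up the espfix mappings for one CPU: make sure the page tables
 * covering its slice of the region exist, allocate the ministack page
 * if this CPU is the first user of it, and record the per-CPU alias
 * (espfix_stack) and writable (espfix_waddr) addresses.
 */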
void init_espfix_ap(int cpu)
{
	unsigned int page;
	unsigned long addr;
	pud_t pud, *pud_p;
	pmd_t pmd, *pmd_p;
	pte_t pte, *pte_p;
	int n, node;
	void *stack_page;
	pteval_t ptemask;

	/* FRED systems always restore the full value of %rsp */
	if (cpu_feature_enabled(X86_FEATURE_FRED))
		return;

	/* We only have to do this once... */
	if (likely(per_cpu(espfix_stack, cpu)))
		return;		/* Already initialized */

	addr = espfix_base_addr(cpu);
	page = cpu/ESPFIX_STACKS_PER_PAGE;

	/* Did another CPU already set this up? */
	stack_page = READ_ONCE(espfix_pages[page]);
	if (likely(stack_page))
		goto done;

	mutex_lock(&espfix_init_mutex);

	/* Did we race on the lock? */
	stack_page = READ_ONCE(espfix_pages[page]);
	if (stack_page)
		goto unlock_done;

	node = cpu_to_node(cpu);
	ptemask = __supported_pte_mask;
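
	/*
	 * Walk down from the static PUD page, cloning each newly
	 * allocated level across all aliases so that every 64K copy of
	 * the region resolves to the same ministack page.
	 */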
	pud_p = &espfix_pud_page[pud_index(addr)];
	pud = *pud_p;
	if (!pud_present(pud)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pmd_p = (pmd_t *)page_address(page);
		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
			set_pud(&pud_p[n], pud);
	}

	pmd_p = pmd_offset(&pud, addr);
	pmd = *pmd_p;
	if (!pmd_present(pmd)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pte_p = (pte_t *)page_address(page);
		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
			set_pmd(&pmd_p[n], pmd);
	}
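
	/*
	 * Finally, the ministack page itself, mapped readonly at every
	 * alias; other CPUs sharing this page will find it via
	 * espfix_pages[] and skip straight to "done".
	 */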
	pte_p = pte_offset_kernel(&pmd, addr);
	stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
	/*
	 * __PAGE_KERNEL_* includes _PAGE_GLOBAL, which we want since
	 * this is mapped to userspace.
	 */
	pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask));
	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
		set_pte(&pte_p[n*PTE_STRIDE], pte);

	/* Job is done for this CPU and any CPU which shares this page */
	WRITE_ONCE(espfix_pages[page], stack_page);

unlock_done:
	mutex_unlock(&espfix_init_mutex);
done:
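	/*
	 * espfix_stack is the readonly alias used to build the stack
	 * pointer in the IRET path; espfix_waddr is the writable kernel
	 * address of the same bytes, used when copying the IRET frame.
	 */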
	per_cpu(espfix_stack, cpu) = addr;
	per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
				     + (addr & ~PAGE_MASK);
}