cpu_entry_area.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
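/*
 * Per-CPU backing storage for the entry stack (and, on 64-bit, the
 * exception stacks); it is mapped into each CPU's cpu_entry_area in
 * setup_cpu_entry_area() below.
 */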
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
static DEFINE_PER_CPU(struct kcore_list, kcore_entry_trampoline);
#endif
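/*
 * Return the fixed virtual address of @cpu's entry area; the per-CPU areas
 * are laid out back to back starting at CPU_ENTRY_AREA_PER_CPU.
 */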
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);
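/* Install a single PTE mapping physical address @pa at entry-area address @cea_vaddr. */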
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables. All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}
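/* Map @pages per-CPU pages starting at @ptr into the entry area at @cea_vaddr. */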
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
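/*
 * Map the per-CPU debug store (used by Intel PEBS/BTS) into the entry
 * area. Only Intel CPUs have a debug store, so bail out early on other
 * vendors.
 */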
static void percpu_setup_debug_store(int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}
/* Set up the fixmap mappings only once per processor */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	extern char _entry_trampoline[];

	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy. If the
	 * GDT is read-only, that will triple fault. The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
		    gdt_prot);

	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary. Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
			     &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
			     &per_cpu(exception_stacks, cpu),
			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);

	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
		    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
	/*
	 * The cpu_entry_area alias addresses are not in the kernel binary
	 * so they do not show up in /proc/kcore normally. This adds entries
	 * for them manually.
	 */
	kclist_add_remap(&per_cpu(kcore_entry_trampoline, cpu),
			 _entry_trampoline,
			 &get_cpu_entry_area(cpu)->entry_trampoline, PAGE_SIZE);
#endif

	percpu_setup_debug_store(cpu);
}
#ifdef CONFIG_X86_64
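/*
 * Report the per-CPU entry trampoline aliases as extra kallsyms entries;
 * @symnum selects the CPU, and every alias gets the same name,
 * __entry_SYSCALL_64_trampoline.
 */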
int arch_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		     char *name)
{
	unsigned int cpu, ncpu = 0;

	if (symnum >= num_possible_cpus())
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		if (ncpu++ >= symnum)
			break;
	}

	*value = (unsigned long)&get_cpu_entry_area(cpu)->entry_trampoline;
	*type = 't';
	strlcpy(name, "__entry_SYSCALL_64_trampoline", KSYM_NAME_LEN);

	return 0;
}
#endif
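/*
 * On 32-bit, walk the cpu_entry_area address range one PMD at a time and
 * pre-allocate the page tables backing it, so later cea_set_pte() calls
 * find them in place; on 64-bit this is a no-op.
 */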
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}
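/* Early-boot entry point: map the entry area for every possible CPU. */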
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}