paca.c 7.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282
  1. /*
  2. * c 2001 PPC 64 Team, IBM Corp
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
  9. #include <linux/smp.h>
  10. #include <linux/export.h>
  11. #include <linux/memblock.h>
  12. #include <linux/sched/task.h>
  13. #include <asm/lppaca.h>
  14. #include <asm/paca.h>
  15. #include <asm/sections.h>
  16. #include <asm/pgtable.h>
  17. #include <asm/kexec.h>
  18. #include "setup.h"
  19. #ifndef CONFIG_SMP
  20. #define boot_cpuid 0
  21. #endif
/*
 * Allocate @size bytes (aligned to @align) of paca-related data for @cpu,
 * placed below physical address @limit, preferring memory on the CPU's
 * node.  Returns a virtual address; panics on failure, since paca data
 * is mandatory for boot.
 */
static void *__init alloc_paca_data(unsigned long size, unsigned long align,
				unsigned long limit, int cpu)
{
	unsigned long pa;
	int nid;

	/*
	 * boot_cpuid paca is allocated very early before cpu_to_node is up.
	 * Set bottom-up mode, because the boot CPU should be on node-0,
	 * which will put its paca in the right place.
	 */
	if (cpu == boot_cpuid) {
		nid = -1;	/* no node preference; bottom-up placement gets node 0 */
		memblock_set_bottom_up(true);
	} else {
		nid = early_cpu_to_node(cpu);
	}

	/* Try node-local memory first, then fall back to anywhere below limit. */
	pa = memblock_alloc_base_nid(size, align, limit, nid, MEMBLOCK_NONE);
	if (!pa) {
		pa = memblock_alloc_base(size, align, limit);
		if (!pa)
			panic("cannot allocate paca data");
	}

	/* Restore the default top-down allocation policy. */
	if (cpu == boot_cpuid)
		memblock_set_bottom_up(false);

	return __va(pa);
}
  48. #ifdef CONFIG_PPC_PSERIES
  49. /*
  50. * See asm/lppaca.h for more detail.
  51. *
 * lppaca structures must be 1kB in size, L1 cache line aligned,
  53. * and not cross 4kB boundary. A 1kB size and 1kB alignment will satisfy
  54. * these requirements.
  55. */
  56. static inline void init_lppaca(struct lppaca *lppaca)
  57. {
  58. BUILD_BUG_ON(sizeof(struct lppaca) != 640);
  59. *lppaca = (struct lppaca) {
  60. .desc = cpu_to_be32(0xd397d781), /* "LpPa" */
  61. .size = cpu_to_be16(0x400),
  62. .fpregs_in_use = 1,
  63. .slb_count = cpu_to_be16(64),
  64. .vmxregs_in_use = 0,
  65. .page_ins = 0, };
  66. };
  67. static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
  68. {
  69. struct lppaca *lp;
  70. size_t size = 0x400;
  71. BUILD_BUG_ON(size < sizeof(struct lppaca));
  72. if (early_cpu_has_feature(CPU_FTR_HVMODE))
  73. return NULL;
  74. lp = alloc_paca_data(size, 0x400, limit, cpu);
  75. init_lppaca(lp);
  76. return lp;
  77. }
#endif /* CONFIG_PPC_PSERIES */
  79. #ifdef CONFIG_PPC_BOOK3S_64
  80. /*
  81. * 3 persistent SLBs are allocated here. The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
  83. *
  84. * If you make the number of persistent SLB entries dynamic, please also
  85. * update PR KVM to flush and restore them accordingly.
  86. */
  87. static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
  88. {
  89. struct slb_shadow *s;
  90. if (cpu != boot_cpuid) {
  91. /*
  92. * Boot CPU comes here before early_radix_enabled
  93. * is parsed (e.g., for disable_radix). So allocate
  94. * always and this will be fixed up in free_unused_pacas.
  95. */
  96. if (early_radix_enabled())
  97. return NULL;
  98. }
  99. s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);
  100. memset(s, 0, sizeof(*s));
  101. s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
  102. s->buffer_length = cpu_to_be32(sizeof(*s));
  103. return s;
  104. }
  105. #endif /* CONFIG_PPC_BOOK3S_64 */
/* The Paca is an array with one entry per processor. Each contains an
 * lppaca, which contains the information shared between the
 * hypervisor and Linux.
 * On systems with hardware multi-threading, there are two threads
 * per processor. The Paca array must contain an entry for each thread.
 * The VPD Areas will give a max logical processors = 2 * max physical
 * processors. The processor VPD array needs one entry per physical
 * processor (not thread).
 */
struct paca_struct **paca_ptrs __read_mostly;	/* allocated by allocate_paca_ptrs() */
EXPORT_SYMBOL(paca_ptrs);
/*
 * Fill a freshly allocated paca with boot-time defaults for @cpu.
 * The lppaca and SLB-shadow pointers are NULLed here; allocate_paca()
 * fills them in afterwards.
 */
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_PSERIES
	new_paca->lppaca_ptr = NULL;
#endif
#ifdef CONFIG_PPC_BOOK3E
	new_paca->kernel_pgd = swapper_pg_dir;
#endif
	new_paca->lock_token = 0x8000;
	new_paca->paca_index = cpu;
	new_paca->kernel_toc = kernel_toc_addr();
	new_paca->kernelbase = (unsigned long) _stext;
	/* Only set MSR:IR/DR when MMU is initialized */
	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
	/* 0xffff: sentinel, hardware CPU id not known yet */
	new_paca->hw_cpu_id = 0xffff;
	new_paca->kexec_state = KEXEC_STATE_NONE;
	new_paca->__current = &init_task;
	/* Poison pattern until the real per-cpu offset is assigned */
	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
#ifdef CONFIG_PPC_BOOK3S_64
	new_paca->slb_shadow_ptr = NULL;
#endif

#ifdef CONFIG_PPC_BOOK3E
	/* For now -- if we have threads this will be adjusted later */
	new_paca->tcd_ptr = &new_paca->tcd;
#endif
}
/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
	/* Setup r13 (local_paca is the register-backed per-CPU pointer) */
	local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E
	/* On Book3E, initialize the TLB miss exception frames */
	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
	/*
	 * In HV mode, we setup both HPACA and PACA to avoid problems
	 * if we do a GET_PACA() before the feature fixups have been
	 * applied
	 */
	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
	mtspr(SPRN_SPRG_PACA, local_paca);
}
/* Bookkeeping for free_unused_pacas(): sizes/counts recorded at allocation */
static int __initdata paca_nr_cpu_ids;
static int __initdata paca_ptrs_size;
static int __initdata paca_struct_size;
/*
 * Allocate the paca pointer array, sized for the current nr_cpu_ids.
 * free_unused_pacas() returns the tail if nr_cpu_ids later shrinks.
 */
void __init allocate_paca_ptrs(void)
{
	paca_nr_cpu_ids = nr_cpu_ids;

	paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	paca_ptrs = __va(memblock_alloc(paca_ptrs_size, 0));
	/* 0x88 fill poisons unallocated slots so stray dereferences are obvious */
	memset(paca_ptrs, 0x88, paca_ptrs_size);
}
/*
 * Allocate, zero and initialise the paca for @cpu (plus its lppaca and
 * SLB shadow where configured), recording the pointer in paca_ptrs[cpu].
 */
void __init allocate_paca(int cpu)
{
	u64 limit;
	struct paca_struct *paca;

	BUG_ON(cpu >= paca_nr_cpu_ids);

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We access pacas in real mode, and cannot take SLB faults
	 * on them when in virtual mode, so allocate them accordingly.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
	limit = ppc64_rma_size;
#endif

	paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
				limit, cpu);
	paca_ptrs[cpu] = paca;

	memset(paca, 0, sizeof(struct paca_struct));
	initialise_paca(paca, cpu);
#ifdef CONFIG_PPC_PSERIES
	paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif

	/* Running total, reported by free_unused_pacas() */
	paca_struct_size += sizeof(struct paca_struct);
}
/*
 * Called once nr_cpu_ids is final: return the unused tail of the paca
 * pointer array to memblock, and free the boot CPU's speculatively
 * allocated SLB shadow if radix turned out to be enabled (see
 * new_slb_shadow()).
 */
void __init free_unused_pacas(void)
{
	int new_ptrs_size;

	new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
	if (new_ptrs_size < paca_ptrs_size)
		memblock_free(__pa(paca_ptrs) + new_ptrs_size,
					paca_ptrs_size - new_ptrs_size);

	paca_nr_cpu_ids = nr_cpu_ids;
	paca_ptrs_size = new_ptrs_size;

#ifdef CONFIG_PPC_BOOK3S_64
	if (early_radix_enabled()) {
		/* Ugly fixup, see new_slb_shadow() */
		memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
					sizeof(struct slb_shadow));
		paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
	}
#endif

	printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
			paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}
/*
 * Cache @mm's context fields in the current CPU's paca (Book3S only;
 * no-op otherwise).
 *
 * NOTE(review): presumably cached here so low-level code can read the
 * context without dereferencing mm_struct — confirm against callers.
 */
void copy_mm_to_paca(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_BOOK3S
	mm_context_t *context = &mm->context;

	get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
	VM_BUG_ON(!mm->context.slb_addr_limit);
	get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
	/* Slice psize arrays are copied wholesale; high slices are mm-sized */
	memcpy(&get_paca()->mm_ctx_low_slices_psize,
	       &context->low_slices_psize, sizeof(context->low_slices_psize));
	memcpy(&get_paca()->mm_ctx_high_slices_psize,
	       &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
#else /* CONFIG_PPC_MM_SLICES */
	get_paca()->mm_ctx_user_psize = context->user_psize;
	get_paca()->mm_ctx_sllp = context->sllp;
#endif
#else /* !CONFIG_PPC_BOOK3S */
	return;
#endif
}