init_32.c

/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/cpu_entry_area.h>
#include <asm/init.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

bool __read_mostly __vmalloc_start_set = false;

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd's own
 * (folded) pmd entry in non-PAE compilation mode, since the middle
 * layer is folded there.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                p4d = p4d_offset(pgd, 0);
                pud = pud_offset(p4d, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        p4d = p4d_offset(pgd, 0);
        pud = pud_offset(p4d, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}
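
/*
 * A note on the walk above: on 32-bit the p4d and pud levels are folded,
 * so p4d_offset() and pud_offset() at offset 0 do not dereference extra
 * tables; they just recast the same entry one level down. That is why a
 * freshly written pgd can immediately be walked down to its pmd.
 */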

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        int pgd_idx = pgd_index(vaddr);
        int pmd_idx = pmd_index(vaddr);

        return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        int pte_idx = pte_index(vaddr);
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return one_page_table_init(pmd) + pte_idx;
}
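
/*
 * Illustrative use (a sketch, not a call site in this file): a boot-time
 * caller that needs a mapping at a known virtual address can combine the
 * helpers above, e.g.
 *
 *      pte_t *pte = populate_extra_pte(vaddr);
 *      set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
 *
 * which allocates any missing pmd/pte pages on the way down and returns
 * the pte slot for vaddr.
 */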

static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
        unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
                return 0;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd_idx++) {
                        if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
                            (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
                                count++;
                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
#endif
        return count;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte,
                                           void **adr)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
                pte_t *newpte;
                int i;

                BUG_ON(after_bootmem);
                newpte = *adr;
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);
                *adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte) | _PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around without
 * re-checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;
        unsigned long count = page_table_range_init_count(start, end);
        void *adr = NULL;

        if (count)
                adr = alloc_low_pages(count);

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte, &adr);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
        unsigned long last_map_addr = end;
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        /*
         * The first iteration sets up the identity mapping using
         * large or small pages, depending on use_pse, with the other
         * attributes the same as set by the early code in head_32.S.
         *
         * The second iteration sets up the appropriate attributes
         * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
         *
         * This two-pass mechanism conforms to the TLB application note,
         * which says:
         *
         *     "Software should not write to a paging-structure entry in
         *      a way that would change, for any linear address, both the
         *      page size and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!boot_cpu_has(X86_FEATURE_PSE))
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * The first pass uses the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                pfn &= PMD_MASK >> PAGE_SHIFT;
                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * The first pass uses the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1) {
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                        last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
                                } else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * Update the direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * Do a local, global TLB flush, which flushes the previous
                 * mappings from both the small and large page TLBs.
                 */
                __flush_tlb_all();

                /*
                 * The second iteration sets the actual desired PTE
                 * attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
        return last_map_addr;
}
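
/*
 * A note on sizes: despite the PG_LEVEL_2M name, a PSE mapping on
 * non-PAE 32-bit covers 4MB (PTRS_PER_PTE == 1024 entries of 4KB),
 * while with PAE it covers 2MB (PTRS_PER_PTE == 512). The pages_2m
 * counter above therefore counts "large pages" of whichever size the
 * configuration uses, which is also why the code steps pfn by
 * PTRS_PER_PTE rather than a hard-coded constant.
 */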

pte_t *kmap_pte;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        pgd_t *pgd = pgd_offset_k(vaddr);
        p4d_t *p4d = p4d_offset(pgd, vaddr);
        pud_t *pud = pud_offset(p4d, vaddr);
        pmd_t *pmd = pmd_offset(pud, vaddr);

        return pte_offset_kernel(pmd, vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
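
/*
 * Caching kmap_pte works because the FIX_KMAP_* fixmap slots are
 * virtually contiguous and their pte pages were already allocated when
 * the fixmap page tables were set up. The kmap_atomic() fast path can
 * then index ptes relative to kmap_pte (the fixmap grows downward, so
 * indices are subtracted) instead of walking the page tables on every
 * temporary mapping.
 */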

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        p4d = p4d_offset(pgd, vaddr);
        pud = pud_offset(p4d, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

void __init add_highpages_with_active_regions(int nid,
                unsigned long start_pfn, unsigned long end_pfn)
{
        phys_addr_t start, end;
        u64 i;

        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
                unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
                                            start_pfn, end_pfn);
                unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
                                              start_pfn, end_pfn);
                for ( ; pfn < e_pfn; pfn++)
                        if (pfn_valid(pfn))
                                free_highmem_page(pfn_to_page(pfn));
        }
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init sync_initial_page_table(void)
{
        clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS);

        /*
         * Sync back the low identity map too.  It is used, for example,
         * in the 32-bit EFI stub.
         */
        clone_pgd_range(initial_page_table,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                        min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
}
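
/*
 * clone_pgd_range() is just a memcpy() of top-level pgd entries, so
 * after the calls above both page-table trees share the same pmd/pte
 * pages for the copied ranges; only the pgd entries themselves are
 * duplicated.
 */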

void __init native_pagetable_init(void)
{
        unsigned long pfn, va;
        pgd_t *pgd, *base = swapper_pg_dir;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table.
         * In virtual address space we should have at least two pages
         * between VMALLOC_END and the pkmap or fixmap area, per the
         * definition of VMALLOC_END, and max_low_pfn corresponds to the
         * physical address at VMALLOC_END. If the initial memory mapping
         * did its job, we should either find a pte in use near
         * max_low_pfn or hit a pmd that is not present.
         */
        for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                p4d = p4d_offset(pgd, va);
                pud = pud_offset(p4d, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                /* should not be large page here */
                if (pmd_large(*pmd)) {
                        pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
                                pfn, pmd, __pa(pmd));
                        BUG_ON(1);
                }

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
                       pfn, pmd, __pa(pmd), pte, __pa(pte));
                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
        paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);
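
/*
 * Example: booting with "highmem=512M" makes memparse() return
 * 512 << 20; shifting right by PAGE_SHIFT (12 on x86) stores 131072
 * pages in highmem_pages. memparse() accepts the usual K/M/G suffixes.
 */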

#define MSG_HIGHMEM_TOO_BIG \
        "highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
        "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;

        if (highmem_pages == -1)
                highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
        if (highmem_pages >= max_pfn) {
                printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
                        pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                highmem_pages = 0;
        }
        if (highmem_pages) {
                if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
                        printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn -= highmem_pages;
        }
#else
        if (highmem_pages)
                printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
        "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
        "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
        max_low_pfn = MAXMEM_PFN;

        if (highmem_pages == -1)
                highmem_pages = max_pfn - MAXMEM_PFN;

        if (highmem_pages + MAXMEM_PFN < max_pfn)
                max_pfn = MAXMEM_PFN + highmem_pages;

        if (highmem_pages + MAXMEM_PFN > max_pfn) {
                printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
                        pages_to_mb(max_pfn - MAXMEM_PFN),
                        pages_to_mb(highmem_pages));
                highmem_pages = 0;
        }
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
                printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
        else
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
        max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
        if (max_pfn > MAX_NONPAE_PFN) {
                max_pfn = MAX_NONPAE_PFN;
                printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
        }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */
        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();
}
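
/*
 * Worked example (with the common 3G/1G split, where MAXMEM works out
 * to roughly 896MB, i.e. MAXMEM_PFN of about 229376 4K pages): on a
 * 2GB box max_pfn is 524288, so highmem_pfn_init() runs, caps
 * max_low_pfn at MAXMEM_PFN and leaves the remaining ~1152MB to be
 * managed as highmem.
 */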

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

        memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
        sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
        max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
        __vmalloc_start_set = true;

        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        olpc_dt_build_devicetree();
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        char z = 0;

        printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");

        __set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);

        if (probe_kernel_write((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
                clear_fixmap(FIX_WP_TEST);
                printk(KERN_CONT "Ok.\n");
                return;
        }

        printk(KERN_CONT "No.\n");
        panic("Linux doesn't support CPUs with broken WP.");
}
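
/*
 * How the check above reads: probe_kernel_write() returns 0 on success
 * and -EFAULT if the write faulted. A fault on the read-only fixmap
 * page means the CPU honoured WP in supervisor mode, hence the "Ok."
 * branch on a non-zero return value.
 */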

void __init mem_init(void)
{
        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /*
         * With CONFIG_DEBUG_PAGEALLOC, the initialization of highmem pages
         * has to be done before free_all_bootmem(). Memblock uses free low
         * memory for temporary data (see find_range_array()) and for this
         * purpose can use pages that were already handed to the buddy
         * allocator, and hence are marked not accessible in the page tables
         * when compiled with CONFIG_DEBUG_PAGEALLOC. Otherwise the order of
         * initialization is not important here.
         */
        set_highmem_pages_init();

        /* this will put all low memory onto the freelists */
        free_all_bootmem();

        after_bootmem = 1;
        x86_init.hyper.init_after_bootmem();

        mem_init_print_info(NULL);
        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "  cpu_entry : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

                CPU_ENTRY_AREA_BASE,
                CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
                CPU_ENTRY_AREA_MAP_SIZE >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END                       > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START                     >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE       > FIXADDR_START);
        BUG_ON(VMALLOC_END                             > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START                           >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory              > VMALLOC_START);

        test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
                    bool want_memblock)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

void arch_remove_memory(int nid, u64 start, u64 size,
                        struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
}
#endif

int kernel_set_to_readonly __read_mostly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, start+size);

        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, start+size);

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

static void mark_nxdata_nx(void)
{
        /*
         * By the time this is called, init code has already been run and
         * released, so everything past _etext should be NX.
         */
        unsigned long start = PFN_ALIGN(_etext);
        /*
         * This comes from the is_kernel_text() upper limit. Round to
         * HPAGE_SIZE in case large pages were used:
         */
        unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

        if (__supported_pte_mask & _PAGE_NX)
                printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
        set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}
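
/*
 * The rounding above over-approximates deliberately: if the region was
 * mapped with large pages, the NX range is extended to the next
 * HPAGE_SIZE boundary past __init_end, so the entire large page
 * containing the end of the init sections loses execute permission
 * rather than just its leading 4K pages.
 */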

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

        kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
        mark_nxdata_nx();
        if (__supported_pte_mask & _PAGE_NX)
                debug_checkwx();
}