// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/page-states.h>
#include <asm/abs_lowcore.h>
#include <asm/cacheflush.h>
#include <asm/maccess.h>
#include <asm/nospec-branch.h>
#include <asm/ctlreg.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
#include <asm/physmem_info.h>

static DEFINE_MUTEX(vmem_mutex);
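
/*
 * Allocate from the buddy allocator once the slab/page allocator is up;
 * fall back to memblock during early boot.
 */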
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order, struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, 1 << order);
		return;
	}
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
		return;
	free_pages(addr, order);
}

void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (!table)
		return NULL;
	crst_table_init(table, val);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	__arch_set_page_dat(pte, 1);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}
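
/*
 * vmemmap pages are added and removed in sub-PMD granularity. A PMD-sized
 * vmemmap page may therefore be only partially in use; its unused parts are
 * filled with the PAGE_UNUSED pattern, so that a completely unused page can
 * be detected via memchr_inv() and freed again.
 */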
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;

static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);
	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
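
/*
 * The modify_*_table() helpers below each walk one level of the kernel page
 * tables. "add" selects between populating and unpopulating a range,
 * "direct" distinguishes the 1:1 direct mapping from the vmemmap.
 */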
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct,
				  struct vmem_altmap *altmap)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long)pfn_to_virt(pte_pfn(*pte)), get_order(PAGE_SIZE), altmap);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block_buf(PAGE_SIZE, NUMA_NO_NODE, altmap);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}
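
/*
 * At PMD level, 1MB frames (EDAT1) are used for the direct mapping when the
 * range is segment aligned, and for the vmemmap whenever the facility is
 * available, even for partially used segments.
 */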
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct,
				  struct vmem_altmap *altmap)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_leaf(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE), altmap);
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also have
				 * page tables, since vmemmap_populate gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block_buf(PMD_SIZE, NUMA_NO_NODE, altmap);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_leaf(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	pmd_t *pmd;
	int i;

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		if (!pmd_none(*pmd))
			return;
	}
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER, NULL);
	pud_clear(pud);
}
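
/*
 * At PUD level, 2GB frames (EDAT2) are only used for the direct mapping;
 * the vmemmap is backed at PMD granularity at most.
 */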
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_leaf(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_leaf(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER, NULL);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct, struct vmem_altmap *altmap)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER, NULL);
	pgd_clear(pgd);
}
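
/*
 * Top-level entry point for both the 1:1 mapping and the vmemmap. Expects
 * start and end to be page aligned and below __abs_lowcore; on removal the
 * TLB is flushed for the whole range.
 */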
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct, struct vmem_altmap *altmap)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (WARN_ON_ONCE(end > __abs_lowcore))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct,
			 struct vmem_altmap *altmap)
{
	return modify_pagetable(start, end, true, direct, altmap);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct,
			    struct vmem_altmap *altmap)
{
	return modify_pagetable(start, end, false, direct, altmap);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	return add_pagetable(start, start + size, true, NULL);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	remove_pagetable(start, start + size, true, NULL);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false, altmap);
	if (ret)
		remove_pagetable(start, end, false, altmap);
	mutex_unlock(&vmem_mutex);
	return ret;
}

#ifdef CONFIG_MEMORY_HOTPLUG

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false, altmap);
	mutex_unlock(&vmem_mutex);
}

#endif

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}
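
/*
 * Memory can only be hotplugged into the range covered by the identity
 * mapping, i.e. [0, max_mappable).
 */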
struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = max_mappable - 1;
	return mhp_range;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-
 * table entries along the way. Meeting a large segment- or region-table
 * entry while traversing is an error, since the function is expected to
 * be called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_leaf(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}
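
/*
 * Map a single 4KB page at a page-aligned virtual address. Any existing
 * translation is invalidated with IPTE before the new PTE is installed.
 */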
int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}

int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}

void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}
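
/*
 * Set up the boot-time protection attributes of the kernel mapping:
 * text becomes read-only and executable, read-only data read-only and
 * non-executable.
 */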
void __init vmem_map_init(void)
{
	__set_memory_rox(_stext, _etext);
	__set_memory_ro(_etext, __end_rodata);
	__set_memory_rox(__stext_amode31, __etext_amode31);

	/*
	 * If the BEAR-enhancement facility is not installed the first
	 * prefix page is used to return to the previous context with
	 * an LPSWE instruction and therefore must be executable.
	 */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);
	if (debug_pagealloc_enabled())
		__set_memory_4k(__va(0), __va(0) + ident_map_size);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}