/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */
#include <linux/memblock.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>

#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
  16. void __init kasan_early_init(void)
  17. {
  18. unsigned long vaddr = KASAN_SHADOW_START;
  19. pmd_t *pmd = pmd_off_k(vaddr);
  20. int i;
  21. for (i = 0; i < PTRS_PER_PTE; ++i)
  22. set_pte(kasan_early_shadow_pte + i,
  23. mk_pte(virt_to_page(kasan_early_shadow_page),
  24. PAGE_KERNEL));
  25. for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
  26. BUG_ON(!pmd_none(*pmd));
  27. set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
  28. }
  29. }
  30. static void __init populate(void *start, void *end)
  31. {
  32. unsigned long n_pages = (end - start) / PAGE_SIZE;
  33. unsigned long n_pmds = n_pages / PTRS_PER_PTE;
  34. unsigned long i, j;
  35. unsigned long vaddr = (unsigned long)start;
  36. pmd_t *pmd = pmd_off_k(vaddr);
  37. pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
  38. if (!pte)
  39. panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
  40. __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
  41. pr_debug("%s: %p - %p\n", __func__, start, end);
  42. for (i = j = 0; i < n_pmds; ++i) {
  43. int k;
  44. for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
  45. phys_addr_t phys =
  46. memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
  47. 0,
  48. MEMBLOCK_ALLOC_ANYWHERE);
  49. if (!phys)
  50. panic("Failed to allocate page table page\n");
  51. set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
  52. }
  53. }
  54. for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
  55. set_pmd(pmd + i, __pmd((unsigned long)pte));
  56. local_flush_tlb_all();
  57. memset(start, 0, end - start);
  58. }
  59. void __init kasan_init(void)
  60. {
  61. int i;
  62. BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
  63. (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
  64. BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
  65. /*
  66. * Replace shadow map pages that cover addresses from VMALLOC area
  67. * start to the end of KSEG with clean writable pages.
  68. */
  69. populate(kasan_mem_to_shadow((void *)VMALLOC_START),
  70. kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
  71. /*
  72. * Write protect kasan_early_shadow_page and zero-initialize it again.
  73. */
  74. for (i = 0; i < PTRS_PER_PTE; ++i)
  75. set_pte(kasan_early_shadow_pte + i,
  76. mk_pte(virt_to_page(kasan_early_shadow_page),
  77. PAGE_KERNEL_RO));
  78. local_flush_tlb_all();
  79. memset(kasan_early_shadow_page, 0, PAGE_SIZE);
  80. /* At this point kasan is fully initialized. Enable error messages. */
  81. current->kasan_depth = 0;
  82. pr_info("KernelAddressSanitizer initialized\n");
  83. }