kernel-pgtable.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel page table mapping
 *
 * Copyright (C) 2015 ARM Ltd.
 */

#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H

#include <asm/boot.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sparsemem.h>

/*
 * The physical and virtual addresses of the start of the kernel image are
 * equal modulo 2 MiB (per the arm64 booting.txt requirements). Hence we can
 * use section mapping with 4K (section size = 2M) but not with 16K (section
 * size = 32M) or 64K (section size = 512M).
 */
#if defined(PMD_SIZE) && PMD_SIZE <= MIN_KIMG_ALIGN
#define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
#define SWAPPER_SKIP_LEVEL	1
#else
#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
#define SWAPPER_SKIP_LEVEL	0
#endif
#define SWAPPER_BLOCK_SIZE	(UL(1) << SWAPPER_BLOCK_SHIFT)
#define SWAPPER_TABLE_SHIFT	(SWAPPER_BLOCK_SHIFT + PAGE_SHIFT - 3)
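
/*
 * For illustration (these values follow from the definitions above, assuming
 * MIN_KIMG_ALIGN is the 2 MiB minimum noted in the comment):
 *
 *   4K pages:  PMD_SIZE = 2M   <= 2M -> SWAPPER_BLOCK_SHIFT = PMD_SHIFT  (21),
 *              SWAPPER_BLOCK_SIZE = 2M,  SWAPPER_TABLE_SHIFT = 30 (PUD_SHIFT)
 *   16K pages: PMD_SIZE = 32M  >  2M -> SWAPPER_BLOCK_SHIFT = PAGE_SHIFT (14),
 *              SWAPPER_BLOCK_SIZE = 16K, SWAPPER_TABLE_SHIFT = 25 (PMD_SHIFT)
 *   64K pages: PMD_SIZE = 512M >  2M -> SWAPPER_BLOCK_SHIFT = PAGE_SHIFT (16),
 *              SWAPPER_BLOCK_SIZE = 64K, SWAPPER_TABLE_SHIFT = 29 (PMD_SHIFT)
 */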

#define SWAPPER_PGTABLE_LEVELS		(CONFIG_PGTABLE_LEVELS - SWAPPER_SKIP_LEVEL)
#define INIT_IDMAP_PGTABLE_LEVELS	(IDMAP_LEVELS - SWAPPER_SKIP_LEVEL)

#define IDMAP_VA_BITS		48
#define IDMAP_LEVELS		ARM64_HW_PGTABLE_LEVELS(IDMAP_VA_BITS)
#define IDMAP_ROOT_LEVEL	(4 - IDMAP_LEVELS)
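
/*
 * For illustration, assuming ARM64_HW_PGTABLE_LEVELS(v) expands to
 * ((v) - 4) / (PAGE_SHIFT - 3) (see asm/pgtable-hwdef.h), a 48-bit ID map needs:
 *
 *   4K pages:  IDMAP_LEVELS = 44 / 9  = 4, IDMAP_ROOT_LEVEL = 0
 *   16K pages: IDMAP_LEVELS = 44 / 11 = 4, IDMAP_ROOT_LEVEL = 0
 *   64K pages: IDMAP_LEVELS = 44 / 13 = 3, IDMAP_ROOT_LEVEL = 1
 */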

/*
 * A relocatable kernel may execute from an address that differs from the one at
 * which it was linked. In the worst case, its runtime placement may intersect
 * with two adjacent PGDIR entries, which means that an additional page table
 * may be needed at each subordinate level.
 */
#define EXTRA_PAGE	__is_defined(CONFIG_RELOCATABLE)

#define SPAN_NR_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)

#define EARLY_ENTRIES(vstart, vend, shift, add) \
	(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
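
/*
 * Worked example: SPAN_NR_ENTRIES(0x1000, 0x3001, 12) counts the 4K-granule
 * slots touched by [0x1000, 0x3001):
 *   ((0x3000 >> 12) - (0x1000 >> 12) + 1) = 3 - 1 + 1 = 3 entries (slots 1, 2, 3).
 * EARLY_ENTRIES() adds 'add' spare entries on top, e.g. the EXTRA_PAGE slack
 * for relocatable kernels.
 */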

#define EARLY_LEVEL(lvl, lvls, vstart, vend, add) \
	(lvls > lvl ? EARLY_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add) : 0)

#define EARLY_PAGES(lvls, vstart, vend, add) (1	/* PGDIR page */				\
	+ EARLY_LEVEL(3, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */	\
	+ EARLY_LEVEL(2, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */	\
	+ EARLY_LEVEL(1, (lvls), (vstart), (vend), add))/* each entry needs a next level page table */
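
/*
 * For illustration, with 4K pages and 48-bit VAs (CONFIG_PGTABLE_LEVELS = 4,
 * so SWAPPER_PGTABLE_LEVELS = 3 and SWAPPER_BLOCK_SHIFT = 21) and a kernel
 * image that does not cross a 1G or 512G boundary:
 *
 *   EARLY_PAGES(3, KIMAGE_VADDR, _end, 0)
 *     = 1	PGDIR page
 *     + 0	level 3 is skipped (lvls == 3)
 *     + 1	one entry at shift 39 -> one PUD-level table page
 *     + 1	one entry at shift 30 -> one PMD-level table page
 *     = 3 pages
 *
 * A relocatable kernel passes add = EXTRA_PAGE = 1 and so gets one extra page
 * per counted level, i.e. 5 pages in this example.
 */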

#define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(SWAPPER_PGTABLE_LEVELS, KIMAGE_VADDR, _end, EXTRA_PAGE) \
			+ EARLY_SEGMENT_EXTRA_PAGES))

#define INIT_IDMAP_DIR_PAGES	(EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, KIMAGE_VADDR, _end, 1))
#define INIT_IDMAP_DIR_SIZE	((INIT_IDMAP_DIR_PAGES + EARLY_IDMAP_EXTRA_PAGES) * PAGE_SIZE)

#define INIT_IDMAP_FDT_PAGES	(EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, 0UL, UL(MAX_FDT_SIZE), 1) - 1)
#define INIT_IDMAP_FDT_SIZE	((INIT_IDMAP_FDT_PAGES + EARLY_IDMAP_EXTRA_FDT_PAGES) * PAGE_SIZE)
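
/*
 * Note on the "- 1" above: it drops the PGDIR page counted by EARLY_PAGES(),
 * on the assumption that the FDT is mapped through the same initial ID map
 * root already sized by INIT_IDMAP_DIR_PAGES. MAX_FDT_SIZE comes from
 * asm/boot.h.
 */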

/* The number of segments in the kernel image (text, rodata, inittext, initdata, data+bss) */
#define KERNEL_SEGMENT_COUNT	5

#if SWAPPER_BLOCK_SIZE > SEGMENT_ALIGN
#define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 1)
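/*
 * Presumably one extra next-level table per segment edge: KERNEL_SEGMENT_COUNT
 * contiguous segments have KERNEL_SEGMENT_COUNT + 1 edges, and each edge may
 * fall in the middle of a swapper block when blocks are larger than
 * SEGMENT_ALIGN.
 */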
/*
 * The initial ID map consists of the kernel image, mapped as two separate
 * segments, and may appear misaligned wrt the swapper block size. This means
 * we need 3 additional pages. The DT could straddle a swapper block boundary,
 * so it may need 2.
 */
#define EARLY_IDMAP_EXTRA_PAGES		3
#define EARLY_IDMAP_EXTRA_FDT_PAGES	2
#else
#define EARLY_SEGMENT_EXTRA_PAGES	0
#define EARLY_IDMAP_EXTRA_PAGES		0
#define EARLY_IDMAP_EXTRA_FDT_PAGES	0
#endif

#endif /* __ASM_KERNEL_PGTABLE_H */