  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Based on arch/arm/include/asm/tlb.h
  4. *
  5. * Copyright (C) 2002 Russell King
  6. * Copyright (C) 2012 ARM Ltd.
  7. */
  8. #ifndef __ASM_TLB_H
  9. #define __ASM_TLB_H
  10. #include <linux/pagemap.h>
  11. #include <linux/swap.h>
  12. static inline void __tlb_remove_table(void *_table)
  13. {
  14. free_page_and_swap_cache((struct page *)_table);
  15. }
/*
 * Tell the generic mmu_gather code that this architecture supplies its
 * own tlb_flush(): the #define must be visible before asm-generic/tlb.h
 * is included, and the prototype lets the generic code call it before
 * the inline definition below.
 */
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>
  19. /*
  20. * get the tlbi levels in arm64. Default value is TLBI_TTL_UNKNOWN if more than
  21. * one of cleared_* is set or neither is set - this elides the level hinting to
  22. * the hardware.
  23. */
  24. static inline int tlb_get_level(struct mmu_gather *tlb)
  25. {
  26. /* The TTL field is only valid for the leaf entry. */
  27. if (tlb->freed_tables)
  28. return TLBI_TTL_UNKNOWN;
  29. if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
  30. tlb->cleared_puds ||
  31. tlb->cleared_p4ds))
  32. return 3;
  33. if (tlb->cleared_pmds && !(tlb->cleared_ptes ||
  34. tlb->cleared_puds ||
  35. tlb->cleared_p4ds))
  36. return 2;
  37. if (tlb->cleared_puds && !(tlb->cleared_ptes ||
  38. tlb->cleared_pmds ||
  39. tlb->cleared_p4ds))
  40. return 1;
  41. if (tlb->cleared_p4ds && !(tlb->cleared_ptes ||
  42. tlb->cleared_pmds ||
  43. tlb->cleared_puds))
  44. return 0;
  45. return TLBI_TTL_UNKNOWN;
  46. }
  47. static inline void tlb_flush(struct mmu_gather *tlb)
  48. {
  49. struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
  50. bool last_level = !tlb->freed_tables;
  51. unsigned long stride = tlb_get_unmap_size(tlb);
  52. int tlb_level = tlb_get_level(tlb);
  53. /*
  54. * If we're tearing down the address space then we only care about
  55. * invalidating the walk-cache, since the ASID allocator won't
  56. * reallocate our ASID without invalidating the entire TLB.
  57. */
  58. if (tlb->fullmm) {
  59. if (!last_level)
  60. flush_tlb_mm(tlb->mm);
  61. return;
  62. }
  63. __flush_tlb_range(&vma, tlb->start, tlb->end, stride,
  64. last_level, tlb_level);
  65. }
  66. static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
  67. unsigned long addr)
  68. {
  69. struct ptdesc *ptdesc = page_ptdesc(pte);
  70. pagetable_pte_dtor(ptdesc);
  71. tlb_remove_ptdesc(tlb, ptdesc);
  72. }
  73. #if CONFIG_PGTABLE_LEVELS > 2
  74. static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
  75. unsigned long addr)
  76. {
  77. struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
  78. pagetable_pmd_dtor(ptdesc);
  79. tlb_remove_ptdesc(tlb, ptdesc);
  80. }
  81. #endif
  82. #if CONFIG_PGTABLE_LEVELS > 3
  83. static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
  84. unsigned long addr)
  85. {
  86. struct ptdesc *ptdesc = virt_to_ptdesc(pudp);
  87. if (!pgtable_l4_enabled())
  88. return;
  89. pagetable_pud_dtor(ptdesc);
  90. tlb_remove_ptdesc(tlb, ptdesc);
  91. }
  92. #endif
#endif /* __ASM_TLB_H */