/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_folio(folio) flushes(wback&invalidates) a folio for dcache
 *  - flush_icache_range(start, end) flushes(invalidates) a range for icache
 *  - flush_icache_pages(vma, pg, nr) flushes(invalidates) pages for icache
 *  - flush_cache_sigtramp(vaddr) flushes the signal trampoline
 */
/*
 * Per-operation cache maintenance hooks.  All share a single
 * void *args signature so one dispatch mechanism covers every
 * operation; the argument is typically a struct flusher_data
 * (defined below).  NOTE(review): presumably installed by the
 * CPU-specific cache driver during cpu_cache_init() - confirm.
 */
extern void (*local_flush_cache_all)(void *args);
extern void (*local_flush_cache_mm)(void *args);
extern void (*local_flush_cache_dup_mm)(void *args);
extern void (*local_flush_cache_page)(void *args);
extern void (*local_flush_cache_range)(void *args);
extern void (*local_flush_dcache_folio)(void *args);
extern void (*local_flush_icache_range)(void *args);
extern void (*local_flush_icache_folio)(void *args);
extern void (*local_flush_cache_sigtramp)(void *args);
  28. static inline void cache_noop(void *args) { }
/*
 * Runtime-selected low-level operations on a kernel virtual address
 * range of the given size, in bytes.  NOTE(review): semantics inferred
 * from the names (wback = write back, purge = write back + invalidate,
 * invalidate = discard) - confirm against the selected implementations.
 */
extern void (*__flush_wback_region)(void *start, int size);
extern void (*__flush_purge_region)(void *start, int size);
extern void (*__flush_invalidate_region)(void *start, int size);
/*
 * Public cache-flush entry points; see the summary comment at the top
 * of this file for what each one does.
 */
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_dup_mm(struct mm_struct *mm);
extern void flush_cache_page(struct vm_area_struct *vma,
		unsigned long addr, unsigned long pfn);
extern void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
/*
 * This architecture supplies its own flush_dcache_folio(); the macro
 * self-definition tells generic code not to provide a fallback stub.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
  42. static inline void flush_dcache_page(struct page *page)
  43. {
  44. flush_dcache_folio(page_folio(page));
  45. }
/* Invalidate the instruction cache over a kernel virtual address range. */
extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_user_range flush_icache_range

/* Invalidate the instruction cache for nr pages starting at page. */
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr);
#define flush_icache_pages flush_icache_pages

/* Flush the signal trampoline at the given user address. */
extern void flush_cache_sigtramp(unsigned long address);
/*
 * Argument block passed through the void *args parameter of the
 * local_flush_* hooks above.  NOTE(review): which of vma/addr1/addr2
 * is meaningful varies per operation - confirm at the call sites.
 */
struct flusher_data {
	struct vm_area_struct *vma;
	unsigned long addr1, addr2;
};
  56. #define ARCH_HAS_FLUSH_ANON_PAGE
  57. extern void __flush_anon_page(struct page *page, unsigned long);
  58. static inline void flush_anon_page(struct vm_area_struct *vma,
  59. struct page *page, unsigned long vmaddr)
  60. {
  61. if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
  62. __flush_anon_page(page, vmaddr);
  63. }
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1

/*
 * Write back D-cache lines covering [addr, addr + size) via the
 * runtime-selected __flush_wback_region hook.
 */
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	__flush_wback_region(addr, size);
}

/*
 * Invalidate cache lines covering [addr, addr + size) via the
 * runtime-selected __flush_invalidate_region hook.
 */
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	__flush_invalidate_region(addr, size);
}
/* Cache-aware copy of len bytes into a user page mapped at vaddr. */
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);
/* Cache-aware copy of len bytes out of a user page mapped at vaddr. */
extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

/*
 * vmalloc map/unmap flush the whole cache rather than the range.
 * NOTE(review): presumably because vmap aliases make a ranged flush
 * insufficient - confirm.
 */
#define flush_cache_vmap(start, end)		local_flush_cache_all(NULL)
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		local_flush_cache_all(NULL)

/* No locking is required around dcache mmap flushing here. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
/* Set up the kmap_coherent() machinery. */
void kmap_coherent_init(void);
/*
 * Map page at a kernel virtual address chosen to be cache-coherent
 * with the user mapping at addr; release with kunmap_coherent().
 */
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void *kvaddr);

/*
 * Arch-private page flag.  NOTE(review): name suggests it marks pages
 * whose D-cache state is clean - confirm against the flag's users.
 */
#define PG_dcache_clean PG_arch_1

/* Top-level cache initialisation entry point. */
void cpu_cache_init(void);

/*
 * Per-CPU-family initialisation hooks.  Declared __weak so that only
 * the cache drivers actually built in provide real definitions.
 */
void __weak l2_cache_init(void);
void __weak j2_cache_init(void);
void __weak sh2_cache_init(void);
void __weak sh2a_cache_init(void);
void __weak sh3_cache_init(void);
void __weak shx3_cache_init(void);
void __weak sh4_cache_init(void);
void __weak sh7705_cache_init(void);
void __weak sh4__flush_region_init(void);
  98. static inline void *sh_cacheop_vaddr(void *vaddr)
  99. {
  100. if (__in_29bit_mode())
  101. vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
  102. return vaddr;
  103. }
  104. #endif /* __ASM_SH_CACHEFLUSH_H */