// SPDX-License-Identifier: GPL-2.0
/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * Provides kernel-static versions of atomic kmap functions originally
 * found as inlines in include/asm-sparc/highmem.h. These became
 * needed as kmap_atomic() and kunmap_atomic() started getting
 * called from within modules.
 * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 * But kmap_atomic() and kunmap_atomic() cannot be inlined in
 * modules because they are loaded with btfixup-ped functions.
 */
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 * XXX This is an old text. Actually, it's good to use atomic kmaps,
 * provided you remember that they are atomic and not try to sleep
 * with a kmap taken, much like a spinlock. Non-atomic kmaps are
 * shared by CPUs, and so precious, and establishing them requires IPI.
 * Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
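
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * an atomic kmap must be dropped on the same CPU without sleeping in
 * between, e.g.
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(vaddr, buf, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 *
 * `page` and `buf` are hypothetical; any call that might sleep between
 * the map/unmap pair would be a bug.
 */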

#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/vaddrs.h>
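
/*
 * kmap_prot holds the SRMMU protection bits used for atomic kmap ptes;
 * kmap_pte caches the pte slot of FIX_KMAP_BEGIN so that per-CPU slots
 * can be reached by plain pointer arithmetic. Both are set up once in
 * kmap_init().
 */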
pgprot_t kmap_prot;
static pte_t *kmap_pte;

void __init kmap_init(void)
{
	unsigned long address;
	pmd_t *dir;

	address = __fix_to_virt(FIX_KMAP_BEGIN);
	dir = pmd_offset(pgd_offset_k(address), address);

	/* cache the first kmap pte */
	kmap_pte = pte_offset_kernel(dir, address);
	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}
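
/*
 * Each CPU owns KM_TYPE_NR consecutive fixmap slots starting at
 * FIX_KMAP_BEGIN; kmap_atomic_idx_push() returns the per-CPU nesting
 * depth (the "type"), so idx = type + KM_TYPE_NR * smp_processor_id()
 * picks a slot unique to this CPU and nesting level.
 */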
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif

	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));

	/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
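
/*
 * Lowmem pages are mapped permanently, so kmap_atomic() handed out
 * page_address() for them; such a vaddr lies below FIXADDR_START and
 * nothing needs tearing down beyond re-enabling pagefaults and
 * preemption.
 */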
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else
		flush_cache_all();
#endif

		/*
		 * force other mappings to Oops if they try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);

		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);