highmem.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;
void *kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        flush_tlb_one((unsigned long)addr);

        return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
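
/*
 * Illustrative sketch only, not part of the original file: a typical
 * process-context caller of the sleepable kmap()/kunmap() pair. The
 * helper name clear_one_page() is hypothetical.
 *
 *      static void clear_one_page(struct page *page)
 *      {
 *              void *vaddr = kmap(page);
 *
 *              memset(vaddr, 0, PAGE_SIZE);
 *              kunmap(page);
 *      }
 */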

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must perform
 * a global TLB invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 *
 * (An illustrative usage sketch follows __kunmap_atomic() below.)
 */
void *kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
        set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
        local_flush_tlb_one((unsigned long)vaddr);

        return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int type __maybe_unused;

        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                preempt_enable();
                return;
        }

        type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
        {
                int idx = type + KM_TYPE_NR * smp_processor_id();

                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

                /*
                 * Force other mappings to Oops if they try to access
                 * this pte without first remapping it.
                 */
                pte_clear(&init_mm, vaddr, kmap_pte - idx);
                local_flush_tlb_one(vaddr);
        }
#endif
        kmap_atomic_idx_pop();
        pagefault_enable();
        preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
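
/*
 * Illustrative sketch only, not part of the original file: a short,
 * non-sleeping section that copies one highmem page with the atomic
 * pair. The helper name copy_one_page() is hypothetical; kunmap_atomic()
 * is the generic wrapper around __kunmap_atomic() from <linux/highmem.h>,
 * and the mappings are released in reverse (LIFO) order of creation.
 *
 *      static void copy_one_page(struct page *dst, struct page *src)
 *      {
 *              void *vdst = kmap_atomic(dst);
 *              void *vsrc = kmap_atomic(src);
 *
 *              copy_page(vdst, vsrc);
 *
 *              kunmap_atomic(vsrc);
 *              kunmap_atomic(vdst);
 *      }
 */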

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
        flush_tlb_one(vaddr);

        return (void *)vaddr;
}
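
/*
 * Illustrative sketch only, not part of the original file: reading one
 * word from a page frame that has no struct page (for example a reserved
 * or firmware-owned frame). The helper name peek_pfn_word() and the pfn
 * source are hypothetical; the mapping is released with kunmap_atomic()
 * like any other atomic kmap.
 *
 *      static u32 peek_pfn_word(unsigned long pfn, unsigned int offset)
 *      {
 *              void *vaddr = kmap_atomic_pfn(pfn);
 *              u32 val = *(u32 *)(vaddr + offset);
 *
 *              kunmap_atomic(vaddr);
 *              return val;
 *      }
 */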

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}