/* SPDX-License-Identifier: GPL-2.0 */
  1. /*
  2. * arch/sh/mm/ioremap.c
  3. *
  4. * (C) Copyright 1995 1996 Linus Torvalds
  5. * (C) Copyright 2005 - 2010 Paul Mundt
  6. *
  7. * Re-map IO memory to kernel address space so that we can access it.
  8. * This is needed for high PCI addresses that aren't mapped in the
  9. * 640k-1MB IO memory area on PC's
  10. *
  11. * This file is subject to the terms and conditions of the GNU General
  12. * Public License. See the file "COPYING" in the main directory of this
  13. * archive for more details.
  14. */
  15. #include <linux/vmalloc.h>
  16. #include <linux/module.h>
  17. #include <linux/slab.h>
  18. #include <linux/mm.h>
  19. #include <linux/pci.h>
  20. #include <linux/io.h>
  21. #include <asm/io_trapped.h>
  22. #include <asm/page.h>
  23. #include <asm/pgalloc.h>
  24. #include <asm/addrspace.h>
  25. #include <asm/cacheflush.h>
  26. #include <asm/tlbflush.h>
  27. #include <asm/mmu.h>
  28. #include "ioremap.h"
  29. /*
  30. * On 32-bit SH, we traditionally have the whole physical address space mapped
  31. * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
  32. * anything but place the address in the proper segment. This is true for P1
  33. * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses
  34. * and newer cores using extended addressing need to map through page tables, so
  35. * the ioremap() implementation becomes a bit more complicated.
  36. */
  37. #ifdef CONFIG_29BIT
  38. static void __iomem *
  39. __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
  40. {
  41. phys_addr_t last_addr = offset + size - 1;
  42. /*
  43. * For P1 and P2 space this is trivial, as everything is already
  44. * mapped. Uncached access for P1 addresses are done through P2.
  45. * In the P3 case or for addresses outside of the 29-bit space,
  46. * mapping must be done by the PMB or by using page tables.
  47. */
  48. if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
  49. u64 flags = pgprot_val(prot);
  50. /*
  51. * Anything using the legacy PTEA space attributes needs
  52. * to be kicked down to page table mappings.
  53. */
  54. if (unlikely(flags & _PAGE_PCC_MASK))
  55. return NULL;
  56. if (unlikely(flags & _PAGE_CACHABLE))
  57. return (void __iomem *)P1SEGADDR(offset);
  58. return (void __iomem *)P2SEGADDR(offset);
  59. }
  60. /* P4 above the store queues are always mapped. */
  61. if (unlikely(offset >= P3_ADDR_MAX))
  62. return (void __iomem *)P4SEGADDR(offset);
  63. return NULL;
  64. }
  65. #else
  66. #define __ioremap_29bit(offset, size, prot) NULL
  67. #endif /* CONFIG_29BIT */
  68. void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
  69. unsigned long prot)
  70. {
  71. void __iomem *mapped;
  72. pgprot_t pgprot = __pgprot(prot);
  73. mapped = __ioremap_trapped(phys_addr, size);
  74. if (mapped)
  75. return mapped;
  76. mapped = __ioremap_29bit(phys_addr, size, pgprot);
  77. if (mapped)
  78. return mapped;
  79. /*
  80. * If we can't yet use the regular approach, go the fixmap route.
  81. */
  82. if (!mem_init_done)
  83. return ioremap_fixed(phys_addr, size, pgprot);
  84. /*
  85. * First try to remap through the PMB.
  86. * PMB entries are all pre-faulted.
  87. */
  88. mapped = pmb_remap_caller(phys_addr, size, pgprot,
  89. __builtin_return_address(0));
  90. if (mapped && !IS_ERR(mapped))
  91. return mapped;
  92. return generic_ioremap_prot(phys_addr, size, pgprot);
  93. }
  94. EXPORT_SYMBOL(ioremap_prot);
  95. /*
  96. * Simple checks for non-translatable mappings.
  97. */
  98. static inline int iomapping_nontranslatable(unsigned long offset)
  99. {
  100. #ifdef CONFIG_29BIT
  101. /*
  102. * In 29-bit mode this includes the fixed P1/P2 areas, as well as
  103. * parts of P3.
  104. */
  105. if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
  106. return 1;
  107. #endif
  108. return 0;
  109. }
  110. void iounmap(volatile void __iomem *addr)
  111. {
  112. unsigned long vaddr = (unsigned long __force)addr;
  113. /*
  114. * Nothing to do if there is no translatable mapping.
  115. */
  116. if (iomapping_nontranslatable(vaddr))
  117. return;
  118. /*
  119. * There's no VMA if it's from an early fixed mapping.
  120. */
  121. if (iounmap_fixed((void __iomem *)addr) == 0)
  122. return;
  123. /*
  124. * If the PMB handled it, there's nothing else to do.
  125. */
  126. if (pmb_unmap((void __iomem *)addr) == 0)
  127. return;
  128. generic_iounmap(addr);
  129. }
  130. EXPORT_SYMBOL(iounmap);