pgtable.h

/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *
 * Based on asm/pgtable-32.h from mips which is:
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef _ASM_NIOS2_PGTABLE_H
#define _ASM_NIOS2_PGTABLE_H

#include <linux/io.h>
#include <linux/bug.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>
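
/*
 * Kernel virtual-memory layout (as implied by the defines below): vmalloc
 * space runs from the MMU region base up to 32 MiB below the kernel region,
 * and that last 32 MiB window holds module text/data, keeping modules close
 * to the kernel image (presumably so direct calls stay within branch range).
 */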
#define VMALLOC_START		CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
#define VMALLOC_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - SZ_32M - 1)

#define MODULES_VADDR		(CONFIG_NIOS2_KERNEL_REGION_BASE - SZ_32M)
#define MODULES_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - 1)

struct mm_struct;

/* Helper macro */
#define MKP(x, w, r) __pgprot(_PAGE_PRESENT | _PAGE_CACHED |	\
				((x) ? _PAGE_EXEC : 0) |	\
				((r) ? _PAGE_READ : 0) |	\
				((w) ? _PAGE_WRITE : 0))
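
/*
 * MKP(x, w, r) builds a present, cached protection with the requested
 * execute/write/read bits: e.g. PAGE_COPY below is MKP(0, 0, 1), a
 * read-only protection used for copy-on-write private pages.
 */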

/*
 * These are the macros that generic kernel code needs
 * (to populate protection_map[])
 */

/* Used all over the kernel */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
			     _PAGE_WRITE | _PAGE_EXEC | _PAGE_GLOBAL)

/* Shared pages can have exact HW mapping */
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
			     _PAGE_WRITE | _PAGE_ACCESSED)

/* Remove W bit on private pages for COW support */
#define PAGE_COPY MKP(0, 0, 1)

#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

#define USER_PTRS_PER_PGD	\
	(CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE)

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
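
/*
 * Worked out (assuming the usual 4 KiB PAGE_SIZE and 4-byte table entries):
 * each table holds 1024 entries, each PGD entry spans 1UL << 22 = 4 MiB,
 * so 1024 PGD entries cover the full 32-bit address space, and
 * USER_PTRS_PER_PGD is simply the number of 4 MiB slots below the kernel
 * MMU region.
 */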

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval)
{
	*pmdptr = pmdval;
}

static inline int pte_write(pte_t pte)
	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)
	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)
	{ return pte_val(pte) & _PAGE_ACCESSED; }

#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_CACHED;

	return __pgprot(prot);
}
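
/*
 * Generic code picks pgprot_noncached() up (via the #define above) when it
 * needs an uncached mapping, e.g. for device memory; on this port that
 * just means clearing _PAGE_CACHED.
 */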

static inline int pte_none(pte_t pte)
{
	return !(pte_val(pte) & ~(_PAGE_GLOBAL|0xf));
}
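
/*
 * pte_none() ignores the low nibble because pte_clear() below parks the
 * low four bits of the page number there instead of writing plain zero;
 * _PAGE_GLOBAL is ignored as well, presumably so a cleared entry that
 * still carries the global bit is treated as empty.
 */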

static inline int pte_present(pte_t pte)
	{ return pte_val(pte) & _PAGE_PRESENT; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC;

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
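
/*
 * pte_modify() replaces only the R/W/X permission bits with the new
 * protection; the page frame number and the dirty/accessed/cache state of
 * the old PTE are preserved, which is what e.g. mprotect() expects.
 */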

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != (unsigned long) invalid_pte_table)
			&& (pmd_val(pmd) != 0UL);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = (unsigned long) invalid_pte_table;
}
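
/*
 * An empty directory slot points at invalid_pte_table (a page full of
 * invalid PTEs) rather than at NULL, so a page-table walk always finds a
 * valid lower-level table to index; pmd_present() above and pmd_none()
 * below accordingly treat both that value and 0 as "no table".
 */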

#define pte_pfn(pte)		(pte_val(pte) & 0xfffff)
#define pfn_pte(pfn, prot)	(__pte(pfn | pgprot_val(prot)))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
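
/*
 * The page frame number occupies the low 20 bits of a PTE (PFN_PTE_SHIFT
 * below is 0); with 4 KiB pages that covers a 20 + 12 = 32-bit physical
 * address space. pfn_pte() relies on this: it ORs the pfn in unshifted.
 */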

/*
 * Store a linux PTE into the linux page table.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

#define PFN_PTE_SHIFT		0

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned long paddr = (unsigned long)page_to_virt(pte_page(pte));

	flush_dcache_range(paddr, paddr + nr * PAGE_SIZE);
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pte) += 1;
	}
}
#define set_ptes set_ptes
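
/*
 * Because the pfn sits in the low bits unshifted (PFN_PTE_SHIFT == 0),
 * "pte_val(pte) += 1" advances the entry by exactly one page frame, so
 * set_ptes() maps nr consecutive pages while doing a single up-front
 * dcache flush over the whole range.
 */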

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) ==
		(unsigned long) invalid_pte_table) || (pmd_val(pmd) == 0UL);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

static inline void pte_clear(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
	pte_t null;

	pte_val(null) = (addr >> PAGE_SHIFT) & 0xf;

	set_pte(ptep, null);
}
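
/*
 * Note that a cleared PTE is not all-zero: it keeps the low four bits of
 * the page number, which pte_none() above deliberately masks out (likely
 * so each cleared slot still holds an address-dependent value for the
 * TLB-handling code). Everything else, including _PAGE_PRESENT, is zero.
 */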

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, prot)	(pfn_pte(page_to_pfn(page), prot))

#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
#define pmd_pfn(pmd)		(pmd_phys(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return pmd_val(pmd);
}

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", \
		__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   E < type -> 0 0 0 0 0 0 <-------------- offset --------------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *
 * Note that the offset field is always non-zero if the swap type is 0, thus
 * !pte_none() is always true.
 */
#define __swp_type(swp)		(((swp).val >> 26) & 0x1f)
#define __swp_offset(swp)	((swp).val & 0xfffff)
#define __swp_entry(type, off)	((swp_entry_t) { (((type) & 0x1f) << 26) \
						 | ((off) & 0xfffff) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
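
/*
 * Worked example (illustrative values): __swp_entry(2, 0x123) yields
 * (2 << 26) | 0x123 == 0x08000123; __swp_type() then recovers 2 from
 * bits 30..26 and __swp_offset() recovers 0x123 from the low 20 bits.
 */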

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern void __init paging_init(void);
extern void __init mmu_init(void);

void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr);

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#endif /* _ASM_NIOS2_PGTABLE_H */