/* arch/mips/mm/tlb-r3k.c */
// SPDX-License-Identifier: GPL-2.0
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 * Copyright (C) 2002 Ralf Baechle
 * Copyright (C) 2002 Maciej W. Rozycki
 */
  14. #include <linux/kernel.h>
  15. #include <linux/sched.h>
  16. #include <linux/smp.h>
  17. #include <linux/mm.h>
  18. #include <asm/page.h>
  19. #include <asm/pgtable.h>
  20. #include <asm/mmu_context.h>
  21. #include <asm/tlbmisc.h>
  22. #include <asm/isadep.h>
  23. #include <asm/io.h>
  24. #include <asm/bootinfo.h>
  25. #include <asm/cpu.h>
/* Define DEBUG_TLB to enable the verbose TLB trace printks below. */
#undef DEBUG_TLB

/* TLB refill exception handler generator; called once from tlb_init(). */
extern void build_tlb_refill_handler(void);

/*
 * CP0 hazard avoidance: a single nop in a noreorder region, inserted
 * between a CP0 write and an instruction that depends on its effect.
 * Several functions below instead place an ordinary statement in that
 * slot and tag it with a "BARRIER" comment -- the ordering matters.
 */
#define BARRIER \
	__asm__ __volatile__( \
		".set push\n\t" \
		".set noreorder\n\t" \
		"nop\n\t" \
		".set pop\n\t")

/* Nonzero when the CPU has a usable CP0 Wired register (TX39XX). */
int r3k_have_wired_reg;	/* Should be in cpu_data? */
/* TLB operations. */

/*
 * Invalidate every TLB entry from index 'entry' up to the end of the
 * TLB.  Each slot is overwritten with EntryLo0 = 0 (invalid) and a
 * unique VPN built from the slot number ((entry | 0x80000) << 12) so
 * that no two invalidated entries can match the same address.
 * The current ASID is preserved across the flush.
 */
static void local_flush_tlb_from(int entry)
{
	unsigned long old_ctx;

	/* Save the live ASID; restored before returning. */
	old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
	write_c0_entrylo0(0);
	while (entry < current_cpu_data.tlbsize) {
		/* R3000 Index register takes the slot number shifted by 8. */
		write_c0_index(entry << 8);
		write_c0_entryhi((entry | 0x80000) << 12);
		/* The increment fills the CP0 hazard slot -- do not move it. */
		entry++;				/* BARRIER */
		tlb_write_indexed();
	}
	write_c0_entryhi(old_ctx);
}
  50. void local_flush_tlb_all(void)
  51. {
  52. unsigned long flags;
  53. #ifdef DEBUG_TLB
  54. printk("[tlball]");
  55. #endif
  56. local_irq_save(flags);
  57. local_flush_tlb_from(r3k_have_wired_reg ? read_c0_wired() : 8);
  58. local_irq_restore(flags);
  59. }
  60. void local_flush_tlb_mm(struct mm_struct *mm)
  61. {
  62. int cpu = smp_processor_id();
  63. if (cpu_context(cpu, mm) != 0) {
  64. #ifdef DEBUG_TLB
  65. printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
  66. #endif
  67. drop_mmu_context(mm, cpu);
  68. }
  69. }
/*
 * Flush the user address range [start, end) of @vma's address space
 * from this CPU's TLB.
 *
 * If the range covers more pages than the TLB has entries, it is
 * cheaper to drop the whole MMU context than to probe page by page.
 * Statements tagged "BARRIER" sit between a CP0 write and the TLB
 * instruction that depends on it, filling the hazard slot -- do not
 * reorder them.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	/* A context of 0 means @mm has no ASID on this CPU. */
	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
			cpu_context(cpu, mm) & asid_mask, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= current_cpu_data.tlbsize) {
			int oldpid = read_c0_entryhi() & asid_mask;
			int newpid = cpu_context(cpu, mm) & asid_mask;

			start &= PAGE_MASK;
			end += PAGE_SIZE - 1;
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				/* Probe for the page under @mm's ASID. */
				write_c0_entryhi(start | newpid);
				start += PAGE_SIZE;	/* BARRIER */
				tlb_probe();
				idx = read_c0_index();
				/* Stage an invalid entry with a KSEG0 VPN. */
				write_c0_entrylo0(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* BARRIER */
					continue;	/* page not in TLB */
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			/* Range larger than the TLB: drop the context. */
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
  109. void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
  110. {
  111. unsigned long size, flags;
  112. #ifdef DEBUG_TLB
  113. printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end);
  114. #endif
  115. local_irq_save(flags);
  116. size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
  117. if (size <= current_cpu_data.tlbsize) {
  118. int pid = read_c0_entryhi();
  119. start &= PAGE_MASK;
  120. end += PAGE_SIZE - 1;
  121. end &= PAGE_MASK;
  122. while (start < end) {
  123. int idx;
  124. write_c0_entryhi(start);
  125. start += PAGE_SIZE; /* BARRIER */
  126. tlb_probe();
  127. idx = read_c0_index();
  128. write_c0_entrylo0(0);
  129. write_c0_entryhi(KSEG0);
  130. if (idx < 0) /* BARRIER */
  131. continue;
  132. tlb_write_indexed();
  133. }
  134. write_c0_entryhi(pid);
  135. } else {
  136. local_flush_tlb_all();
  137. }
  138. local_irq_restore(flags);
  139. }
/*
 * Flush the single page mapping @page of @vma's address space from
 * this CPU's TLB.  Probes under the mm's ASID, then overwrites the
 * matching slot (if any) with an invalid KSEG0 entry.  The BARRIER
 * placements fill CP0 hazard slots -- do not reorder.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
	int cpu = smp_processor_id();

	/* A context of 0 means the mm has no ASID on this CPU. */
	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

#ifdef DEBUG_TLB
		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
		newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
		page &= PAGE_MASK;
		local_irq_save(flags);
		oldpid = read_c0_entryhi() & asid_mask;
		write_c0_entryhi(page | newpid);
		BARRIER;
		tlb_probe();
		idx = read_c0_index();
		/* Stage an invalid entry with a KSEG0 VPN. */
		write_c0_entrylo0(0);
		write_c0_entryhi(KSEG0);
		if (idx < 0)			/* BARRIER */
			goto finish;		/* page not in TLB */
		tlb_write_indexed();

finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}
  168. void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
  169. {
  170. unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
  171. unsigned long flags;
  172. int idx, pid;
  173. /*
  174. * Handle debugger faulting in for debugee.
  175. */
  176. if (current->active_mm != vma->vm_mm)
  177. return;
  178. pid = read_c0_entryhi() & asid_mask;
  179. #ifdef DEBUG_TLB
  180. if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
  181. printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
  182. (cpu_context(cpu, vma->vm_mm)), pid);
  183. }
  184. #endif
  185. local_irq_save(flags);
  186. address &= PAGE_MASK;
  187. write_c0_entryhi(address | pid);
  188. BARRIER;
  189. tlb_probe();
  190. idx = read_c0_index();
  191. write_c0_entrylo0(pte_val(pte));
  192. write_c0_entryhi(address | pid);
  193. if (idx < 0) { /* BARRIER */
  194. tlb_write_random();
  195. } else {
  196. tlb_write_indexed();
  197. }
  198. write_c0_entryhi(pid);
  199. local_irq_restore(flags);
  200. }
  201. void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
  202. unsigned long entryhi, unsigned long pagemask)
  203. {
  204. unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
  205. unsigned long flags;
  206. unsigned long old_ctx;
  207. static unsigned long wired = 0;
  208. if (r3k_have_wired_reg) { /* TX39XX */
  209. unsigned long old_pagemask;
  210. unsigned long w;
  211. #ifdef DEBUG_TLB
  212. printk("[tlbwired<entry lo0 %8x, hi %8x\n, pagemask %8x>]\n",
  213. entrylo0, entryhi, pagemask);
  214. #endif
  215. local_irq_save(flags);
  216. /* Save old context and create impossible VPN2 value */
  217. old_ctx = read_c0_entryhi() & asid_mask;
  218. old_pagemask = read_c0_pagemask();
  219. w = read_c0_wired();
  220. write_c0_wired(w + 1);
  221. write_c0_index(w << 8);
  222. write_c0_pagemask(pagemask);
  223. write_c0_entryhi(entryhi);
  224. write_c0_entrylo0(entrylo0);
  225. BARRIER;
  226. tlb_write_indexed();
  227. write_c0_entryhi(old_ctx);
  228. write_c0_pagemask(old_pagemask);
  229. local_flush_tlb_all();
  230. local_irq_restore(flags);
  231. } else if (wired < 8) {
  232. #ifdef DEBUG_TLB
  233. printk("[tlbwired<entry lo0 %8x, hi %8x\n>]\n",
  234. entrylo0, entryhi);
  235. #endif
  236. local_irq_save(flags);
  237. old_ctx = read_c0_entryhi() & asid_mask;
  238. write_c0_entrylo0(entrylo0);
  239. write_c0_entryhi(entryhi);
  240. write_c0_index(wired);
  241. wired++; /* BARRIER */
  242. tlb_write_indexed();
  243. write_c0_entryhi(old_ctx);
  244. local_flush_tlb_all();
  245. local_irq_restore(flags);
  246. }
  247. }
  248. void tlb_init(void)
  249. {
  250. switch (current_cpu_type()) {
  251. case CPU_TX3922:
  252. case CPU_TX3927:
  253. r3k_have_wired_reg = 1;
  254. write_c0_wired(0); /* Set to 8 on reset... */
  255. break;
  256. }
  257. local_flush_tlb_from(0);
  258. build_tlb_refill_handler();
  259. }