/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 * should only use index and base registers that are not shadowed,
 * so that the fast path emulation in the non-access miss handler
 * can be used.
 */
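/*
 * (On PA 2.0 the shadowed general registers are %r1, %r8, %r9, %r16,
 * %r17, %r24 and %r25; the fdc/fic/pdc loops below therefore keep
 * their index and base values in registers outside that set, e.g.
 * %r23, %r26, %r28 and %r31.)
 */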
#ifdef CONFIG_64BIT
	.level	2.0w
#else
	.level	2.0
#endif

#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ldcw.h>
#include <linux/linkage.h>
#include <linux/init.h>

	.section .text.hot
	.align	16

ENTRY_CFI(flush_tlb_all_local)
	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire TLB.  There must also be no intervening TLB
	 * operations, e.g. TLB misses, so the flush has to run in real
	 * mode with all interruptions disabled.
	 */

	/* pcxt_ssm_bug	- relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm		PSW_SM_I, %r19		/* save I-bit state */
	load32		PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

1:	load32		PA(cache_info), %r1

	/* Flush Instruction TLB */

	LDREG		ITLB_SID_BASE(%r1), %r20
	LDREG		ITLB_SID_STRIDE(%r1), %r21
	LDREG		ITLB_SID_COUNT(%r1), %r22
	LDREG		ITLB_OFF_BASE(%r1), %arg0
	LDREG		ITLB_OFF_STRIDE(%r1), %arg1
	LDREG		ITLB_OFF_COUNT(%r1), %arg2
	LDREG		ITLB_LOOP(%r1), %arg3

	addib,COND(=)	-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fitmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fitmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe		%r0(%sr1, %r28)
	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	addib,COND(>)	-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fitmanyloop /* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fitdone /* Outer loop count decr */

fitoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fitonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)	-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */

	addib,COND(>)	-1, %r22, fitoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */

fitdone:

	/* Flush Data TLB */

	LDREG		DTLB_SID_BASE(%r1), %r20
	LDREG		DTLB_SID_STRIDE(%r1), %r21
	LDREG		DTLB_SID_COUNT(%r1), %r22
	LDREG		DTLB_OFF_BASE(%r1), %arg0
	LDREG		DTLB_OFF_STRIDE(%r1), %arg1
	LDREG		DTLB_OFF_COUNT(%r1), %arg2
	LDREG		DTLB_LOOP(%r1), %arg3

	addib,COND(=)	-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fdtmanyloop:					/* Loop if LOOP >= 2 */
	mtsp		%r20, %sr1
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fdtmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe		%r0(%sr1, %r28)
	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	addib,COND(>)	-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fdtmanyloop /* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fdtdone /* Outer loop count decr */

fdtoneloop:					/* Loop if LOOP = 1 */
	mtsp		%r20, %sr1
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fdtonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)	-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */

	addib,COND(>)	-1, %r22, fdtoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */

fdtdone:
	/*
	 * Switch back to virtual mode
	 */
	/* pcxt_ssm_bug */
	rsm		PSW_SM_I, %r0
	load32		2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	or		%r1, %r19, %r1		/* I-bit to state on entry */
	mtctl		%r1, %ipsw		/* restore I-bit (entire PSW) */
	rfi
	nop

2:	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_tlb_all_local)
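/*
 * Both flush loops above are driven by the PDC-supplied cache_info
 * geometry: for each space (SID_BASE stepped by SID_STRIDE, SID_COUNT
 * times) they issue pitlbe/pdtlbe across OFF_COUNT offsets of
 * OFF_STRIDE bytes, with LOOP entries purged per offset.
 */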
	.import cache_info, data

ENTRY_CFI(flush_instruction_cache_local)
	load32		cache_info, %r1

	/* Flush Instruction Cache */

	LDREG		ICACHE_BASE(%r1), %arg0
	LDREG		ICACHE_STRIDE(%r1), %arg1
	LDREG		ICACHE_COUNT(%r1), %arg2
	LDREG		ICACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */
	mtsp		%r0, %sr1
	addib,COND(=)	-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice		%r0(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr		%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fisync /* Outer loop decr */

fioneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fice instruction */
	cmpib,COND(>>=),n	15, %arg2, fioneloop2

fioneloop1:
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fioneloop1
	fice,m		%arg1(%sr1, %arg0)

	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fisync	/* Predict branch taken */

fioneloop2:
	addib,COND(>)	-1, %arg2, fioneloop2	/* Outer loop count decr */
	fice,m		%arg1(%sr1, %arg0)	/* Fice for one loop */

fisync:
	sync
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_instruction_cache_local)


	.import cache_info, data
ENTRY_CFI(flush_data_cache_local)
	load32		cache_info, %r1

	/* Flush Data Cache */

	LDREG		DCACHE_BASE(%r1), %arg0
	LDREG		DCACHE_STRIDE(%r1), %arg1
	LDREG		DCACHE_COUNT(%r1), %arg2
	LDREG		DCACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */
	mtsp		%r0, %sr1
	addib,COND(=)	-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce		%r0(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr		%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fdsync /* Outer loop decr */

fdoneloop:					/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fdce instruction */
	cmpib,COND(>>=),n	15, %arg2, fdoneloop2

fdoneloop1:
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fdoneloop1
	fdce,m		%arg1(%sr1, %arg0)

	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fdsync	/* Predict branch taken */

fdoneloop2:
	addib,COND(>)	-1, %arg2, fdoneloop2	/* Outer loop count decr */
	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm		%r22			/* restore I-bit */
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_data_cache_local)

/* Macros to serialize TLB purge operations on SMP.  */

	.macro	tlb_lock	la,flags,tmp
#ifdef CONFIG_SMP
#if __PA_LDCW_ALIGNMENT > 4
	load32		pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
	depi		0,31,__PA_LDCW_ALIGN_ORDER, \la
#else
	load32		pa_tlb_lock, \la
#endif
	rsm		PSW_SM_I,\flags
1:	LDCW		0(\la),\tmp
	cmpib,<>,n	0,\tmp,3f
2:	ldw		0(\la),\tmp
	cmpb,<>		%r0,\tmp,1b
	nop
	b,n		2b
3:
#endif
	.endm

	.macro	tlb_unlock	la,flags,tmp
#ifdef CONFIG_SMP
	ldi		1,\tmp
	sync
	stw		\tmp,0(\la)
	mtsm		\flags
#endif
	.endm
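/*
 * Illustrative C equivalent of tlb_lock/tlb_unlock (a sketch, not part
 * of the build; irq_save()/irq_restore()/__ldcw() are stand-in names
 * for the rsm/mtsm and ldcw operations above):
 *
 *	flags = irq_save();
 *	while (__ldcw(&pa_tlb_lock) == 0)	// ldcw reads 0 while held
 *		while (pa_tlb_lock == 0)	// spin on plain loads
 *			;			// until the lock is free
 *	... purge ...
 *	sync();
 *	pa_tlb_lock = 1;			// store 1 to release
 *	irq_restore(flags);
 */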
/* Clear page using kernel mapping.  */
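/*
 * Functionally this is memset(page, 0, PAGE_SIZE): the stores below are
 * unrolled by hand (128 bytes of std, or 64 bytes of stw, per iteration)
 * to keep the store pipeline full.
 */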
ENTRY_CFI(clear_page_asm)
#ifdef CONFIG_64BIT

	/* Unroll the loop.  */
	ldi		(PAGE_SIZE / 128), %r1

1:
	std		%r0, 0(%r26)
	std		%r0, 8(%r26)
	std		%r0, 16(%r26)
	std		%r0, 24(%r26)
	std		%r0, 32(%r26)
	std		%r0, 40(%r26)
	std		%r0, 48(%r26)
	std		%r0, 56(%r26)
	std		%r0, 64(%r26)
	std		%r0, 72(%r26)
	std		%r0, 80(%r26)
	std		%r0, 88(%r26)
	std		%r0, 96(%r26)
	std		%r0, 104(%r26)
	std		%r0, 112(%r26)
	std		%r0, 120(%r26)

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b
	ldo		128(%r26), %r26

#else
	/*
	 * Note that until (if) we start saving the full 64-bit register
	 * values on interrupt, we can't use std on a 32-bit kernel.
	 */
	ldi		(PAGE_SIZE / 64), %r1

1:
	stw		%r0, 0(%r26)
	stw		%r0, 4(%r26)
	stw		%r0, 8(%r26)
	stw		%r0, 12(%r26)
	stw		%r0, 16(%r26)
	stw		%r0, 20(%r26)
	stw		%r0, 24(%r26)
	stw		%r0, 28(%r26)
	stw		%r0, 32(%r26)
	stw		%r0, 36(%r26)
	stw		%r0, 40(%r26)
	stw		%r0, 44(%r26)
	stw		%r0, 48(%r26)
	stw		%r0, 52(%r26)
	stw		%r0, 56(%r26)
	stw		%r0, 60(%r26)

	addib,COND(>),n	-1, %r1, 1b
	ldo		64(%r26), %r26
#endif
	bv		%r0(%r2)
	nop
ENDPROC_CFI(clear_page_asm)

/* Copy page using kernel mapping.  */
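/*
 * Per the parisc calling convention, %r26 (arg0) is the destination
 * page and %r25 (arg1) the source; this is memcpy(to, from, PAGE_SIZE)
 * over the kernel mapping.
 */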
ENTRY_CFI(copy_page_asm)
#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * Prefetch doesn't improve performance on rp3440.
	 * GCC probably can do this just as well...
	 */
	ldi		(PAGE_SIZE / 128), %r1

1:	ldd		0(%r25), %r19
	ldd		8(%r25), %r20
	ldd		16(%r25), %r21
	ldd		24(%r25), %r22
	std		%r19, 0(%r26)
	std		%r20, 8(%r26)
	ldd		32(%r25), %r19
	ldd		40(%r25), %r20
	std		%r21, 16(%r26)
	std		%r22, 24(%r26)
	ldd		48(%r25), %r21
	ldd		56(%r25), %r22
	std		%r19, 32(%r26)
	std		%r20, 40(%r26)
	ldd		64(%r25), %r19
	ldd		72(%r25), %r20
	std		%r21, 48(%r26)
	std		%r22, 56(%r26)
	ldd		80(%r25), %r21
	ldd		88(%r25), %r22
	std		%r19, 64(%r26)
	std		%r20, 72(%r26)
	ldd		96(%r25), %r19
	ldd		104(%r25), %r20
	std		%r21, 80(%r26)
	std		%r22, 88(%r26)
	ldd		112(%r25), %r21
	ldd		120(%r25), %r22
	ldo		128(%r25), %r25
	std		%r19, 96(%r26)
	std		%r20, 104(%r26)
	std		%r21, 112(%r26)
	std		%r22, 120(%r26)

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b
	ldo		128(%r26), %r26

#else
	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving the full 64-bit register
	 * values on interrupt, we can't use ldd/std on a 32-bit kernel.
	 */
	ldw		0(%r25), %r19
	ldi		(PAGE_SIZE / 64), %r1

1:
	ldw		4(%r25), %r20
	ldw		8(%r25), %r21
	ldw		12(%r25), %r22
	stw		%r19, 0(%r26)
	stw		%r20, 4(%r26)
	stw		%r21, 8(%r26)
	stw		%r22, 12(%r26)
	ldw		16(%r25), %r19
	ldw		20(%r25), %r20
	ldw		24(%r25), %r21
	ldw		28(%r25), %r22
	stw		%r19, 16(%r26)
	stw		%r20, 20(%r26)
	stw		%r21, 24(%r26)
	stw		%r22, 28(%r26)
	ldw		32(%r25), %r19
	ldw		36(%r25), %r20
	ldw		40(%r25), %r21
	ldw		44(%r25), %r22
	stw		%r19, 32(%r26)
	stw		%r20, 36(%r26)
	stw		%r21, 40(%r26)
	stw		%r22, 44(%r26)
	ldw		48(%r25), %r19
	ldw		52(%r25), %r20
	ldw		56(%r25), %r21
	ldw		60(%r25), %r22
	stw		%r19, 48(%r26)
	stw		%r20, 52(%r26)
	ldo		64(%r25), %r25
	stw		%r21, 56(%r26)
	stw		%r22, 60(%r26)
	ldo		64(%r26), %r26

	addib,COND(>),n	-1, %r1, 1b
	ldw		0(%r25), %r19
#endif
	bv		%r0(%r2)
	nop
ENDPROC_CFI(copy_page_asm)

/*
 * NOTE: Code in clear_user_page has a hard-coded dependency on the
 * maximum alias boundary being 4 MB.  We've been assured by the
 * parisc chip designers that there will not ever be a parisc
 * chip with a larger alias boundary (Never say never :-) ).
 *
 * Subtle: the dtlb miss handlers support the temp alias region by
 * "knowing" that if a dtlb miss happens within the temp alias
 * region it must have occurred while in clear_user_page.  Since
 * this routine makes use of processor-local translations, we
 * don't want to insert them into the kernel page table.  Instead,
 * we load up some general registers (they need to be registers
 * which aren't shadowed) with the physical page numbers (preshifted
 * for tlb insertion) needed to insert the translations.  When we
 * miss on the translation, the dtlb miss handler inserts the
 * translation into the tlb using these values:
 *
 *   %r26 physical page (shifted for tlb insert) of "to" translation
 *   %r23 physical page (shifted for tlb insert) of "from" translation
 */

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
#define PAGE_ADD_SHIFT	(PAGE_SHIFT-12)
	.macro	convert_phys_for_tlb_insert20  phys
	extrd,u		\phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
#if _PAGE_SIZE_ENCODING_DEFAULT
	depdi		_PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
#endif
	.endm
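/*
 * PAGE_ADD_SHIFT is 0 for 4 kB base pages, 2 for 16 kB and 4 for 64 kB
 * (PAGE_SHIFT - 12).  The extrd,u extracts the physical page number
 * into the position iitlbt/idtlbt expect, discarding the low offset
 * and protection bits, and the optional depdi deposits the default
 * page-size encoding into the low bits of the result when a non-4 kB
 * base page size is configured.
 */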
/*
 * copy_user_page_asm() performs a page copy using mappings
 * equivalent to the user page mappings.  It can be used to
 * implement copy_user_page(), but unfortunately both the `from'
 * and `to' pages need to be flushed through mappings equivalent
 * to the user mappings after the copy, because the kernel accesses
 * the `from' page through the kmap kernel mapping and the `to'
 * page needs to be flushed since code can be copied.  As a
 * result, this implementation is less efficient than the simpler
 * copy using the kernel mapping.  That copy only needs the `from'
 * page to be flushed via the user mapping; the kunmap routines
 * handle the flushes needed for the kernel mapping.
 *
 * I'm still keeping this around because it may be possible to
 * use it if more information is passed into copy_user_page().
 * Have to do some measurements to see if it is worthwhile to
 * lobby for such a change.
 */

ENTRY_CFI(copy_user_page_asm)
	/* Convert virtual `to' and `from' addresses to physical addresses.
	   Move `from' physical address to non-shadowed register.  */
	ldil		L%(__PAGE_OFFSET), %r1
	sub		%r26, %r1, %r26
	sub		%r25, %r1, %r23

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
	depd		%r24, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy		%r28, %r29
	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u		%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw		%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy		%r28, %r29
	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif

	/* Purge any old translations */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
	pdtlb,l		%r0(%r29)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	pdtlb		%r0(%r29)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */
	ldd		0(%r29), %r19
	ldi		(PAGE_SIZE / 128), %r1

1:	ldd		8(%r29), %r20
	ldd		16(%r29), %r21
	ldd		24(%r29), %r22
	std		%r19, 0(%r28)
	std		%r20, 8(%r28)
	ldd		32(%r29), %r19
	ldd		40(%r29), %r20
	std		%r21, 16(%r28)
	std		%r22, 24(%r28)
	ldd		48(%r29), %r21
	ldd		56(%r29), %r22
	std		%r19, 32(%r28)
	std		%r20, 40(%r28)
	ldd		64(%r29), %r19
	ldd		72(%r29), %r20
	std		%r21, 48(%r28)
	std		%r22, 56(%r28)
	ldd		80(%r29), %r21
	ldd		88(%r29), %r22
	std		%r19, 64(%r28)
	std		%r20, 72(%r28)
	ldd		96(%r29), %r19
	ldd		104(%r29), %r20
	std		%r21, 80(%r28)
	std		%r22, 88(%r28)
	ldd		112(%r29), %r21
	ldd		120(%r29), %r22
	std		%r19, 96(%r28)
	std		%r20, 104(%r28)
	ldo		128(%r29), %r29
	std		%r21, 112(%r28)
	std		%r22, 120(%r28)
	ldo		128(%r28), %r28

	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch.  Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	addib,COND(>),n	-1, %r1, 1b		/* bundle 10 */
	ldd		0(%r29), %r19		/* start next loads */

#else
	ldi		(PAGE_SIZE / 64), %r1

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).  It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions.  Note that until (if) we start saving
	 * the full 64-bit register values on interrupt, we can't
	 * use ldd/std on a 32-bit kernel.
	 */

1:	ldw		0(%r29), %r19
	ldw		4(%r29), %r20
	ldw		8(%r29), %r21
	ldw		12(%r29), %r22
	stw		%r19, 0(%r28)
	stw		%r20, 4(%r28)
	stw		%r21, 8(%r28)
	stw		%r22, 12(%r28)
	ldw		16(%r29), %r19
	ldw		20(%r29), %r20
	ldw		24(%r29), %r21
	ldw		28(%r29), %r22
	stw		%r19, 16(%r28)
	stw		%r20, 20(%r28)
	stw		%r21, 24(%r28)
	stw		%r22, 28(%r28)
	ldw		32(%r29), %r19
	ldw		36(%r29), %r20
	ldw		40(%r29), %r21
	ldw		44(%r29), %r22
	stw		%r19, 32(%r28)
	stw		%r20, 36(%r28)
	stw		%r21, 40(%r28)
	stw		%r22, 44(%r28)
	ldw		48(%r29), %r19
	ldw		52(%r29), %r20
	ldw		56(%r29), %r21
	ldw		60(%r29), %r22
	stw		%r19, 48(%r28)
	stw		%r20, 52(%r28)
	stw		%r21, 56(%r28)
	stw		%r22, 60(%r28)
	ldo		64(%r28), %r28

	addib,COND(>)	-1, %r1, 1b
	ldo		64(%r29), %r29
#endif
	bv		%r0(%r2)
	nop
ENDPROC_CFI(copy_user_page_asm)

ENTRY_CFI(clear_user_page_asm)
	tophys_r1	%r26

	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

#ifdef CONFIG_64BIT
	ldi		(PAGE_SIZE / 128), %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define	PREFETCHW_OP	ldd	256(%0), %r0 */

1:	std		%r0, 0(%r28)
	std		%r0, 8(%r28)
	std		%r0, 16(%r28)
	std		%r0, 24(%r28)
	std		%r0, 32(%r28)
	std		%r0, 40(%r28)
	std		%r0, 48(%r28)
	std		%r0, 56(%r28)
	std		%r0, 64(%r28)
	std		%r0, 72(%r28)
	std		%r0, 80(%r28)
	std		%r0, 88(%r28)
	std		%r0, 96(%r28)
	std		%r0, 104(%r28)
	std		%r0, 112(%r28)
	std		%r0, 120(%r28)
	addib,COND(>)	-1, %r1, 1b
	ldo		128(%r28), %r28

#else	/* ! CONFIG_64BIT */
	ldi		(PAGE_SIZE / 64), %r1

1:	stw		%r0, 0(%r28)
	stw		%r0, 4(%r28)
	stw		%r0, 8(%r28)
	stw		%r0, 12(%r28)
	stw		%r0, 16(%r28)
	stw		%r0, 20(%r28)
	stw		%r0, 24(%r28)
	stw		%r0, 28(%r28)
	stw		%r0, 32(%r28)
	stw		%r0, 36(%r28)
	stw		%r0, 40(%r28)
	stw		%r0, 44(%r28)
	stw		%r0, 48(%r28)
	stw		%r0, 52(%r28)
	stw		%r0, 56(%r28)
	stw		%r0, 60(%r28)
	addib,COND(>)	-1, %r1, 1b
	ldo		64(%r28), %r28
#endif	/* CONFIG_64BIT */

	bv		%r0(%r2)
	nop
ENDPROC_CFI(clear_user_page_asm)

ENTRY_CFI(flush_dcache_page_asm)
	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25

1:	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	fdc,m		%r31(%r28)
	cmpb,COND(<<)	%r28, %r25, 1b
	fdc,m		%r31(%r28)

	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_dcache_page_asm)

ENTRY_CFI(flush_icache_page_asm)
	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif

	/* Purge any old translation.  Note that the FIC instruction
	 * may use either the instruction or data TLB.  Given that we
	 * have a flat address space, it's not clear which TLB will be
	 * used.  So, we purge both entries.  */

#ifdef CONFIG_PA20
	pdtlb,l		%r0(%r28)
	pitlb,l		%r0(%sr4,%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb		%r0(%r28)
	pitlb		%r0(%sr4,%r28)
	tlb_unlock	%r20,%r21,%r22
#endif

	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r31

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r28, %r25, %r25
	sub		%r25, %r31, %r25

	/* fic only has the type 26 form on PA1.1, requiring an
	 * explicit space specification, so use %sr4 */
1:	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	fic,m		%r31(%sr4,%r28)
	cmpb,COND(<<)	%r28, %r25, 1b
	fic,m		%r31(%sr4,%r28)

	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_icache_page_asm)

ENTRY_CFI(flush_kernel_dcache_page_asm)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25

1:	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	fdc,m		%r23(%r26)
	cmpb,COND(<<)	%r26, %r25, 1b
	fdc,m		%r23(%r26)

	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_dcache_page_asm)

ENTRY_CFI(purge_kernel_dcache_page_asm)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25

1:	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	pdc,m		%r23(%r26)
	cmpb,COND(<<)	%r26, %r25, 1b
	pdc,m		%r23(%r26)

	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(purge_kernel_dcache_page_asm)

ENTRY_CFI(flush_user_dcache_range_asm)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m		%r23(%sr3, %r26)

	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_user_dcache_range_asm)
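/*
 * The ldo -1/ANDCM pair above rounds the start address down to a
 * cache-line boundary, i.e. addr &= ~(stride - 1), before stepping
 * through the range one stride at a time; the same pattern recurs in
 * the range flushes below.
 */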
ENTRY_CFI(flush_kernel_dcache_range_asm)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m		%r23(%r26)

	sync
	syncdma
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_dcache_range_asm)

ENTRY_CFI(purge_kernel_dcache_range_asm)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	pdc,m		%r23(%r26)

	sync
	syncdma
	bv		%r0(%r2)
	nop
ENDPROC_CFI(purge_kernel_dcache_range_asm)

ENTRY_CFI(flush_user_icache_range_asm)
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m		%r23(%sr3, %r26)

	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_user_icache_range_asm)

ENTRY_CFI(flush_kernel_icache_page)
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif
	add		%r26, %r25, %r25
	sub		%r25, %r23, %r25

1:	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	cmpb,COND(<<)	%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)

	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_icache_page)

ENTRY_CFI(flush_kernel_icache_range_asm)
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)

	sync
	bv		%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_icache_range_asm)

	__INIT

	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
ENTRY_CFI(disable_sr_hashing_asm)
	/*
	 * Switch to real mode
	 */
	/* pcxt_ssm_bug */
	rsm		PSW_SM_I, %r0
	load32		PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

1:	cmpib,=,n	SRHASH_PCXST, %r26, srdis_pcxs
	cmpib,=,n	SRHASH_PCXL, %r26, srdis_pcxl
	cmpib,=,n	SRHASH_PA20, %r26, srdis_pa20
	b,n		srdis_done

srdis_pcxs:
	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */
	.word		0x141c1a00		/* mfdiag %dr0, %r28 */
	.word		0x141c1a00		/* must issue twice */
	depwi		0,18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi		0,20,1, %r28		/* Clear IHE (icache hash enable) */
	.word		0x141c1600		/* mtdiag %r28, %dr0 */
	.word		0x141c1600		/* must issue twice */
	b,n		srdis_done

srdis_pcxl:
	/* Disable Space Register Hashing for PCXL */
	.word		0x141c0600		/* mfdiag %dr0, %r28 */
	depwi		0,28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word		0x141c0240		/* mtdiag %r28, %dr0 */
	b,n		srdis_done

srdis_pa20:
	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */
	.word		0x144008bc		/* mfdiag %dr2, %r28 */
	depdi		0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word		0x145c1840		/* mtdiag %r28, %dr2 */
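/*
 * (The .word values above are hand-assembled encodings of the
 * implementation-specific mfdiag/mtdiag diagnose instructions, which
 * the assembler has no mnemonics for; the intended instruction is
 * noted alongside each raw opcode.)
 */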
srdis_done:
	/* Switch back to virtual mode */
	rsm		PSW_SM_I, %r0
	load32		2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	ldo		4(%r1), %r1
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	mtctl		%r1, %ipsw
	rfi
	nop

2:	bv		%r0(%r2)
	nop
ENDPROC_CFI(disable_sr_hashing_asm)

	.end