entry.S

/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */

#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <linux/linkage.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import	pa_tlb_lock,data
	.macro	load_pa_tlb_lock reg
#if __PA_LDCW_ALIGNMENT > 4
	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
#else
	load32	PA(pa_tlb_lock), \reg
#endif
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
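
	/*
	 * For the common SPACEID_SHIFT == 0 case the depd,z above deposits
	 * 31 bits of the space id one position to the left, i.e. the
	 * protection id is just the space id shifted up by one.  A minimal
	 * C model of that branch (the fixed-width type and function name
	 * are illustrative assumptions, not kernel API):
	 *
	 *	#include <stdint.h>
	 *
	 *	static inline uint64_t space_to_prot(uint64_t spc)
	 *	{
	 *		// depd,z \spc,62,31,\prot: deposit 31 bits of spc
	 *		// ending at big-endian bit 62, zeroing the rest
	 *		return (spc & 0x7fffffffULL) << 1;
	 *	}
	 */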

	/* Switch to virtual mapping, trashing only %r1 */
	.macro	virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 * If sr7 == 0
	 *      Already using a kernel stack, so call the
	 *      get_stack_use_r30 macro to push a pt_regs structure
	 *      on the stack, and store registers there.
	 * else
	 *      Need to set up a kernel stack, so call the
	 *      get_stack_use_cr30 macro to set up a pointer
	 *      to the pt_regs structure contained within the
	 *      task pointer pointed to by cr30.  Set the stack
	 *      pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29.  %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr.  We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map.  We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption.  %r29 is used to hold a pointer to
	 * the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro	get_stack_use_cr30
	/* we save the registers in the task struct */
	copy	%r30, %r17
	mfctl	%cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys	%r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys	%r1,%r9
	ldo	TASK_REGS(%r9),%r9
	STREG	%r17,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	get_stack_use_r30
	/* we put a struct pt_regs on the stack and save the registers there */
	tophys	%r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r1,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm
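
	/*
	 * In C terms the two macros above amount to the following dispatch
	 * (a sketch only: the helpers stand in for reading %sr7/%cr30 and
	 * for the real shadowed-register dance, and are not kernel APIs):
	 *
	 *	struct pt_regs;
	 *	extern unsigned long read_sr7(void);
	 *	extern struct pt_regs *push_pt_regs_on_kernel_stack(void);
	 *	extern struct pt_regs *pt_regs_inside_task_from_cr30(void);
	 *
	 *	static struct pt_regs *get_stack(void)
	 *	{
	 *		// sr7 == 0: we were already on a kernel stack
	 *		if (read_sr7() == 0)
	 *			return push_pt_regs_on_kernel_stack();  // get_stack_use_r30
	 *		// else switch to the kernel stack of the task in %cr30
	 *		return pt_regs_inside_task_from_cr30();  // get_stack_use_cr30
	 *	}
	 */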

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */
	.macro	itlb_11 code
	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va
	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */
	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va
	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */
	.macro	naitlb_11 code
	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va
	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */
	.macro	naitlb_20 code
	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va
	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */
	.macro	dtlb_11 code
	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va
	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */
	.macro	dtlb_20 code
	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va
	.align	32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
	.macro	nadtlb_11 code
	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va
	.align	32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */
	.macro	nadtlb_20 code
	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va
	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */
	.macro	dbit_11 code
	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va
	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */
	.macro	dbit_20 code
	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va
	.align	32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault address.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
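
	/*
	 * A hedged C model of space_adjust (the SPACEID_SHIFT value below is
	 * illustrative; the real one comes from asm/page.h): the low
	 * SPACEID_SHIFT bits of the space id become bits [32,32+SPACEID_SHIFT)
	 * of the virtual address and are cleared in the space register.
	 *
	 *	#include <stdint.h>
	 *	#define SPACEID_SHIFT 4			// illustrative only
	 *
	 *	static void space_adjust_c(uint64_t *spc, uint64_t *va)
	 *	{
	 *		uint64_t mask = (1ULL << SPACEID_SHIFT) - 1;
	 *		uint64_t hi = *spc & mask;	// extrd,u \spc,63,SHIFT,\tmp
	 *		*spc &= ~mask;			// depd %r0,63,SHIFT,\spc
	 *		*va = (*va & ~(mask << 32)) | (hi << 32); // depd \tmp,31,...
	 *	}
	 */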

	.import	swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd	spc,reg
	ldil	L%PA(swapper_pg_dir),\reg
	ldo	R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=) %r0,\spc,%r0
	mfctl	%cr25,\reg
	.endm
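
	/*
	 * The or,COND(=) nullifies the mfctl when \spc is zero, so in C the
	 * macro reads (read_cr25() is an illustrative stand-in for the mfctl):
	 *
	 *	extern unsigned long swapper_pg_dir[];
	 *	extern unsigned long *read_cr25(void);
	 *
	 *	static unsigned long *get_pgd_c(unsigned long spc)
	 *	{
	 *		// kernel space keeps swapper_pg_dir, user space
	 *		// picks up the pgd cached in %cr25
	 *		return spc == 0 ? swapper_pg_dir : read_cr25();
	 *	}
	 */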

	/*
	 * space_check(spc,tmp,fault)
	 *
	 * spc   - The space we saw the fault with.
	 * tmp   - The place to store the current space.
	 * fault - Function to call on failure.
	 *
	 * Only allow faults on different spaces from the
	 * currently active one if we're the kernel
	 */
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=) %r0,\tmp,%r0		/* nullify if executing as kernel */
	cmpb,COND(<>),n \tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#else
# if PAGE_SIZE > 4096
	extru	\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
# else
	extru	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# endif
# endif
#endif
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	dep	%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	copy	\pmd,%r9
	SHLREG	%r9,PxD_VALUE_SHIFT,\pmd
	extru	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	LDREG	%r0(\pmd),\pte
	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
	.endm
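
	/*
	 * A hedged C sketch of the walk L2_ptep performs; the constants are
	 * illustrative stand-ins for the asm-offsets values, and a null
	 * return plays the role of the \fault branch:
	 *
	 *	#include <stdint.h>
	 *	#define PAGE_SHIFT	12		// illustrative
	 *	#define PTRS_PER_PTE	1024		// illustrative
	 *	#define PMD_SHIFT	(PAGE_SHIFT + 10)
	 *	#define PxD_PRESENT	0x1		// _PxD_PRESENT_BIT stand-in
	 *	#define PxD_FLAG_MASK	0xf		// PxD_FLAG_SHIFT stand-in
	 *	#define PxD_VALUE_SHIFT	4
	 *	#define PAGE_PRESENT	0x1		// _PAGE_PRESENT_BIT stand-in
	 *
	 *	static uint64_t *l2_ptep(uint64_t *pmd_base, uint64_t va)
	 *	{
	 *		uint64_t pmd = pmd_base[(va >> PMD_SHIFT) & (PTRS_PER_PTE - 1)];
	 *		if (!(pmd & PxD_PRESENT))
	 *			return 0;		// bb,>=,n ... \fault
	 *		pmd &= ~(uint64_t)PxD_FLAG_MASK; // clear flags
	 *		pmd <<= PxD_VALUE_SHIFT;	// entry -> pte table address
	 *		uint64_t *pte = (uint64_t *)(uintptr_t)pmd
	 *				+ ((va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
	 *		return (*pte & PAGE_PRESENT) ? pte : 0;
	 *	}
	 */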

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	copy	%r0,\pte
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s	\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld	\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy	\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm

	/* Acquire pa_tlb_lock lock and recheck page is still present. */
	.macro	tlb_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
	cmpib,COND(=),n	0,\spc,2f
	load_pa_tlb_lock \tmp
1:	LDCW	0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG	0(\ptp),\pte
	bb,<,n	\pte,_PAGE_PRESENT_BIT,2f
	b	\fault
	stw	\spc,0(\tmp)
2:
#endif
	.endm
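
	/*
	 * PA-RISC's only atomic memory op is ldcw (load-and-clear word): the
	 * lock is free while the word is non-zero, and loading it atomically
	 * writes zero, so acquisition is an exchange with 0.  A hedged C
	 * analogue of tlb_lock, including the mandatory pte recheck (another
	 * CPU may have cleared the pte while we spun):
	 *
	 *	#include <stdatomic.h>
	 *	#include <stdint.h>
	 *	#define PAGE_PRESENT 0x1	// _PAGE_PRESENT_BIT stand-in
	 *
	 *	static int tlb_lock_c(atomic_uint *lock, uint64_t *ptp,
	 *			      uint64_t *pte, unsigned int spc)
	 *	{
	 *		if (spc == 0)
	 *			return 1;	// kernel space: lock is skipped
	 *		while (atomic_exchange(lock, 0) == 0)
	 *			;		// 1: LDCW 0(\tmp),\tmp1; spin
	 *		*pte = *ptp;		// LDREG 0(\ptp),\pte
	 *		if (*pte & PAGE_PRESENT)
	 *			return 1;	// go insert the translation
	 *		*lock = spc;		// release: stw \spc,0(\tmp)
	 *		return 0;		// ... and take the \fault path
	 *	}
	 */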

	/* Release pa_tlb_lock lock without reloading lock address. */
	.macro	tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
	or,COND(=)	%r0,\spc,%r0
	sync
	or,COND(=)	%r0,\spc,%r0
	stw	\spc,0(\tmp)
#endif
	.endm

	/* Release pa_tlb_lock lock. */
	.macro	tlb_unlock1	spc,tmp
#ifdef CONFIG_SMP
	load_pa_tlb_lock \tmp
	tlb_unlock0	\spc,\tmp
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_accessed	ptp,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptp)
	.endm
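
	/*
	 * The and,COND(<>) nullifies the store when the bit is already set,
	 * which in C is simply (the bit value is illustrative):
	 *
	 *	#define _PAGE_ACCESSED 0x2	// illustrative value
	 *
	 *	static void update_accessed_c(unsigned long *ptp, unsigned long pte)
	 *	{
	 *		unsigned long new = pte | _PAGE_ACCESSED;
	 *		if (new != pte)		// skip store if already accessed,
	 *			*ptp = new;	// keeping the cache line clean
	 *	}
	 */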

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptp,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy	\pte,\tmp
	extrd,u	\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
			64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,\
			(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi	_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
			(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u	\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
			64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,\
			(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm
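
	/*
	 * The net effect of the extract/deposit pairs above, in hedged C: the
	 * CPU TLB always takes 4k page frame numbers, so a pfn based on a
	 * larger kernel PAGE_SIZE is shifted up by the difference (the
	 * page-size encoding deposited into the low bits is omitted here):
	 *
	 *	#define PAGE_SHIFT	14		// e.g. 16KB kernel pages
	 *	#define PAGE_ADD_SHIFT	(PAGE_SHIFT - 12)
	 *
	 *	static unsigned long tlb_pfn(unsigned long kernel_pfn)
	 *	{
	 *		// one 16KB pfn covers four consecutive 4KB pfns
	 *		return kernel_pfn << PAGE_ADD_SHIFT;
	 *	}
	 */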

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot,tmp
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi	7,11,3,\prot

	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd	%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi	1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */
	depi	0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG	\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm
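
	/*
	 * What f_extend achieves, modelled on the raw 32-bit physical
	 * address rather than the shifted pte the macro really operates on
	 * (a hedged sketch): addresses of the form 0xfXXXXXXX are I/O space
	 * and live at 0xfffffffffXXXXXXX physically, so the f's are
	 * sign-extended into the top word.
	 *
	 *	#include <stdint.h>
	 *
	 *	static uint64_t f_extend_c(uint64_t addr)
	 *	{
	 *		if (((addr >> 28) & 0xf) == 0xf)	// top nibble all ones?
	 *			addr |= 0xffffffff00000000ULL;	// extend the f's
	 *		return addr;
	 *	}
	 */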

	/* The alias region is an 8MB-aligned 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	depi	0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl	%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u	\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi	(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z	\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z	\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm

	/*
	 * Fault vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32
	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32
	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif

	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */
ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)

	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl	%r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */
	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext.  Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page).  Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working.  The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n 0,%r20,intr_restore /* backward */

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals.  We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy		%r16,%r29
	ldo		PT_FR31(%r29),%r1
	rest_fp		%r1
	rest_general	%r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm		PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1	%r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace.  If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm	PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0.  otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */
intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)

	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp	%sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n	PARISC_ITLB_TRAP,%r26,skip_save_ior

	mfctl	%isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%ior, %r17

#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi	0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n	intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG	PT_IASQ0(%r29), %r16
	LDREG	PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG	%r16, PT_IASQ0(%r29)
	STREG	%r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)

	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions.  We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held).  If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction.  Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead.  We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed.  We defer everything
	 * else to the "slow" path.
	 */

	mfctl	%cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew,probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi	0x280,%r16
	and	%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_probe_check
	bb,>=,n	%r9,26,nadtlb_nullify	/* m bit not set, just nullify */
	BL	get_register,%r25
	extrw,u	%r9,15,5,%r8		/* Get index register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	copy	%r1,%r24
	BL	get_register,%r25
	extrw,u	%r9,10,5,%r8		/* Get base register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL	set_register,%r25
	add,l	%r1,%r24,%r1		/* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl	%ipsw,%r8
	ldil	L%PSW_N,%r9
	or	%r8,%r9,%r8		/* Set PSW_N */
	mtctl	%r8,%ipsw

	rfir
	nop

	/*
	   When there is no translation for the probe address then we
	   must nullify the insn and return zero in the target register.
	   This will indicate to the calling code that it does not have
	   write/read privileges to this address.

	   This should technically work for prober and probew in PA 1.1,
	   and also probe,r and probe,w in PA 2.0

	   WARNING: USE ONLY NON-SHADOWED REGISTERS WITH PROBE INSN!
	   THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	 */
nadtlb_probe_check:
	ldi	0x80,%r16
	and	%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_fault	/* Must be probe,[rw] */
	BL	get_register,%r25	/* Find the target register */
	extrw,u	%r9,31,5,%r8		/* Get target register */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL	set_register,%r25
	copy	%r0,%r1			/* Write zero to target register */
	b	nadtlb_nullify		/* Nullify return insn */
	nop

#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba		pte,(%sr0, va)
	iitlbp		prot,(%sr0, va)

	rfir
	nop

itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b	intr_save
	ldi	31,%r8	/* Use an unused code */

dbit_fault:
	b	intr_save
	ldi	20,%r8

itlb_fault:
	b	intr_save
	ldi	6,%r8

nadtlb_fault:
	b	intr_save
	ldi	17,%r8

naitlb_fault:
	b	intr_save
	ldi	16,%r8

dtlb_fault:
	b	intr_save
	ldi	15,%r8

	/* Register saving semantics for system calls:
	 *
	 *	%r1		   clobbered by system call macro in userspace
	 *	%r2		   saved in PT_REGS by gateway page
	 *	%r3  - %r18	   preserved by C code (saved by signal code)
	 *	%r19 - %r20	   saved in PT_REGS by gateway page
	 *	%r21 - %r22	   non-standard syscall args
	 *			   stored in kernel stack by gateway page
	 *	%r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	 *	%r27 - %r30	   saved in PT_REGS by gateway page
	 *	%r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)
	 *
	 *	%fr0  - %fr3	   status/exception, not preserved
	 *	%fr4  - %fr7	   arguments
	 *	%fr8  - %fr11	   not preserved by C code
	 *	%fr12 - %fr21	   preserved by C code
	 *	%fr22 - %fr31	   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG	%r10,PT_GR10(\regs)
	STREG	%r11,PT_GR11(\regs)
	STREG	%r12,PT_GR12(\regs)
	STREG	%r13,PT_GR13(\regs)
	STREG	%r14,PT_GR14(\regs)
	STREG	%r15,PT_GR15(\regs)
	STREG	%r16,PT_GR16(\regs)
	STREG	%r17,PT_GR17(\regs)
	STREG	%r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG	PT_GR10(\regs),%r10
	LDREG	PT_GR11(\regs),%r11
	LDREG	PT_GR12(\regs),%r12
	LDREG	PT_GR13(\regs),%r13
	LDREG	PT_GR14(\regs),%r14
	LDREG	PT_GR15(\regs),%r15
	LDREG	PT_GR16(\regs),%r16
	LDREG	PT_GR17(\regs),%r17
	LDREG	PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)
ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */
	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	STREG	%r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig
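	/* The check/handle pair above behaves, in effect, like this C loop
	 * (a sketch; do_notify_resume() is the real C entry point, called
	 * here with regs in %r26 and in_syscall = 1 in %r25):
	 *
	 *	while (ti->flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME)) {
	 *		reg_save(regs);		// callee-saves for sigcontext
	 *		do_notify_resume(regs, 1);
	 *		reg_restore(regs);
	 *	}
	 */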
syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		/* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		/* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		/* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27		/* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28		/* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31		/* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG	TASK_PT_GR30(%r1),%r1		/* Get user sp */
	rsm	PSW_SM_I, %r0
	copy	%r1,%r30			/* Restore user sp */
	mfsp	%sr3,%r1			/* Get user space id */
	mtsp	%r1,%sr7			/* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			/* Restore sr4 */
	mtsp	%r1,%sr5			/* Restore sr5 */
	mtsp	%r1,%sr6			/* Restore sr6 */

	depi	3,31,2,%r31			/* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
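	/* The W-bit handling above, as C (a sketch; the extrd,u,*<>
	 * nullifies the rsm when the extracted bit is non-zero):
	 *
	 *	wide = sp & 1;		// W bit is parked in the low bit of sp
	 *	if (!wide)
	 *		psw &= ~PSW_W;	// rsm PSW_SM_W
	 *	sp ^= wide;		// clear the flag bit again
	 */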
	be,n	0(%sr3,%r31)			/* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
syscall_restore_rfi:
	ldo	-1(%r0),%r2			/* Set recovery cntr to -1 */
	mtctl	%r2,%cr0			/*   for immediate trap */
	LDREG	TASK_PT_PSW(%r1),%r2		/* Get old PSW */
	ldi	0x0b,%r20			/* Create new PSW */
	depi	-1,13,1,%r20			/* C, Q, D, and I bits */

	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
	 * numbers in asm-offsets.c */

	/* if (%r19 & SINGLESTEP_BIT) then set bit 27 of %r20 */
	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
	depi	-1,27,1,%r20			/* R bit */

	/* if (%r19 & BLOCKSTEP_BIT) then set bit 7 of %r20 */
	extru,=	%r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
	depi	-1,7,1,%r20			/* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)
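	/* In C terms, the PSW construction above is roughly (a sketch;
	 * PSW_R and PSW_T name the recovery-counter and taken-branch
	 * trap-enable bits):
	 *
	 *	psw = 0x0b | C | Q | D | I;
	 *	if (flags & _TIF_SINGLESTEP)
	 *		psw |= PSW_R;	// trap after each user instruction
	 *	if (flags & _TIF_BLOCKSTEP)
	 *		psw |= PSW_T;	// trap on each taken branch
	 */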
	/* Always store space registers, since sr3 can be changed (e.g. fork) */
	mfsp	%sr3,%r25
	STREG	%r25,TASK_PT_SR3(%r1)
	STREG	%r25,TASK_PT_SR4(%r1)
	STREG	%r25,TASK_PT_SR5(%r1)
	STREG	%r25,TASK_PT_SR6(%r1)
	STREG	%r25,TASK_PT_SR7(%r1)
	STREG	%r25,TASK_PT_IASQ0(%r1)
	STREG	%r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
	bb,<	%r2,30,pt_regs_ok		/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25				/* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2			/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2			/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2	/* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)			/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)
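	/* Taken together, syscall_check_resched and syscall_do_resched
	 * behave like this C loop (a sketch; the loop exists because
	 * schedule() is entered with %r2 pointing back at
	 * syscall_check_resched):
	 *
	 *	while (ti->flags & _TIF_NEED_RESCHED)
	 *		schedule();
	 */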
#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have it all in one L1 cacheline.
	 */
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)
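	/* A sketch of the 4-dword layout built above, per the comment in
	 * mcount (the first two dwords are unused by the ABI, so they hold
	 * code here):
	 *
	 *	dwords 0-1: the mcount and ftrace_stub instruction pairs
	 *	dword  2:   address of mcount	(.dword mcount)
	 *	dword  3:   global gp value, patched in by head.S
	 */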
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy	%r3,%r1
	STREG	%r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy	%sp,%r3
	STREGM	%r1,FRAME_SIZE(%sp)
	STREG	%ret0,8(%r3)
	STREG	%ret1,16(%r3)
#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32	ftrace_return_to_handler,%ret0
	load32	.Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi	0,%r26
.Lftrace_ret:
	copy	%ret0,%rp

	/* restore original return values */
	LDREG	8(%r3),%ret0
	LDREG	16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM	-FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)
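	/* In rough C terms the trampoline above does (a sketch;
	 * ftrace_return_to_handler() is the generic ftrace helper that
	 * hands back the original return address):
	 *
	 *	saved0 = ret0;  saved1 = ret1;	// preserve real return values
	 *	rp = ftrace_return_to_handler(0);
	 *	ret0 = saved0;  ret1 = saved1;
	 *	goto *rp;			// return to the real caller
	 */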
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_IRQSTACKS
	/* void call_on_stack(unsigned long param1, void *func,
		unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
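	/* Typical use is running a handler on the per-CPU IRQ stack and
	 * coming back on the original one, e.g. (a sketch; cf.
	 * execute_on_irq_stack() in irq.c):
	 *
	 *	call_on_stack(param1, (void *)handler, irq_stack_top);
	 *
	 * %sp is restored from the frame marker when func returns.
	 */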
#endif /* CONFIG_IRQSTACKS */
ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)	/* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)	/* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)	/* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)	/* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)	/* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)	/* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)	/* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)	/* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)	/* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)	/* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)	/* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)	/* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)	/* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)	/* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)	/* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)	/* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)	/* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)	/* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)	/* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)	/* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)	/* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)	/* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)	/* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)	/* r31 */
	copy	%r31,%r1
ENDPROC_CFI(get_register)
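	/* The blr above indexes a table of two-instruction entries, one
	 * per register; conceptually (a sketch; set_register below is the
	 * same table in the store direction):
	 *
	 *	switch (r8) {
	 *	case 1: case 8: case 9: case 16: case 17: case 24: case 25:
	 *		r1 = -1;	// shadowed: rfir would undo the copy
	 *		break;
	 *	default:
	 *		r1 = reg[r8];
	 *	}
	 */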
ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)	/* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)	/* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)	/* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)	/* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)	/* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)	/* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)	/* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)	/* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)	/* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)	/* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)	/* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)	/* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)	/* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)	/* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)	/* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)	/* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)	/* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)	/* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)	/* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)	/* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)	/* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)	/* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)	/* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)	/* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)	/* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)	/* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)	/* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)	/* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)	/* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)	/* r31 */
	copy	%r1,%r31
ENDPROC_CFI(set_register)