/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-compat.h>

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
        li      r4,VRSTATE_VSCR
        lvx     v0,r4,r3
        mtvscr  v0
        REST_32VRS(0,r4,r3)
        blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */
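
/*
 * For orientation (a hedged sketch, not part of the original source):
 * r3 points at a struct thread_vr_state, which the C side declares
 * roughly as
 *
 *      struct thread_vr_state {
 *              vector128 vr[32];       (restored by REST_32VRS above)
 *              vector128 vscr;         (at offset VRSTATE_VSCR)
 *      };
 *
 * so a caller that has already enabled MSR_VEC can invoke
 * load_vr_state(&current->thread.vr_state), as restore_math() does.
 */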

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
        SAVE_32VRS(0, r4, r3)
        mfvscr  v0
        li      r4, VRSTATE_VSCR
        stvx    v0, r4, r3
        lvx     v0, 0, r3       /* reload vr0, which mfvscr clobbered */
        blr
EXPORT_SYMBOL(store_vr_state)

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (i.e., no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
        mfmsr   r5                      /* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3S_64
        /* interrupt doesn't set MSR[RI] and HPT can fault on current access */
        ori     r5,r5,MSR_RI
#endif
        oris    r5,r5,MSR_VEC@h
        MTMSRD(r5)                      /* enable use of AltiVec now */
        isync

        /*
         * While userspace in general ignores VRSAVE, glibc uses it as a
         * boolean to optimise userspace context save/restore. Whenever we
         * take an altivec unavailable exception we must set VRSAVE to
         * something non-zero. Set it to all 1s. See also the programming
         * note in the ISA.
         */
        mfspr   r4,SPRN_VRSAVE
        cmpwi   0,r4,0
        bne+    1f
        li      r4,-1
        mtspr   SPRN_VRSAVE,r4
1:
        /* enable use of VMX after return */
#ifdef CONFIG_PPC32
        addi    r5,r2,THREAD
        oris    r9,r9,MSR_VEC@h
#else
        ld      r4,PACACURRENT(r13)
        addi    r5,r4,THREAD            /* Get THREAD */
        oris    r12,r12,MSR_VEC@h
        std     r12,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S_64
        li      r4,0
        stb     r4,PACASRR_VALID(r13)
#endif
#endif
        li      r4,1
        stb     r4,THREAD_LOAD_VEC(r5)
        addi    r6,r5,THREAD_VRSTATE
        li      r10,VRSTATE_VSCR
        stw     r4,THREAD_USED_VR(r5)
        lvx     v0,r10,r6
        mtvscr  v0
        REST_32VRS(0,r4,r6)
        /* restore registers and return */
        blr
_ASM_NOKPROBE_SYMBOL(load_up_altivec)
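
/*
 * Summary of the sequence above (not from the original source): the
 * routine runs in response to an altivec unavailable exception, sets
 * MSR_VEC in the MSR image the interrupt will return with (r9 on
 * 32-bit, the _MSR(r1) save slot via r12 on 64-bit), marks the thread
 * as a VMX user, and reloads VSCR plus vr0-vr31 from thread.vr_state,
 * so the faulting userspace instruction is simply retried with VMX
 * enabled.
 */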

/*
 * save_altivec(tsk)
 * Save the given task's vector registers to its thread_struct.
 */
_GLOBAL(save_altivec)
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r7,THREAD_VRSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
        PPC_LCMPI       0,r7,0
        bne     2f
        addi    r7,r3,THREAD_VRSTATE
2:      SAVE_32VRS(0,r4,r7)
        mfvscr  v0
        li      r4,VRSTATE_VSCR
        stvx    v0,r4,r7
        lvx     v0,0,r7         /* reload vr0, which mfvscr clobbered */
        blr
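
/*
 * Usage sketch (hedged, not part of the original source): a C caller
 * that has already enabled MSR_VEC saves a task's live VMX state with
 * save_altivec(tsk). The state lands in tsk->thread.vr_save_area when
 * that pointer is non-NULL (THREAD_VRSAVEAREA above), and in
 * tsk->thread.vr_state otherwise.
 */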

#ifdef CONFIG_VSX
#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuses the FP and VMX load paths (load_up_fpu/load_up_altivec),
 * but first checks whether those register sets have already been
 * loaded.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VMX registers if they haven't been loaded yet */
        andi.   r5,r12,MSR_FP
        beql+   load_up_fpu             /* call unless FP already loaded */
        andis.  r5,r12,MSR_VEC@h
        beql+   load_up_altivec         /* call unless VMX already loaded */
#ifdef CONFIG_PPC_BOOK3S_64
        /* interrupt doesn't set MSR[RI] and HPT can fault on current access */
        li      r5,MSR_RI
        mtmsrd  r5,1
#endif
        ld      r4,PACACURRENT(r13)
        addi    r4,r4,THREAD            /* Get THREAD */
        li      r6,1
        stw     r6,THREAD_USED_VSR(r4)  /* ... also set thread used vsr */
        /* enable use of VSX after return */
        oris    r12,r12,MSR_VSX@h
        std     r12,_MSR(r1)
        li      r4,0
        stb     r4,PACASRR_VALID(r13)
        b       fast_interrupt_return_srr
#endif /* CONFIG_VSX */
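
/*
 * VSX defines no register state of its own: VSR0-31 overlay the
 * floating-point registers and VSR32-63 overlay the VMX registers,
 * which is why load_up_vsx only has to ensure both underlying sets are
 * loaded and then set MSR_VSX in the saved MSR before returning.
 */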

/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers. These routines must be called
 * with preempt disabled.
 */
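
/*
 * A hedged note on the callers (not stated in this file): these
 * helpers back the in-kernel emulation of AltiVec floating-point
 * instructions (see vecemu.c). Each takes pointers to 16-byte vectors
 * of four single-precision floats and is called roughly as
 *
 *      vaddfp(&dst, &a, &b);
 *
 * with the exact prototypes declared on the C side.
 */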
        .data
#ifdef CONFIG_PPC32
fpzero:
        .long   0
fpone:
        .long   0x3f800000      /* 1.0 in single-precision FP */
fphalf:
        .long   0x3f000000      /* 0.5 in single-precision FP */

#define LDCONST(fr, name)       \
        lis     r11,name@ha;    \
        lfs     fr,name@l(r11)
#else
fpzero:
        .quad   0
fpone:
        .quad   0x3ff0000000000000      /* 1.0 */
fphalf:
        .quad   0x3fe0000000000000      /* 0.5 */

#ifdef CONFIG_PPC_KERNEL_PCREL
#define LDCONST(fr, name)               \
        pla     r11,name@pcrel;         \
        lfd     fr,0(r11)
#else
#define LDCONST(fr, name)               \
        addis   r11,r2,name@toc@ha;     \
        lfd     fr,name@toc@l(r11)
#endif
#endif
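
/*
 * For illustration (derived from the macro above, not in the original
 * file): on a 64-bit TOC build, LDCONST(fr1, fpone) expands to
 *
 *      addis   r11,r2,fpone@toc@ha
 *      lfd     fr1,fpone@toc@l(r11)
 *
 * i.e. it forms the address of the constant relative to the TOC
 * pointer in r2 and loads it into the named FPR, clobbering r11.
 */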

        .text

/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
SYM_FUNC_START_LOCAL(fpenable)
#ifdef CONFIG_PPC32
        stwu    r1,-64(r1)
#else
        stdu    r1,-64(r1)
#endif
        mfmsr   r10
        ori     r11,r10,MSR_FP
        mtmsr   r11
        isync
        stfd    fr0,24(r1)
        stfd    fr1,16(r1)
        stfd    fr31,8(r1)
        LDCONST(fr1, fpzero)
        mffs    fr31
        MTFSF_L(fr1)
        blr
SYM_FUNC_END(fpenable)

fpdisable:
        mtlr    r12
        MTFSF_L(fr31)
        lfd     fr31,8(r1)
        lfd     fr1,16(r1)
        lfd     fr0,24(r1)
        mtmsr   r10
        isync
        addi    r1,r1,64
        blr
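
/*
 * Calling convention for the pair above, as used by every routine
 * below:
 *
 *      mflr    r12             (fpenable's bl would clobber LR)
 *      bl      fpenable
 *      ...FP work in fr0/fr1; extra FPRs may be stashed in the
 *         unused 32..56(r1) slots of the 64-byte frame...
 *      b       fpdisable       (branch, not bl)
 *
 * fpdisable restores FPSCR, fr0/fr1/fr31, the MSR and the stack frame,
 * then returns to the original caller through the LR value in r12.
 */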

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
        mflr    r12
        bl      fpenable
        li      r0,4
        mtctr   r0
        li      r6,0
1:      lfsx    fr0,r4,r6
        lfsx    fr1,r5,r6
        fadds   fr0,fr0,fr1
        stfsx   fr0,r3,r6
        addi    r6,r6,4
        bdnz    1b
        b       fpdisable
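
/*
 * In C terms, vaddfp and the three routines that follow all perform
 * lane-wise single-precision arithmetic over the four 32-bit lanes of
 * each operand; for vaddfp the loop above is roughly equivalent to
 *
 *      for (i = 0; i < 4; i++)
 *              ((float *)dst)[i] = ((float *)a)[i] + ((float *)b)[i];
 *
 * with dst in r3 and the sources in r4/r5 (plus r6 for the fused
 * multiply-add variants).
 */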

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
        mflr    r12
        bl      fpenable
        li      r0,4
        mtctr   r0
        li      r6,0
1:      lfsx    fr0,r4,r6
        lfsx    fr1,r5,r6
        fsubs   fr0,fr0,fr1
        stfsx   fr0,r3,r6
        addi    r6,r6,4
        bdnz    1b
        b       fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
        mflr    r12
        bl      fpenable
        stfd    fr2,32(r1)
        li      r0,4
        mtctr   r0
        li      r7,0
1:      lfsx    fr0,r4,r7
        lfsx    fr1,r5,r7
        lfsx    fr2,r6,r7
        fmadds  fr0,fr0,fr2,fr1         /* a*c + b, as vmaddfp specifies */
        stfsx   fr0,r3,r7
        addi    r7,r7,4
        bdnz    1b
        lfd     fr2,32(r1)
        b       fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
        mflr    r12
        bl      fpenable
        stfd    fr2,32(r1)
        li      r0,4
        mtctr   r0
        li      r7,0
1:      lfsx    fr0,r4,r7
        lfsx    fr1,r5,r7
        lfsx    fr2,r6,r7
        fnmsubs fr0,fr0,fr2,fr1         /* -(a*c - b), as vnmsubfp specifies */
        stfsx   fr0,r3,r7
        addi    r7,r7,4
        bdnz    1b
        lfd     fr2,32(r1)
        b       fpdisable

/*
 * Vector reciprocal estimate. We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
        mflr    r12
        bl      fpenable
        li      r0,4
        LDCONST(fr1, fpone)
        mtctr   r0
        li      r6,0
1:      lfsx    fr0,r4,r6
        fdivs   fr0,fr1,fr0
        stfsx   fr0,r3,r6
        addi    r6,r6,4
        bdnz    1b
        b       fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
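/*
 * The iteration below is the standard Newton-Raphson step for
 * y = 1/sqrt(s): with f(r) = 1/r^2 - s,
 *
 *      r' = r - f(r)/f'(r) = r + 0.5 * r * (1 - s*r*r)
 *
 * which maps directly onto the fmuls/fnmsubs/fmadds triple per
 * iteration. Each step roughly doubles the number of accurate bits,
 * so two steps on top of the hardware frsqrte estimate suffice for a
 * single-precision result.
 */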
_GLOBAL(vrsqrtefp)
        mflr    r12
        bl      fpenable
        stfd    fr2,32(r1)
        stfd    fr3,40(r1)
        stfd    fr4,48(r1)
        stfd    fr5,56(r1)
        li      r0,4
        LDCONST(fr4, fpone)
        LDCONST(fr5, fphalf)
        mtctr   r0
        li      r6,0
1:      lfsx    fr0,r4,r6
        frsqrte fr1,fr0                 /* r = frsqrte(s) */
        fmuls   fr3,fr1,fr0             /* r * s */
        fmuls   fr2,fr1,fr5             /* r * 0.5 */
        fnmsubs fr3,fr1,fr3,fr4         /* 1 - s * r * r */
        fmadds  fr1,fr2,fr3,fr1         /* r = r + 0.5 * r * (1 - s * r * r) */
        fmuls   fr3,fr1,fr0             /* r * s */
        fmuls   fr2,fr1,fr5             /* r * 0.5 */
        fnmsubs fr3,fr1,fr3,fr4         /* 1 - s * r * r */
        fmadds  fr1,fr2,fr3,fr1         /* r = r + 0.5 * r * (1 - s * r * r) */
        stfsx   fr1,r3,r6
        addi    r6,r6,4
        bdnz    1b
        lfd     fr5,56(r1)
        lfd     fr4,48(r1)
        lfd     fr3,40(r1)
        lfd     fr2,32(r1)
        b       fpdisable