regset.c
// SPDX-License-Identifier: GPL-2.0
/*
 * FPU register's regset abstraction, for ptrace, core dumps, etc.
 */
#include <linux/sched/task_stack.h>
#include <linux/vmalloc.h>

#include <asm/fpu/api.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/prctl.h>

#include "context.h"
#include "internal.h"
#include "legacy.h"
#include "xstate.h"

/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active()
 * routine, as the "regset->n" for the xstate regset will be updated based
 * on the feature capabilities supported by XSAVE.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
        return regset->n;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
        if (boot_cpu_has(X86_FEATURE_FXSR))
                return regset->n;
        else
                return 0;
}

/*
 * The regset get() functions are invoked from:
 *
 *   - coredump to dump the current task's fpstate. If the current task
 *     owns the FPU then the memory state has to be synchronized and the
 *     FPU register state preserved. Otherwise fpstate is already in sync.
 *
 *   - ptrace to dump fpstate of a stopped task, in which case the registers
 *     have already been saved to fpstate on context switch.
 */
static void sync_fpstate(struct fpu *fpu)
{
        if (fpu == &current->thread.fpu)
                fpu_sync_fpstate(fpu);
}
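
/*
 * Context, not part of the kernel build: the get()/set() handlers below are
 * reached through the PTRACE_GETREGSET/PTRACE_SETREGSET interface. A minimal
 * x86-64 userspace sketch (assuming a traced, stopped child `pid`) that ends
 * up in xfpregs_get() via the NT_PRFPREG regset:
 *
 *      #include <stdio.h>
 *      #include <sys/types.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/uio.h>
 *      #include <sys/user.h>
 *      #include <elf.h>
 *
 *      static int dump_fpregs(pid_t pid)
 *      {
 *              struct user_fpregs_struct fpregs;
 *              struct iovec iov = { .iov_base = &fpregs, .iov_len = sizeof(fpregs) };
 *
 *              if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov) < 0)
 *                      return -1;
 *              printf("mxcsr: %#llx\n", (unsigned long long)fpregs.mxcsr);
 *              return 0;
 *      }
 */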

/*
 * Invalidate cached FPU registers before modifying the stopped target
 * task's fpstate.
 *
 * This forces the target task on resume to restore the FPU registers from
 * the modified fpstate. Otherwise the task might skip the restore and
 * operate with the cached FPU registers, which discards the modifications.
 */
static void fpu_force_restore(struct fpu *fpu)
{
        /*
         * Only stopped child tasks can be used to modify the FPU
         * state in the fpstate buffer:
         */
        WARN_ON_FPU(fpu == &current->thread.fpu);

        __fpu_invalidate_fpregs_state(fpu);
}

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
                struct membuf to)
{
        struct fpu *fpu = &target->thread.fpu;

        if (!cpu_feature_enabled(X86_FEATURE_FXSR))
                return -ENODEV;

        sync_fpstate(fpu);

        if (!use_xsave()) {
                return membuf_write(&to, &fpu->fpstate->regs.fxsave,
                                    sizeof(fpu->fpstate->regs.fxsave));
        }

        copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_FX);
        return 0;
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
                unsigned int pos, unsigned int count,
                const void *kbuf, const void __user *ubuf)
{
        struct fpu *fpu = &target->thread.fpu;
        struct fxregs_state newstate;
        int ret;

        if (!cpu_feature_enabled(X86_FEATURE_FXSR))
                return -ENODEV;

        /* No funny business with partial or oversized writes is permitted. */
        if (pos != 0 || count != sizeof(newstate))
                return -EINVAL;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
        if (ret)
                return ret;

        /* Do not allow an invalid MXCSR value. */
        if (newstate.mxcsr & ~mxcsr_feature_mask)
                return -EINVAL;

        fpu_force_restore(fpu);

        /* Copy the state */
        memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate));

        /* Clear xmm8..15 for 32-bit callers */
        BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16);
        if (in_ia32_syscall())
                memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16);

        /* Mark FP and SSE as in use when XSAVE is enabled */
        if (use_xsave())
                fpu->fpstate->regs.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;

        return 0;
}

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
                   struct membuf to)
{
        if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
                return -ENODEV;

        sync_fpstate(&target->thread.fpu);

        copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE);
        return 0;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        struct fpu *fpu = &target->thread.fpu;
        struct xregs_state *tmpbuf = NULL;
        int ret;

        if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
                return -ENODEV;

        /*
         * A whole standard-format XSAVE buffer is needed:
         */
        if (pos != 0 || count != fpu_user_cfg.max_size)
                return -EFAULT;

        if (!kbuf) {
                tmpbuf = vmalloc(count);
                if (!tmpbuf)
                        return -ENOMEM;

                if (copy_from_user(tmpbuf, ubuf, count)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        fpu_force_restore(fpu);
        ret = copy_uabi_from_kernel_to_xstate(fpu->fpstate, kbuf ?: tmpbuf, &target->thread.pkru);

out:
        vfree(tmpbuf);
        return ret;
}
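
/*
 * Context, not part of the kernel build: xstateregs_set() rejects anything
 * but a whole standard-format XSAVE buffer, so a writer must supply the full
 * size. A minimal userspace sketch (hypothetical helper, stopped tracee
 * `pid`, caller-provided buffer of the full xstate size) that round-trips
 * the xstate through NT_X86_XSTATE:
 *
 *      #include <stddef.h>
 *      #include <sys/types.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/uio.h>
 *      #include <elf.h>
 *
 *      static int roundtrip_xstate(pid_t pid, void *buf, size_t xstate_size)
 *      {
 *              struct iovec iov = { .iov_base = buf, .iov_len = xstate_size };
 *
 *              if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
 *                      return -1;
 *              // ... modify buf here; iov.iov_len now holds the size read ...
 *              return ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov);
 *      }
 *
 * xstate_size would typically be discovered via CPUID leaf 0xd, matching
 * fpu_user_cfg.max_size here.
 */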

#ifdef CONFIG_X86_USER_SHADOW_STACK
int ssp_active(struct task_struct *target, const struct user_regset *regset)
{
        if (target->thread.features & ARCH_SHSTK_SHSTK)
                return regset->n;

        return 0;
}

int ssp_get(struct task_struct *target, const struct user_regset *regset,
            struct membuf to)
{
        struct fpu *fpu = &target->thread.fpu;
        struct cet_user_state *cetregs;

        if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
            !ssp_active(target, regset))
                return -ENODEV;

        sync_fpstate(fpu);
        cetregs = get_xsave_addr(&fpu->fpstate->regs.xsave, XFEATURE_CET_USER);
        if (WARN_ON(!cetregs)) {
                /*
                 * This shouldn't ever be NULL because shadow stack was
                 * verified to be enabled above. This means
                 * MSR_IA32_U_CET.CET_SHSTK_EN should be 1 and so
                 * XFEATURE_CET_USER should not be in the init state.
                 */
                return -ENODEV;
        }

        return membuf_write(&to, (unsigned long *)&cetregs->user_ssp,
                            sizeof(cetregs->user_ssp));
}

int ssp_set(struct task_struct *target, const struct user_regset *regset,
            unsigned int pos, unsigned int count,
            const void *kbuf, const void __user *ubuf)
{
        struct fpu *fpu = &target->thread.fpu;
        struct xregs_state *xsave = &fpu->fpstate->regs.xsave;
        struct cet_user_state *cetregs;
        unsigned long user_ssp;
        int r;

        if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
            !ssp_active(target, regset))
                return -ENODEV;

        if (pos != 0 || count != sizeof(user_ssp))
                return -EINVAL;

        r = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_ssp, 0, -1);
        if (r)
                return r;

        /*
         * Some kernel instructions (IRET, etc) can cause exceptions in the
         * case of disallowed CET register values. Just prevent invalid values.
         */
        if (user_ssp >= TASK_SIZE_MAX || !IS_ALIGNED(user_ssp, 8))
                return -EINVAL;

        fpu_force_restore(fpu);

        cetregs = get_xsave_addr(xsave, XFEATURE_CET_USER);
        if (WARN_ON(!cetregs)) {
                /*
                 * This shouldn't ever be NULL because shadow stack was
                 * verified to be enabled above. This means
                 * MSR_IA32_U_CET.CET_SHSTK_EN should be 1 and so
                 * XFEATURE_CET_USER should not be in the init state.
                 */
                return -ENODEV;
        }

        cetregs->user_ssp = user_ssp;
        return 0;
}
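
/*
 * Context, not part of the kernel build: the shadow stack pointer regset
 * above is exposed as NT_X86_SHSTK (0x204; older toolchain elf.h headers may
 * not define it yet). A minimal userspace sketch (stopped tracee `pid`)
 * reading the target's SSP:
 *
 *      #include <sys/types.h>
 *      #include <sys/ptrace.h>
 *      #include <sys/uio.h>
 *      #include <elf.h>
 *
 *      static long read_ssp(pid_t pid, unsigned long *ssp)
 *      {
 *              struct iovec iov = { .iov_base = ssp, .iov_len = sizeof(*ssp) };
 *
 *              return ptrace(PTRACE_GETREGSET, pid, NT_X86_SHSTK, &iov);
 *      }
 */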
#endif /* CONFIG_X86_USER_SHADOW_STACK */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */
static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
        unsigned int tmp; /* to avoid 16 bit prefixes in the code */

        /* Transform each pair of bits into 01 (valid) or 00 (empty) */
        tmp = ~twd;
        tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
        /* and move the valid bits to the lower byte. */
        tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
        tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
        tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
        return tmp;
}
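
/*
 * Worked example of the fold above: the 2-bit i387 tags (00 valid, 01 zero,
 * 10 special, 11 empty) collapse to FXSR's 1-bit-per-register "non-empty"
 * format. With only ST0 in use (tag 00 for register 0, 11 for the rest),
 * twd = 0xfffc:
 *
 *      ~twd & 0xffff              = 0x0003
 *      (tmp | tmp >> 1) & 0x5555  = 0x0001   one bit per non-empty register
 *      the remaining folds yield  = 0x0001   FXSR tag: bit 0 set, bits 7..1 clear
 */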

#define FPREG_ADDR(f, n)        ((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID        0
#define FP_EXP_TAG_ZERO         1
#define FP_EXP_TAG_SPECIAL      2
#define FP_EXP_TAG_EMPTY        3

static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
{
        struct _fpxreg *st;
        u32 tos = (fxsave->swd >> 11) & 7;
        u32 twd = (unsigned long) fxsave->twd;
        u32 tag;
        u32 ret = 0xffff0000u;
        int i;

        for (i = 0; i < 8; i++, twd >>= 1) {
                if (twd & 0x1) {
                        st = FPREG_ADDR(fxsave, (i - tos) & 7);

                        switch (st->exponent & 0x7fff) {
                        case 0x7fff:
                                tag = FP_EXP_TAG_SPECIAL;
                                break;
                        case 0x0000:
                                if (!st->significand[0] &&
                                    !st->significand[1] &&
                                    !st->significand[2] &&
                                    !st->significand[3])
                                        tag = FP_EXP_TAG_ZERO;
                                else
                                        tag = FP_EXP_TAG_SPECIAL;
                                break;
                        default:
                                if (st->significand[3] & 0x8000)
                                        tag = FP_EXP_TAG_VALID;
                                else
                                        tag = FP_EXP_TAG_SPECIAL;
                                break;
                        }
                } else {
                        tag = FP_EXP_TAG_EMPTY;
                }
                ret |= tag << (2 * i);
        }
        return ret;
}
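
/*
 * The reverse direction cannot be a pure bit fold: FXSR only records
 * empty/non-empty, so the 2-bit i387 tag has to be reconstructed from the
 * register contents. For example, a non-empty ST register holding +0.0
 * (exponent 0x0000, all-zero significand) yields FP_EXP_TAG_ZERO, a NaN or
 * infinity (exponent 0x7fff) yields FP_EXP_TAG_SPECIAL, and a normalized
 * value (explicit integer bit set in significand[3]) yields FP_EXP_TAG_VALID.
 */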

/*
 * FXSR floating point environment conversions.
 */
static void __convert_from_fxsr(struct user_i387_ia32_struct *env,
                                struct task_struct *tsk,
                                struct fxregs_state *fxsave)
{
        struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
        struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
        int i;

        env->cwd = fxsave->cwd | 0xffff0000u;
        env->swd = fxsave->swd | 0xffff0000u;
        env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
        env->fip = fxsave->rip;
        env->foo = fxsave->rdp;
        /*
         * These should actually be ds/cs at FPU exception time, but
         * that information is not available in 64-bit mode.
         */
        env->fcs = task_pt_regs(tsk)->cs;
        if (tsk == current) {
                savesegment(ds, env->fos);
        } else {
                env->fos = tsk->thread.ds;
        }
        env->fos |= 0xffff0000;
#else
        env->fip = fxsave->fip;
        env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
        env->foo = fxsave->foo;
        env->fos = fxsave->fos;
#endif

        for (i = 0; i < 8; ++i)
                memcpy(&to[i], &from[i], sizeof(to[0]));
}

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
        __convert_from_fxsr(env, tsk, &tsk->thread.fpu.fpstate->regs.fxsave);
}

void convert_to_fxsr(struct fxregs_state *fxsave,
                     const struct user_i387_ia32_struct *env)
{
        struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
        struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
        int i;

        fxsave->cwd = env->cwd;
        fxsave->swd = env->swd;
        fxsave->twd = twd_i387_to_fxsr(env->twd);
        fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
        fxsave->rip = env->fip;
        fxsave->rdp = env->foo;
        /* cs and ds ignored */
#else
        fxsave->fip = env->fip;
        fxsave->fcs = (env->fcs & 0xffff);
        fxsave->foo = env->foo;
        fxsave->fos = env->fos;
#endif

        for (i = 0; i < 8; ++i)
                memcpy(&to[i], &from[i], sizeof(from[0]));
}

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
               struct membuf to)
{
        struct fpu *fpu = &target->thread.fpu;
        struct user_i387_ia32_struct env;
        struct fxregs_state fxsave, *fx;

        sync_fpstate(fpu);

        if (!cpu_feature_enabled(X86_FEATURE_FPU))
                return fpregs_soft_get(target, regset, to);

        if (!cpu_feature_enabled(X86_FEATURE_FXSR)) {
                return membuf_write(&to, &fpu->fpstate->regs.fsave,
                                    sizeof(struct fregs_state));
        }

        if (use_xsave()) {
                struct membuf mb = { .p = &fxsave, .left = sizeof(fxsave) };

                /* Handle init state optimized xstate correctly */
                copy_xstate_to_uabi_buf(mb, target, XSTATE_COPY_FP);
                fx = &fxsave;
        } else {
                fx = &fpu->fpstate->regs.fxsave;
        }

        __convert_from_fxsr(&env, target, fx);
        return membuf_write(&to, &env, sizeof(env));
}

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
               unsigned int pos, unsigned int count,
               const void *kbuf, const void __user *ubuf)
{
        struct fpu *fpu = &target->thread.fpu;
        struct user_i387_ia32_struct env;
        int ret;

        /* No funny business with partial or oversized writes is permitted. */
        if (pos != 0 || count != sizeof(struct user_i387_ia32_struct))
                return -EINVAL;

        if (!cpu_feature_enabled(X86_FEATURE_FPU))
                return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
        if (ret)
                return ret;

        fpu_force_restore(fpu);

        if (cpu_feature_enabled(X86_FEATURE_FXSR))
                convert_to_fxsr(&fpu->fpstate->regs.fxsave, &env);
        else
                memcpy(&fpu->fpstate->regs.fsave, &env, sizeof(env));

        /*
         * Update the header bit in the xsave header, indicating the
         * presence of FP.
         */
        if (cpu_feature_enabled(X86_FEATURE_XSAVE))
                fpu->fpstate->regs.xsave.header.xfeatures |= XFEATURE_MASK_FP;
        return 0;
}

#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */