ptrace.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 1992 Ross Biro
  7. * Copyright (C) Linus Torvalds
  8. * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
  9. * Copyright (C) 1996 David S. Miller
  10. * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11. * Copyright (C) 1999 MIPS Technologies, Inc.
  12. * Copyright (C) 2000 Ulf Carlsson
  13. *
  14. * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
  15. * binaries.
  16. */
  17. #include <linux/compiler.h>
  18. #include <linux/context_tracking.h>
  19. #include <linux/elf.h>
  20. #include <linux/kernel.h>
  21. #include <linux/sched.h>
  22. #include <linux/sched/task_stack.h>
  23. #include <linux/mm.h>
  24. #include <linux/errno.h>
  25. #include <linux/ptrace.h>
  26. #include <linux/regset.h>
  27. #include <linux/smp.h>
  28. #include <linux/security.h>
  29. #include <linux/stddef.h>
  30. #include <linux/audit.h>
  31. #include <linux/seccomp.h>
  32. #include <linux/ftrace.h>
  33. #include <asm/branch.h>
  34. #include <asm/byteorder.h>
  35. #include <asm/cpu.h>
  36. #include <asm/cpu-info.h>
  37. #include <asm/dsp.h>
  38. #include <asm/fpu.h>
  39. #include <asm/mipsregs.h>
  40. #include <asm/mipsmtregs.h>
  41. #include <asm/page.h>
  42. #include <asm/processor.h>
  43. #include <asm/syscall.h>
  44. #include <linux/uaccess.h>
  45. #include <asm/bootinfo.h>
  46. #include <asm/reg.h>
  47. #define CREATE_TRACE_POINTS
  48. #include <trace/events/syscalls.h>
/*
 * Return the user-visible instruction pointer for an exception taken
 * with saved register state @regs, delegating to exception_epc() which
 * accounts for branch-delay-slot adjustment.
 */
unsigned long exception_ip(struct pt_regs *regs)
{
	return exception_epc(regs);
}
EXPORT_SYMBOL(exception_ip);
  54. /*
  55. * Called by kernel/ptrace.c when detaching..
  56. *
  57. * Make sure single step bits etc are not set.
  58. */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
  64. /*
  65. * Read a general register set. We always use the 64-bit format, even
  66. * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
  67. * Registers are sign extended to fill the available space.
  68. */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	/* 32 GPRs + lo/hi/epc/badvaddr/status/cause = 38 64-bit slots. */
	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	/*
	 * __put_user() return values are deliberately ignored: the whole
	 * destination range was validated by access_ok() above.
	 */
	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}
  86. /*
  87. * Write a general register set. As for PTRACE_GETREGS, we always use
  88. * the 64-bit format. On a 32-bit kernel only the lower order half
  89. * (according to endianness) will be used.
  90. */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	/* Same 38 * 8 byte layout as PTRACE_GETREGS. */
	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	/* badvaddr, status, and cause may not be written. */

	/* System call number may have been changed */
	mips_syscall_update_nr(child, regs);

	return 0;
}
/*
 * Copy the hardware watch register state out to user space
 * (PTRACE_GET_WATCH_REGS). Slots beyond the number of implemented
 * watch registers (up to 8 total) are zero-filled.
 */
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;

	/* WATCH_STYLE selects the matching union member of pt_watch_regs. */
#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		/* Expose only the address-mask and IRW bits of watchhi. */
		__put_user(child->thread.watch.mips3264.watchhi[i] &
			   (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	/* Zero-fill the unimplemented slots. */
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}
/*
 * Validate and install watch register state (PTRACE_SET_WATCH_REGS).
 * Watch addresses must lie within the user address range and watchhi
 * may only carry address-mask bits. TIF_LOAD_WATCH is set iff at
 * least one watchpoint has any of its I/R/W enable bits set.
 */
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			/* 32-bit address space: must fit in 31 bits. */
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~MIPS_WATCHHI_MASK)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & MIPS_WATCHLO_IRW)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/*
		 * NOTE(review): a historical comment here said "Set the
		 * G bit", but no G bit is ORed in - only the validated
		 * mask bits from user space are stored.
		 */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}
  187. /* regset get/set implementations */
  188. #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
  189. static int gpr32_get(struct task_struct *target,
  190. const struct user_regset *regset,
  191. struct membuf to)
  192. {
  193. struct pt_regs *regs = task_pt_regs(target);
  194. u32 uregs[ELF_NGREG] = {};
  195. mips_dump_regs32(uregs, regs);
  196. return membuf_write(&to, uregs, sizeof(uregs));
  197. }
/*
 * Write the general purpose registers from a 32-bit NT_PRSTATUS
 * buffer. k0/k1 and the read-only CP0 slots are silently ignored.
 */
static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	/*
	 * NOTE(review): the loop bound is `num_regs` rather than
	 * `start + num_regs`, which drops the tail of the range when
	 * start != 0 - apparently relying on regset writes always
	 * starting at pos == 0. Confirm against the regset core.
	 */
	for (i = start; i < num_regs; i++) {
		/*
		 * Cast all values to signed here so that if this is a 64-bit
		 * kernel, the supplied 32-bit values will be sign extended.
		 */
		switch (i) {
		case MIPS32_EF_R1 ... MIPS32_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS32_EF_R28 ... MIPS32_EF_R31:
			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
			break;
		case MIPS32_EF_LO:
			regs->lo = (s32)uregs[i];
			break;
		case MIPS32_EF_HI:
			regs->hi = (s32)uregs[i];
			break;
		case MIPS32_EF_CP0_EPC:
			regs->cp0_epc = (s32)uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}
  241. #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
  242. #ifdef CONFIG_64BIT
  243. static int gpr64_get(struct task_struct *target,
  244. const struct user_regset *regset,
  245. struct membuf to)
  246. {
  247. struct pt_regs *regs = task_pt_regs(target);
  248. u64 uregs[ELF_NGREG] = {};
  249. mips_dump_regs64(uregs, regs);
  250. return membuf_write(&to, uregs, sizeof(uregs));
  251. }
/*
 * Write the general purpose registers from a 64-bit NT_PRSTATUS
 * buffer. k0/k1 and the read-only CP0 slots are silently ignored.
 */
static int gpr64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	/*
	 * NOTE(review): loop bound is `num_regs`, not `start + num_regs`
	 * (same pattern as gpr32_set) - assumes writes start at pos == 0.
	 */
	for (i = start; i < num_regs; i++) {
		switch (i) {
		case MIPS64_EF_R1 ... MIPS64_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS64_EF_R28 ... MIPS64_EF_R31:
			regs->regs[i - MIPS64_EF_R0] = uregs[i];
			break;
		case MIPS64_EF_LO:
			regs->lo = uregs[i];
			break;
		case MIPS64_EF_HI:
			regs->hi = uregs[i];
			break;
		case MIPS64_EF_CP0_EPC:
			regs->cp0_epc = uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}
  291. #endif /* CONFIG_64BIT */
  292. #ifdef CONFIG_MIPS_FP_SUPPORT
  293. /*
  294. * Poke at FCSR according to its mask. Set the Cause bits even
  295. * if a corresponding Enable bit is set. This will be noticed at
  296. * the time the thread is switched to and SIGFPE thrown accordingly.
  297. */
  298. static void ptrace_setfcr31(struct task_struct *child, u32 value)
  299. {
  300. u32 fcr31;
  301. u32 mask;
  302. fcr31 = child->thread.fpu.fcr31;
  303. mask = boot_cpu_data.fpu_msk31;
  304. child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
  305. }
/*
 * Copy the FP register file plus FCSR and FIR to user space
 * (PTRACE_GETFPREGS). A task that never used FP reports all-ones
 * register values.
 */
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	/* 32 FP registers + fcr31 + fpu_id: 33 64-bit slots. */
	if (!access_ok(data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		/* FP never used: report all-ones registers. */
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	/* fcr31 and FIR occupy the two 32-bit words after the registers. */
	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}
/*
 * Write the FP register file and FCSR from user space
 * (PTRACE_SETFPREGS). Uses the same 33 * 8 byte layout as
 * ptrace_getfpregs(); the trailing FIR word is ignored.
 */
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 value;
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	/* Mark the FP context live before overwriting it. */
	init_fp_ctx(child);
	fregs = get_fpu_regs(child);
	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	/* Only writable FCSR bits are honoured - see ptrace_setfcr31(). */
	__get_user(value, data + 64);
	ptrace_setfcr31(child, value);

	/* FIR may not be written. */

	return 0;
}
  343. /*
  344. * Copy the floating-point context to the supplied NT_PRFPREG buffer,
  345. * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
  346. * correspond 1:1 to buffer slots. Only general registers are copied.
  347. */
static void fpr_get_fpa(struct task_struct *target,
			struct membuf *to)
{
	/*
	 * NOTE(review): copies straight from &thread.fpu - assumes the
	 * FP register file is the first member of struct mips_fpu_struct;
	 * confirm against the thread_struct definition.
	 */
	membuf_write(to, &target->thread.fpu,
		     NUM_FPU_REGS * sizeof(elf_fpreg_t));
}
  354. /*
  355. * Copy the floating-point context to the supplied NT_PRFPREG buffer,
  356. * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
  357. * general register slots are copied to buffer slots. Only general
  358. * registers are copied.
  359. */
  360. static void fpr_get_msa(struct task_struct *target, struct membuf *to)
  361. {
  362. unsigned int i;
  363. BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
  364. for (i = 0; i < NUM_FPU_REGS; i++)
  365. membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
  366. }
  367. /*
  368. * Copy the floating-point context to the supplied NT_PRFPREG buffer.
  369. * Choose the appropriate helper for general registers, and then copy
  370. * the FCSR and FIR registers separately.
  371. */
  372. static int fpr_get(struct task_struct *target,
  373. const struct user_regset *regset,
  374. struct membuf to)
  375. {
  376. if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
  377. fpr_get_fpa(target, &to);
  378. else
  379. fpr_get_msa(target, &to);
  380. membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
  381. membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
  382. return 0;
  383. }
  384. /*
  385. * Copy the supplied NT_PRFPREG buffer to the floating-point context,
  386. * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
  387. * context's general register slots. Only general registers are copied.
  388. */
static int fpr_set_fpa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	/*
	 * NOTE(review): writes straight into &thread.fpu - assumes the
	 * FP register file is its first member (mirrors fpr_get_fpa).
	 */
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}
  397. /*
  398. * Copy the supplied NT_PRFPREG buffer to the floating-point context,
  399. * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
  400. * bits only of FP context's general register slots. Only general
  401. * registers are copied.
  402. */
static int fpr_set_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	/*
	 * Copy one 64-bit slot at a time into the low half of each
	 * (wider) FP register, stopping when the buffer runs out.
	 */
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}
  421. /*
  422. * Copy the supplied NT_PRFPREG buffer to the floating-point context.
  423. * Choose the appropriate helper for general registers, and then copy
  424. * the FCSR register separately. Ignore the incoming FIR register
  425. * contents though, as the register is read-only.
  426. *
  427. * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
  428. * which is supposed to have been guaranteed by the kernel before
  429. * calling us, e.g. in `ptrace_regset'. We enforce that requirement,
  430. * so that we can safely avoid preinitializing temporaries for
  431. * partial register writes.
  432. */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	/* Buffer layout: 32 FP regs, then FCSR, then FIR. */
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fir_pos = fcr31_pos + sizeof(u32);
	u32 fcr31;
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));

	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	/* Mark the FP context live before modifying it. */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	/* Any remaining data starts with FCSR... */
	if (count > 0) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fcr31,
					 fcr31_pos, fcr31_pos + sizeof(u32));
		if (err)
			return err;

		ptrace_setfcr31(target, fcr31);
	}

	/* ...followed by FIR, which is read-only and discarded. */
	if (count > 0) {
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  fir_pos, fir_pos + sizeof(u32));
		return 0;
	}

	return err;
}
  467. /* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
static int fp_mode_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	/* The FP mode is a single int stored directly into the buffer. */
	return membuf_store(&to, (int)mips_get_process_fp_mode(target));
}
  474. /*
  475. * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
  476. *
  477. * We optimize for the case where `count % sizeof(int) == 0', which
  478. * is supposed to have been guaranteed by the kernel before calling
  479. * us, e.g. in `ptrace_regset'. We enforce that requirement, so
  480. * that we can safely avoid preinitializing temporaries for partial
  481. * mode writes.
  482. */
static int fp_mode_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int fp_mode;
	int err;

	BUG_ON(count % sizeof(int));

	if (pos + count > sizeof(fp_mode))
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
				 sizeof(fp_mode));
	if (err)
		return err;

	/*
	 * NOTE(review): user_regset_copyin() decrements `count` by the
	 * bytes it consumed, and the -EIO check above caps the request
	 * at sizeof(fp_mode), so it is unclear when `count` can still
	 * be non-zero here - verify this guard against the regset core
	 * semantics before relying on it.
	 */
	if (count > 0)
		err = mips_set_process_fp_mode(target, fp_mode);

	return err;
}
  501. #endif /* CONFIG_MIPS_FP_SUPPORT */
  502. #ifdef CONFIG_CPU_HAS_MSA
/*
 * Control word block appended after the vector registers in the
 * NT_MIPS_MSA regset layout (see msa_get()/msa_set()).
 */
struct msa_control_regs {
	unsigned int fir;	/* FP implementation register (read-only) */
	unsigned int fcsr;	/* FP control/status (fcr31) */
	unsigned int msair;	/* MSA implementation register */
	unsigned int msacsr;	/* MSA control/status */
};
/*
 * Copy up to @live_sz bytes of each FP/vector register to @to,
 * padding every regset slot out to regset->size with 0xff bytes.
 */
static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to,
			  unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	/* Padding is emitted in whole 64-bit fill words. */
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
			membuf_store(to, fill);
	}
}
/*
 * Copy the vector register state to an NT_MIPS_MSA buffer, followed
 * by the FIR/FCSR/MSAIR/MSACSR control block. Context the task has
 * never produced is filled with 0xff.
 */
static int msa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	const struct msa_control_regs ctrl_regs = {
		.fir = boot_cpu_data.fpu_id,
		.fcsr = target->thread.fpu.fcr31,
		.msair = boot_cpu_data.msa_id,
		.msacsr = target->thread.fpu.msacsr,
	};

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or MSA, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to,
			      sizeof(target->thread.fpu.fpr[0]));
	}

	return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
}
/*
 * Apply an NT_MIPS_MSA buffer to the task's vector register state.
 * The trailing control block updates FCSR/MSACSR with their
 * exception-cause bits masked off.
 */
static int msa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	struct msa_control_regs ctrl_regs;
	unsigned int cp_sz;
	int i, err, start;

	/* Mark the FP/MSA context live before overwriting it. */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	if (!err)
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
					 wr_size, wr_size + sizeof(ctrl_regs));
	if (!err) {
		/* Cause bits may not be written from user space. */
		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
	}

	return err;
}
  588. #endif /* CONFIG_CPU_HAS_MSA */
  589. #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
  590. /*
  591. * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
  592. */
  593. static int dsp32_get(struct task_struct *target,
  594. const struct user_regset *regset,
  595. struct membuf to)
  596. {
  597. u32 dspregs[NUM_DSP_REGS + 1];
  598. unsigned int i;
  599. BUG_ON(to.left % sizeof(u32));
  600. if (!cpu_has_dsp)
  601. return -EIO;
  602. for (i = 0; i < NUM_DSP_REGS; i++)
  603. dspregs[i] = target->thread.dsp.dspr[i];
  604. dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
  605. return membuf_write(&to, dspregs, sizeof(dspregs));
  606. }
  607. /*
  608. * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
  609. */
static int dsp32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u32 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	/*
	 * NOTE(review): loop bound is `num_regs`, not `start + num_regs`
	 * (same pattern as gpr32_set) - assumes writes start at pos == 0.
	 * Values are cast to s32 so they sign extend on 64-bit kernels.
	 */
	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = (s32)dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = (s32)dspregs[i];
			break;
		}

	return 0;
}
  640. #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
  641. #ifdef CONFIG_64BIT
  642. /*
  643. * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
  644. */
  645. static int dsp64_get(struct task_struct *target,
  646. const struct user_regset *regset,
  647. struct membuf to)
  648. {
  649. u64 dspregs[NUM_DSP_REGS + 1];
  650. unsigned int i;
  651. BUG_ON(to.left % sizeof(u64));
  652. if (!cpu_has_dsp)
  653. return -EIO;
  654. for (i = 0; i < NUM_DSP_REGS; i++)
  655. dspregs[i] = target->thread.dsp.dspr[i];
  656. dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
  657. return membuf_write(&to, dspregs, sizeof(dspregs));
  658. }
  659. /*
  660. * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
  661. */
static int dsp64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u64 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	/*
	 * NOTE(review): loop bound is `num_regs`, not `start + num_regs`
	 * (same pattern as dsp32_set) - assumes writes start at pos == 0.
	 */
	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = dspregs[i];
			break;
		}

	return 0;
}
  692. #endif /* CONFIG_64BIT */
  693. /*
  694. * Determine whether the DSP context is present.
  695. */
  696. static int dsp_active(struct task_struct *target,
  697. const struct user_regset *regset)
  698. {
  699. return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
  700. }
/* Indices into the mips_regsets[] arrays defined below. */
enum mips_regset {
	REGSET_GPR,
	REGSET_DSP,
#ifdef CONFIG_MIPS_FP_SUPPORT
	REGSET_FPR,
	REGSET_FP_MODE,
#endif
#ifdef CONFIG_CPU_HAS_MSA
	REGSET_MSA,
#endif
};
/* Maps a user-visible register name to its offset in struct pt_regs. */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "r4", "c0_epc" */
	int offset;		/* byte offset within struct pt_regs */
};

/* Table entry for register named @reg stored in pt_regs member @r. */
#define REG_OFFSET_NAME(reg, r) {					\
	.name = #reg,							\
	.offset = offsetof(struct pt_regs, r)				\
}

/* Sentinel terminating regoffset_table[]. */
#define REG_OFFSET_END {						\
	.name = NULL,							\
	.offset = 0							\
}
/*
 * Name-to-offset table consumed by regs_query_register_offset();
 * terminated by REG_OFFSET_END.
 */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(c0_status, cp0_status),
	REG_OFFSET_NAME(hi, hi),
	REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	REG_OFFSET_NAME(acx, acx),
#endif
	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
	REG_OFFSET_NAME(c0_cause, cp0_cause),
	REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	REG_OFFSET_NAME(mpl0, mpl[0]),
	REG_OFFSET_NAME(mpl1, mpl[1]),
	REG_OFFSET_NAME(mpl2, mpl[2]),
	REG_OFFSET_NAME(mtp0, mtp[0]),
	REG_OFFSET_NAME(mtp1, mtp[1]),
	REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
	REG_OFFSET_END,
};
  776. /**
  777. * regs_query_register_offset() - query register offset from its name
  778. * @name: the name of a register
  779. *
  780. * regs_query_register_offset() returns the offset of a register in struct
  781. * pt_regs from its name. If the name is invalid, this returns -EINVAL;
  782. */
  783. int regs_query_register_offset(const char *name)
  784. {
  785. const struct pt_regs_offset *roff;
  786. for (roff = regoffset_table; roff->name != NULL; roff++)
  787. if (!strcmp(roff->name, name))
  788. return roff->offset;
  789. return -EINVAL;
  790. }
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

/*
 * Register sets exported to 32-bit (o32) userspace via PTRACE_GETREGSET /
 * core dumps.  Each entry ties an ELF note type to the get/set callbacks
 * defined earlier in this file; FPR/FP_MODE and MSA entries exist only
 * when the kernel is built with the matching support.
 */
static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned int),
		.align		= sizeof(unsigned int),
		.regset_get	= gpr32_get,
		.set		= gpr32_set,
	},
	[REGSET_DSP] = {
		.core_note_type	= NT_MIPS_DSP,
		.n		= NUM_DSP_REGS + 1,	/* DSP accumulators + control */
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= dsp32_get,
		.set		= dsp32_set,
		.active		= dsp_active,	/* present only if CPU has DSP ASE */
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_FP_MODE] = {
		.core_note_type	= NT_MIPS_FP_MODE,
		.n		= 1,
		.size		= sizeof(int),
		.align		= sizeof(int),
		.regset_get	= fp_mode_get,
		.set		= fp_mode_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type	= NT_MIPS_MSA,
		.n		= NUM_FPU_REGS + 1,
		.size		= 16,	/* MSA vector registers are 128 bits wide */
		.align		= 16,
		.regset_get	= msa_get,
		.set		= msa_set,
	},
#endif
};

/* regset view handed to the ptrace/coredump core for o32 tasks. */
static const struct user_regset_view user_mips_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips_regsets,
	.n		= ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT

/*
 * Register sets for 64-bit (n64) and n32 userspace.  Mirrors
 * mips_regsets above but with 64-bit GPR/DSP element sizes.
 * NOTE(review): FP_MODE is listed before FPR here, the reverse of the
 * 32-bit table — presumably historical; ordering only affects note
 * order in core dumps, not lookup.
 */
static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned long),
		.align		= sizeof(unsigned long),
		.regset_get	= gpr64_get,
		.set		= gpr64_set,
	},
	[REGSET_DSP] = {
		.core_note_type	= NT_MIPS_DSP,
		.n		= NUM_DSP_REGS + 1,	/* DSP accumulators + control */
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= dsp64_get,
		.set		= dsp64_set,
		.active		= dsp_active,	/* present only if CPU has DSP ASE */
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FP_MODE] = {
		.core_note_type	= NT_MIPS_FP_MODE,
		.n		= 1,
		.size		= sizeof(int),
		.align		= sizeof(int),
		.regset_get	= fp_mode_get,
		.set		= fp_mode_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type	= NT_MIPS_MSA,
		.n		= NUM_FPU_REGS + 1,
		.size		= 16,	/* MSA vector registers are 128 bits wide */
		.align		= 16,
		.regset_get	= msa_get,
		.set		= msa_set,
	},
#endif
};

/* regset view for native n64 tasks. */
static const struct user_regset_view user_mips64_view = {
	.name		= "mips64",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#ifdef CONFIG_MIPS32_N32

/*
 * n32 tasks use full 64-bit registers (so they share mips64_regsets)
 * but are distinguished by the EF_MIPS_ABI2 ELF flag.
 */
static const struct user_regset_view user_mipsn32_view = {
	.name		= "mipsn32",
	.e_flags	= EF_MIPS_ABI2,
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_MIPS32_N32 */

#endif /* CONFIG_64BIT */
/*
 * Select the regset view matching the ABI of @task: on a pure 32-bit
 * kernel there is only the o32 view; on a 64-bit kernel, o32 tasks
 * (TIF_32BIT_REGS) get the 32-bit view, n32 tasks (TIF_32BIT_ADDR,
 * i.e. 64-bit registers with 32-bit addresses) get the n32 view, and
 * everything else gets the native n64 view.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
		return &user_mipsn32_view;
#endif
	return &user_mips64_view;
#endif
}
/*
 * MIPS-specific ptrace requests.  Handles PEEK/POKE of text, data and
 * the pseudo "USER area" register file (GPRs, FPU, cp0, hi/lo, DSP),
 * whole-regfile get/set, and the MIPS watch registers; anything else
 * is forwarded to the generic ptrace_request().
 *
 * @child:   the traced task (already stopped and attached)
 * @request: PTRACE_* operation
 * @addr:    register index or target address, depending on @request
 * @data:    value to write, or user pointer for results
 *
 * Returns 0 on success or a negative errno (-EIO for registers that do
 * not exist on this configuration).
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	/* The same 'data' word is reinterpreted per request type. */
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		/* 'addr' is a register index, not an address, here. */
		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs;

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
			break;
		}
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
#endif
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = dregs[addr - DSP_BASE];
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			/* System call number may have been changed */
			/* $2 (v0) holds the syscall number; $4 (a0) holds it for
			   the indirect syscall(2) convention. */
			if (addr == 2)
				mips_syscall_update_nr(child, regs);
			else if (addr == 4 &&
				 mips_syscall_is_indirect(child, regs))
				mips_syscall_update_nr(child, regs);
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			/* Writing FP state implies the task now "uses" FP. */
			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case FPC_CSR:
			init_fp_ctx(child);
			ptrace_setfcr31(child, data);
			break;
#endif
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;
#endif
	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 *
 * Runs on syscall entry when any trace work is pending.  Order matters:
 * RCU user-mode exit first, then the ptrace stop (the tracer may change
 * or abort the syscall), then seccomp filtering, tracepoints and audit.
 * Returns the (possibly updated) syscall number, or -1 to skip the call.
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs)
{
	user_exit();

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		/* Tracer requested the syscall be aborted. */
		if (ptrace_report_syscall_entry(regs))
			return -1;
	}

#ifdef CONFIG_SECCOMP
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		int ret, i;
		struct seccomp_data sd;
		unsigned long args[6];

		/* Re-read the syscall number: ptrace may have changed it. */
		sd.nr = current_thread_info()->syscall;
		sd.arch = syscall_get_arch(current);
		syscall_get_arguments(current, regs, args);
		for (i = 0; i < 6; i++)
			sd.args[i] = args[i];
		sd.instruction_pointer = KSTK_EIP(current);

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	/* MIPS o32 ABI: a0-a3 live in $4-$7. */
	audit_syscall_entry(current_thread_info()->syscall,
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (current_thread_info()->syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return current_thread_info()->syscall;
}
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 *
 * Runs on syscall exit: mirrors syscall_trace_enter() in reverse —
 * audit first, then the exit tracepoint and the ptrace stop, then
 * re-enter RCU user mode before returning to userspace.
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, 0);

	user_enter();
}