  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Author: Hanlu Li <lihanlu@loongson.cn>
  4. * Huacai Chen <chenhuacai@loongson.cn>
  5. *
  6. * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  7. *
  8. * Derived from MIPS:
  9. * Copyright (C) 1992 Ross Biro
  10. * Copyright (C) Linus Torvalds
  11. * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
  12. * Copyright (C) 1996 David S. Miller
  13. * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  14. * Copyright (C) 1999 MIPS Technologies, Inc.
  15. * Copyright (C) 2000 Ulf Carlsson
  16. */
  17. #include <linux/kernel.h>
  18. #include <linux/audit.h>
  19. #include <linux/compiler.h>
  20. #include <linux/context_tracking.h>
  21. #include <linux/elf.h>
  22. #include <linux/errno.h>
  23. #include <linux/hw_breakpoint.h>
  24. #include <linux/mm.h>
  25. #include <linux/nospec.h>
  26. #include <linux/ptrace.h>
  27. #include <linux/regset.h>
  28. #include <linux/sched.h>
  29. #include <linux/sched/task_stack.h>
  30. #include <linux/security.h>
  31. #include <linux/smp.h>
  32. #include <linux/stddef.h>
  33. #include <linux/seccomp.h>
  34. #include <linux/thread_info.h>
  35. #include <linux/uaccess.h>
  36. #include <asm/byteorder.h>
  37. #include <asm/cpu.h>
  38. #include <asm/cpu-info.h>
  39. #include <asm/fpu.h>
  40. #include <asm/lbt.h>
  41. #include <asm/loongarch.h>
  42. #include <asm/page.h>
  43. #include <asm/pgtable.h>
  44. #include <asm/processor.h>
  45. #include <asm/ptrace.h>
  46. #include <asm/reg.h>
  47. #include <asm/syscall.h>
/*
 * Lazily give @target an FPU context before ptrace writes to it.
 * A task that has already used the FPU keeps its existing context.
 */
static void init_fp_ctx(struct task_struct *target)
{
	/* The target already has context */
	if (tsk_used_math(target))
		return;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));

	/* ...and mark the context as present so it gets saved/restored */
	set_stopped_child_used_math(target);
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
	/* Stop hardware single-stepping once the tracer is gone. */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
  68. /* regset get/set implementations */
  69. static int gpr_get(struct task_struct *target,
  70. const struct user_regset *regset,
  71. struct membuf to)
  72. {
  73. int r;
  74. struct pt_regs *regs = task_pt_regs(target);
  75. r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
  76. r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
  77. r = membuf_write(&to, &regs->csr_era, sizeof(u64));
  78. r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));
  79. return r;
  80. }
/*
 * Write the general-purpose registers; layout must mirror gpr_get():
 * r0-r31, then orig_a0, csr_era and csr_badvaddr as 64-bit slots.
 */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;
	/* Byte offsets of the trailing slots after the 32 GPRs */
	int a0_start = sizeof(u64) * GPR_NUM;
	int era_start = a0_start + sizeof(u64);
	int badvaddr_start = era_start + sizeof(u64);
	struct pt_regs *regs = task_pt_regs(target);

	/* r0-r31 */
	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->regs,
				 0, a0_start);
	/* original (pre-syscall) a0 */
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->orig_a0,
				  a0_start, a0_start + sizeof(u64));
	/* exception return address (PC) */
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_era,
				  era_start, era_start + sizeof(u64));
	/* bad virtual address CSR */
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_badvaddr,
				  badvaddr_start, badvaddr_start + sizeof(u64));

	return err;
}
  105. /*
  106. * Get the general floating-point registers.
  107. */
  108. static int gfpr_get(struct task_struct *target, struct membuf *to)
  109. {
  110. return membuf_write(to, &target->thread.fpu.fpr,
  111. sizeof(elf_fpreg_t) * NUM_FPU_REGS);
  112. }
/*
 * Get the general floating-point registers when each fpr[] slot is
 * wider than elf_fpreg_t (vector context): copy only the low 64 bits
 * of every register.
 */
static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
{
	int i, r;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++) {
		/* Bits [63:0] of vector register i hold the scalar FP value */
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
	}

	return r;
}
/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	/* Flush live FPU state into thread.fpu before reading it */
	save_fpu_regs(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		r = gfpr_get(target, &to);
	else
		r = gfpr_get_simd(target, &to);

	/* FCC and FCSR follow the FP registers in the regset layout */
	r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
	r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));

	return r;
}
/*
 * Set the general floating-point registers when each fpr[] slot is
 * exactly elf_fpreg_t sized: a single bulk copy-in suffices.
 */
static int gfpr_set(struct task_struct *target,
		    unsigned int *pos, unsigned int *count,
		    const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu.fpr,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}
/*
 * Set the general floating-point registers when each fpr[] slot is
 * wider than elf_fpreg_t (vector context): update only the low 64
 * bits of every register, leaving the upper lanes untouched.
 */
static int gfpr_set_simd(struct task_struct *target,
			 unsigned int *pos, unsigned int *count,
			 const void **kbuf, const void __user **ubuf)
{
	int i, err;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		/* Store into bits [63:0] of vector register i */
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}
/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC register separately.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fcsr_start = fcc_start + sizeof(u64);
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));
	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	/* Make sure the target owns an FP context before writing it */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
	else
		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	/* FCC (8 bytes) then FCSR (4 bytes) follow the FP registers */
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcc, fcc_start,
				  fcc_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcsr, fcsr_start,
				  fcsr_start + sizeof(u32));

	return err;
}
  197. static int cfg_get(struct task_struct *target,
  198. const struct user_regset *regset,
  199. struct membuf to)
  200. {
  201. int i, r;
  202. u32 cfg_val;
  203. i = 0;
  204. while (to.left > 0) {
  205. cfg_val = read_cpucfg(i++);
  206. r = membuf_write(&to, &cfg_val, sizeof(u32));
  207. }
  208. return r;
  209. }
/*
 * CFG registers are read-only.
 */
static int cfg_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	/* Silently accept and discard writes; CPUCFG cannot change. */
	return 0;
}
  220. #ifdef CONFIG_CPU_HAS_LSX
/*
 * Copy the live part of each FP/vector register into @to and pad the
 * remainder of every regset->size slot with 0xff bytes.
 *
 * @live_sz: valid bytes per register (0, 8, 16, or the full fpr size).
 */
static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to, unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	/* Padding is emitted in sizeof(fill) chunks, so it must divide evenly */
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
			membuf_store(to, fill);
		}
	}
}
/*
 * Dump the LSX/LASX vector registers, padding lanes that hold no live
 * context with 0xff (matches the init_fp_ctx() poison pattern).
 */
static int simd_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;

	/* Flush live FPU/vector state into thread.fpu before reading it */
	save_fpu_regs(target);

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or LSX, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
	} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
		/* Copy LSX 128 Bit context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 16);
#endif
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
	}

	return 0;
}
/*
 * Write the LSX/LASX vector registers. When the regset slot width
 * matches the in-kernel fpr width a bulk copy is used; otherwise only
 * the low cp_sz bytes of each register are updated.
 */
static int simd_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	unsigned int cp_sz;
	int i, err, start;

	/* Make sure the target owns an FP context before writing it */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		/* start tracks the source offset; cp_sz bytes land per register */
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	return err;
}
  291. #endif /* CONFIG_CPU_HAS_LSX */
  292. #ifdef CONFIG_CPU_HAS_LBT
  293. static int lbt_get(struct task_struct *target,
  294. const struct user_regset *regset,
  295. struct membuf to)
  296. {
  297. int r;
  298. r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
  299. r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
  300. r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
  301. r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
  302. r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
  303. r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));
  304. return r;
  305. }
/*
 * Write the LBT state; layout must mirror lbt_get(): scr0-scr3,
 * then 32-bit eflags and 32-bit ftop.
 */
static int lbt_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err = 0;
	const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
	const int ftop_start = eflags_start + sizeof(u32);

	/* The four scratch registers are contiguous in thread.lbt */
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.scr0,
				  0, 4 * sizeof(target->thread.lbt.scr0));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.eflags,
				  eflags_start, ftop_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.ftop,
				  ftop_start, ftop_start + sizeof(u32));

	return err;
}
  325. #endif /* CONFIG_CPU_HAS_LBT */
  326. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  327. /*
  328. * Handle hitting a HW-breakpoint.
  329. */
  330. static void ptrace_hbptriggered(struct perf_event *bp,
  331. struct perf_sample_data *data,
  332. struct pt_regs *regs)
  333. {
  334. int i;
  335. struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
  336. for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
  337. if (current->thread.hbp_break[i] == bp)
  338. break;
  339. for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
  340. if (current->thread.hbp_watch[i] == bp)
  341. break;
  342. force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
  343. }
  344. static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
  345. struct task_struct *tsk,
  346. unsigned long idx)
  347. {
  348. struct perf_event *bp;
  349. switch (note_type) {
  350. case NT_LOONGARCH_HW_BREAK:
  351. if (idx >= LOONGARCH_MAX_BRP)
  352. return ERR_PTR(-EINVAL);
  353. idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
  354. bp = tsk->thread.hbp_break[idx];
  355. break;
  356. case NT_LOONGARCH_HW_WATCH:
  357. if (idx >= LOONGARCH_MAX_WRP)
  358. return ERR_PTR(-EINVAL);
  359. idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
  360. bp = tsk->thread.hbp_watch[idx];
  361. break;
  362. }
  363. return bp;
  364. }
  365. static int ptrace_hbp_set_event(unsigned int note_type,
  366. struct task_struct *tsk,
  367. unsigned long idx,
  368. struct perf_event *bp)
  369. {
  370. switch (note_type) {
  371. case NT_LOONGARCH_HW_BREAK:
  372. if (idx >= LOONGARCH_MAX_BRP)
  373. return -EINVAL;
  374. idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
  375. tsk->thread.hbp_break[idx] = bp;
  376. break;
  377. case NT_LOONGARCH_HW_WATCH:
  378. if (idx >= LOONGARCH_MAX_WRP)
  379. return -EINVAL;
  380. idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
  381. tsk->thread.hbp_watch[idx] = bp;
  382. break;
  383. }
  384. return 0;
  385. }
/*
 * Allocate and register a new, initially-disabled breakpoint event
 * for slot @idx, and remember it in the per-thread slot table.
 */
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	int err, type;
	struct perf_event *bp;
	struct perf_event_attr attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_LOONGARCH_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	/* Publish the event so later lookups find it */
	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}
  420. static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
  421. struct arch_hw_breakpoint_ctrl ctrl,
  422. struct perf_event_attr *attr)
  423. {
  424. int err, len, type;
  425. err = arch_bp_generic_fields(ctrl, &len, &type);
  426. if (err)
  427. return err;
  428. attr->bp_len = len;
  429. attr->bp_type = type;
  430. return 0;
  431. }
  432. static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
  433. {
  434. u8 num;
  435. u64 reg = 0;
  436. switch (note_type) {
  437. case NT_LOONGARCH_HW_BREAK:
  438. num = hw_breakpoint_slots(TYPE_INST);
  439. break;
  440. case NT_LOONGARCH_HW_WATCH:
  441. num = hw_breakpoint_slots(TYPE_DATA);
  442. break;
  443. default:
  444. return -EINVAL;
  445. }
  446. *info = reg | num;
  447. return 0;
  448. }
  449. static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
  450. struct task_struct *tsk,
  451. unsigned long idx)
  452. {
  453. struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
  454. if (!bp)
  455. bp = ptrace_hbp_create(note_type, tsk, idx);
  456. return bp;
  457. }
/* Read back the encoded control word of slot @idx (0 if slot is empty). */
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;

	return 0;
}
/* Read back the address mask of slot @idx (0 if slot is empty). */
static int ptrace_hbp_get_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *mask)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*mask = bp ? counter_arch_bp(bp)->mask : 0;

	return 0;
}
/* Read back the address of slot @idx (0 if slot is empty). */
static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;

	return 0;
}
/*
 * Program the control word of slot @idx and enable or disable the
 * underlying perf event accordingly.
 */
static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct thread_info *ti = task_thread_info(tsk);

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		/* Instruction breakpoints have a fixed type and length */
		ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
		ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
		break;
	case NT_LOONGARCH_HW_WATCH:
		decode_ctrl_reg(uctrl, &ctrl);
		break;
	default:
		return -EINVAL;
	}

	if (uctrl & CTRL_PLV_ENABLE) {
		err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
		if (err)
			return err;
		attr.disabled = 0;
		/* Load the debug registers on context switch from now on */
		set_ti_thread_flag(ti, TIF_LOAD_WATCH);
	} else {
		attr.disabled = 1;
		clear_ti_thread_flag(ti, TIF_LOAD_WATCH);
	}

	return modify_user_hw_breakpoint(bp, &attr);
}
/* Set the address mask of slot @idx and revalidate the event. */
static int ptrace_hbp_set_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 mask)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	/* The mask lives in the arch-private part, not in attr */
	info = counter_arch_bp(bp);
	info->mask = mask;

	return modify_user_hw_breakpoint(bp, &attr);
}
/* Set the address of slot @idx and reprogram the event. */
static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;

	/* Kernel-space address cannot be monitored by user-space */
	if ((unsigned long)addr >= XKPRANGE)
		return -EINVAL;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;

	return modify_user_hw_breakpoint(bp, &attr);
}
  555. #define PTRACE_HBP_ADDR_SZ sizeof(u64)
  556. #define PTRACE_HBP_MASK_SZ sizeof(u64)
  557. #define PTRACE_HBP_CTRL_SZ sizeof(u32)
  558. #define PTRACE_HBP_PAD_SZ sizeof(u32)
/*
 * Dump the user_watch_state_v2 layout: the resource-info word, then
 * one (addr, mask, ctrl, pad) tuple per slot until the buffer fills.
 */
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	u64 info;
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));

	/* (address, mask, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, mask);
		membuf_store(&to, ctrl);
		/* 32-bit pad keeps each per-slot record 8-byte aligned */
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}
/*
 * Write the user_watch_state_v2 layout back into the per-slot
 * breakpoint state: skip the read-only resource-info word, then
 * consume (addr, mask, ctrl, pad) tuples until the input runs out.
 */
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0, offset, limit;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	offset = offsetof(struct user_watch_state_v2, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, mask, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		/* A partial address would leave the slot in a torn state */
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
					 offset, offset + PTRACE_HBP_MASK_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
		if (ret)
			return ret;
		offset += PTRACE_HBP_MASK_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Discard the alignment pad between slots */
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
  642. #endif
/* Maps a register name to its byte offset inside struct pt_regs. */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

/* Name -> pt_regs offset lookup table, NULL-name terminated. */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};
  692. /**
  693. * regs_query_register_offset() - query register offset from its name
  694. * @name: the name of a register
  695. *
  696. * regs_query_register_offset() returns the offset of a register in struct
  697. * pt_regs from its name. If the name is invalid, this returns -EINVAL;
  698. */
  699. int regs_query_register_offset(const char *name)
  700. {
  701. const struct pt_regs_offset *roff;
  702. for (roff = regoffset_table; roff->name != NULL; roff++)
  703. if (!strcmp(roff->name, name))
  704. return roff->offset;
  705. return -EINVAL;
  706. }
/* Indices into loongarch64_regsets[]; optional sets are config-gated. */
enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
#ifdef CONFIG_CPU_HAS_LBT
	REGSET_LBT,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
/* Regset descriptors exported via PTRACE_GETREGSET / core dumps. */
static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t),
		.align = sizeof(elf_greg_t),
		.regset_get = gpr_get,
		.set = gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t),
		.align = sizeof(elf_fpreg_t),
		.regset_get = fpr_get,
		.set = fpr_set,
	},
	[REGSET_CPUCFG] = {
		.core_note_type = NT_LOONGARCH_CPUCFG,
		/* 64 CPUCFG words of 32 bits each */
		.n = 64,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = cfg_get,
		.set = cfg_set,
	},
#ifdef CONFIG_CPU_HAS_LSX
	[REGSET_LSX] = {
		.core_note_type = NT_LOONGARCH_LSX,
		.n = NUM_FPU_REGS,
		/* 128-bit vector registers */
		.size = 16,
		.align = 16,
		.regset_get = simd_get,
		.set = simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LASX
	[REGSET_LASX] = {
		.core_note_type = NT_LOONGARCH_LASX,
		.n = NUM_FPU_REGS,
		/* 256-bit vector registers */
		.size = 32,
		.align = 32,
		.regset_get = simd_get,
		.set = simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LBT
	[REGSET_LBT] = {
		.core_note_type = NT_LOONGARCH_LBT,
		/* scr0-scr3 plus the packed eflags/ftop word */
		.n = 5,
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = lbt_get,
		.set = lbt_set,
	},
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_LOONGARCH_HW_BREAK,
		.n = sizeof(struct user_watch_state_v2) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_LOONGARCH_HW_WATCH,
		.n = sizeof(struct user_watch_state_v2) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};
/* The single regset view exposed for all LoongArch (64-bit) tasks. */
static const struct user_regset_view user_loongarch64_view = {
	.name = "loongarch64",
	.e_machine = ELF_ARCH,
	.regsets = loongarch64_regsets,
	.n = ARRAY_SIZE(loongarch64_regsets),
};
/* Every task uses the same 64-bit view; there is no compat variant here. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_loongarch64_view;
}
  809. static inline int read_user(struct task_struct *target, unsigned long addr,
  810. unsigned long __user *data)
  811. {
  812. unsigned long tmp = 0;
  813. switch (addr) {
  814. case 0 ... 31:
  815. tmp = task_pt_regs(target)->regs[addr];
  816. break;
  817. case ARG0:
  818. tmp = task_pt_regs(target)->orig_a0;
  819. break;
  820. case PC:
  821. tmp = task_pt_regs(target)->csr_era;
  822. break;
  823. case BADVADDR:
  824. tmp = task_pt_regs(target)->csr_badvaddr;
  825. break;
  826. default:
  827. return -EIO;
  828. }
  829. return put_user(tmp, data);
  830. }
  831. static inline int write_user(struct task_struct *target, unsigned long addr,
  832. unsigned long data)
  833. {
  834. switch (addr) {
  835. case 0 ... 31:
  836. task_pt_regs(target)->regs[addr] = data;
  837. break;
  838. case ARG0:
  839. task_pt_regs(target)->orig_a0 = data;
  840. break;
  841. case PC:
  842. task_pt_regs(target)->csr_era = data;
  843. break;
  844. case BADVADDR:
  845. task_pt_regs(target)->csr_badvaddr = data;
  846. break;
  847. default:
  848. return -EIO;
  849. }
  850. return 0;
  851. }
/* Arch hook for ptrace(2): handles PEEKUSR/POKEUSR locally. */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = read_user(child, addr, datap);
		break;
	case PTRACE_POKEUSR:
		ret = write_user(child, addr, data);
		break;
	default:
		/* Everything else goes to the generic ptrace core */
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
  870. #ifdef CONFIG_HAVE_HW_BREAKPOINT
  871. static void ptrace_triggered(struct perf_event *bp,
  872. struct perf_sample_data *data, struct pt_regs *regs)
  873. {
  874. struct perf_event_attr attr;
  875. attr = bp->attr;
  876. attr.disabled = true;
  877. modify_user_hw_breakpoint(bp, &attr);
  878. }
/*
 * Emulate single-step by planting a one-shot instruction breakpoint
 * at @addr in hardware slot 0, creating the perf event on first use.
 */
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &tsk->thread;

	bp = thread->hbp_break[0];
	if (!bp) {
		/* First use: register a new enabled breakpoint at @addr */
		ptrace_breakpoint_init(&attr);
		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_8;
		attr.bp_type = HW_BREAKPOINT_X;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->hbp_break[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* Reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;

		csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
	}

	/* Mask out the low address bits so any instruction in range fires */
	info = counter_arch_bp(bp);
	info->mask = TASK_SIZE - 1;

	return 0;
}
/* ptrace API */

/* Arm a one-shot breakpoint at the current PC to single-step @task. */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	set_single_step(task, task_pt_regs(task)->csr_era);
	/* Remember where the step started, for the trap handler */
	task->thread.single_step = task_pt_regs(task)->csr_era;
	set_ti_thread_flag(ti, TIF_SINGLESTEP);
}
/* Stop single-stepping: just drop the thread flag. */
void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
  923. #endif