/* arch/loongarch/kernel/kgdb.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * LoongArch KGDB support
 *
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */
#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/hw_breakpoint.h>
#include <asm/inst.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>
/* Non-zero while any KGDB hardware watchpoint is active (read by trap code). */
int kgdb_watch_activated;
/* Original instruction word saved while a single-step breakpoint is planted. */
static unsigned int stepped_opcode;
/* Address where the single-step breakpoint was planted (0 = none pending). */
static unsigned long stepped_address;
/*
 * Register description table for the gdb remote protocol.
 *
 * For r0-r31/orig_a0/pc/badv the third field is a byte offset into
 * struct pt_regs.  For f0-f31, fcc0-fcc7 and fcsr it is instead an
 * index into the current task's saved FPU context (see dbg_get_reg()
 * and dbg_set_reg(), which dispatch on the register number).
 */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
	{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
	{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
	{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
	{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
	{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
	{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
	{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
	{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
	{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
	{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
	{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
	{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
	{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
	{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
	{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
	{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
	{ "orig_a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, orig_a0) },
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_era) },
	{ "badv", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_badvaddr) },
	/* Floating-point registers: third field is the FPR index. */
	{ "f0", GDB_SIZEOF_REG, 0 },
	{ "f1", GDB_SIZEOF_REG, 1 },
	{ "f2", GDB_SIZEOF_REG, 2 },
	{ "f3", GDB_SIZEOF_REG, 3 },
	{ "f4", GDB_SIZEOF_REG, 4 },
	{ "f5", GDB_SIZEOF_REG, 5 },
	{ "f6", GDB_SIZEOF_REG, 6 },
	{ "f7", GDB_SIZEOF_REG, 7 },
	{ "f8", GDB_SIZEOF_REG, 8 },
	{ "f9", GDB_SIZEOF_REG, 9 },
	{ "f10", GDB_SIZEOF_REG, 10 },
	{ "f11", GDB_SIZEOF_REG, 11 },
	{ "f12", GDB_SIZEOF_REG, 12 },
	{ "f13", GDB_SIZEOF_REG, 13 },
	{ "f14", GDB_SIZEOF_REG, 14 },
	{ "f15", GDB_SIZEOF_REG, 15 },
	{ "f16", GDB_SIZEOF_REG, 16 },
	{ "f17", GDB_SIZEOF_REG, 17 },
	{ "f18", GDB_SIZEOF_REG, 18 },
	{ "f19", GDB_SIZEOF_REG, 19 },
	{ "f20", GDB_SIZEOF_REG, 20 },
	{ "f21", GDB_SIZEOF_REG, 21 },
	{ "f22", GDB_SIZEOF_REG, 22 },
	{ "f23", GDB_SIZEOF_REG, 23 },
	{ "f24", GDB_SIZEOF_REG, 24 },
	{ "f25", GDB_SIZEOF_REG, 25 },
	{ "f26", GDB_SIZEOF_REG, 26 },
	{ "f27", GDB_SIZEOF_REG, 27 },
	{ "f28", GDB_SIZEOF_REG, 28 },
	{ "f29", GDB_SIZEOF_REG, 29 },
	{ "f30", GDB_SIZEOF_REG, 30 },
	{ "f31", GDB_SIZEOF_REG, 31 },
	/* FP condition flags: one byte each, third field is the flag index. */
	{ "fcc0", 1, 0 },
	{ "fcc1", 1, 1 },
	{ "fcc2", 1, 2 },
	{ "fcc3", 1, 3 },
	{ "fcc4", 1, 4 },
	{ "fcc5", 1, 5 },
	{ "fcc6", 1, 6 },
	{ "fcc7", 1, 7 },
	/* FP control/status register: 32 bits. */
	{ "fcsr", 4, 0 },
};
/*
 * Copy the value of register @regno into @mem for the gdb stub.
 *
 * GPR-class registers (up to DBG_PT_REGS_END) are read straight out of
 * @regs; FP-class registers are read from current->thread.fpu after
 * flushing live FPU state with save_fp().
 *
 * Returns the register's name, or NULL for an out-of-range @regno.
 * When the register is unavailable (offset -1, or an FP register while
 * CSR.EUEN.FPEN is clear) @mem is left untouched but the name is still
 * returned.
 */
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	int reg_offset, reg_size;

	if (regno < 0 || regno >= DBG_MAX_REG_NUM)
		return NULL;

	reg_offset = dbg_reg_def[regno].offset;
	reg_size = dbg_reg_def[regno].size;

	if (reg_offset == -1)
		goto out;

	/* Handle general-purpose/orig_a0/pc/badv registers */
	if (regno <= DBG_PT_REGS_END) {
		memcpy(mem, (void *)regs + reg_offset, reg_size);
		goto out;
	}

	/* FP state is only meaningful when the FPU is enabled for this context. */
	if (!(regs->csr_euen & CSR_EUEN_FPEN))
		goto out;

	/* Flush live FPU registers into current->thread.fpu before reading. */
	save_fp(current);

	/* Handle FP registers */
	switch (regno) {
	case DBG_FCSR: /* Process the fcsr */
		memcpy(mem, (void *)&current->thread.fpu.fcsr, reg_size);
		break;
	case DBG_FCC_BASE ... DBG_FCC_END: /* Process the fcc */
		/* reg_offset is the fcc byte index here, not a pt_regs offset. */
		memcpy(mem, (void *)&current->thread.fpu.fcc + reg_offset, reg_size);
		break;
	case DBG_FPR_BASE ... DBG_FPR_END: /* Process the fpr */
		/* reg_offset is the FPR index here, not a pt_regs offset. */
		memcpy(mem, (void *)&current->thread.fpu.fpr[reg_offset], reg_size);
		break;
	default:
		break;
	}

out:
	return dbg_reg_def[regno].name;
}
/*
 * Write the value in @mem into register @regno on behalf of the gdb stub.
 *
 * GPR-class registers are written directly into @regs; FP-class
 * registers are written into current->thread.fpu and then reloaded into
 * the hardware with restore_fp().
 *
 * Returns 0 on success (including silently ignored unavailable
 * registers, matching dbg_get_reg()), -EINVAL for an out-of-range
 * @regno.
 */
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	int reg_offset, reg_size;

	if (regno < 0 || regno >= DBG_MAX_REG_NUM)
		return -EINVAL;

	reg_offset = dbg_reg_def[regno].offset;
	reg_size = dbg_reg_def[regno].size;

	if (reg_offset == -1)
		return 0;

	/* Handle general-purpose/orig_a0/pc/badv registers */
	if (regno <= DBG_PT_REGS_END) {
		memcpy((void *)regs + reg_offset, mem, reg_size);
		return 0;
	}

	/* FP writes are dropped when the FPU is disabled for this context. */
	if (!(regs->csr_euen & CSR_EUEN_FPEN))
		return 0;

	/* Handle FP registers */
	switch (regno) {
	case DBG_FCSR: /* Process the fcsr */
		memcpy((void *)&current->thread.fpu.fcsr, mem, reg_size);
		break;
	case DBG_FCC_BASE ... DBG_FCC_END: /* Process the fcc */
		/* reg_offset is the fcc byte index here. */
		memcpy((void *)&current->thread.fpu.fcc + reg_offset, mem, reg_size);
		break;
	case DBG_FPR_BASE ... DBG_FPR_END: /* Process the fpr */
		/* reg_offset is the FPR index here. */
		memcpy((void *)&current->thread.fpu.fpr[reg_offset], mem, reg_size);
		break;
	default:
		break;
	}

	/* Push the modified FPU context back into the hardware registers. */
	restore_fp(current);

	return 0;
}
  169. /*
  170. * Similar to regs_to_gdb_regs() except that process is sleeping and so
  171. * we may not be able to get all the info.
  172. */
  173. void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
  174. {
  175. /* Initialize to zero */
  176. memset((char *)gdb_regs, 0, NUMREGBYTES);
  177. gdb_regs[DBG_LOONGARCH_RA] = p->thread.reg01;
  178. gdb_regs[DBG_LOONGARCH_TP] = (long)p;
  179. gdb_regs[DBG_LOONGARCH_SP] = p->thread.reg03;
  180. /* S0 - S8 */
  181. gdb_regs[DBG_LOONGARCH_S0] = p->thread.reg23;
  182. gdb_regs[DBG_LOONGARCH_S1] = p->thread.reg24;
  183. gdb_regs[DBG_LOONGARCH_S2] = p->thread.reg25;
  184. gdb_regs[DBG_LOONGARCH_S3] = p->thread.reg26;
  185. gdb_regs[DBG_LOONGARCH_S4] = p->thread.reg27;
  186. gdb_regs[DBG_LOONGARCH_S5] = p->thread.reg28;
  187. gdb_regs[DBG_LOONGARCH_S6] = p->thread.reg29;
  188. gdb_regs[DBG_LOONGARCH_S7] = p->thread.reg30;
  189. gdb_regs[DBG_LOONGARCH_S8] = p->thread.reg31;
  190. /*
  191. * PC use return address (RA), i.e. the moment after return from __switch_to()
  192. */
  193. gdb_regs[DBG_LOONGARCH_PC] = p->thread.reg01;
  194. }
/* Resume execution at @pc by rewriting the exception return address. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->csr_era = pc;
}
  199. void arch_kgdb_breakpoint(void)
  200. {
  201. __asm__ __volatile__ ( \
  202. ".globl kgdb_breakinst\n\t" \
  203. "nop\n" \
  204. "kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
  205. }
/*
 * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
 * then try to fall into the debugger
 */
static int kgdb_loongarch_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;

	/* Userspace events, ignore. */
	if (user_mode(regs))
		return NOTIFY_DONE;

	/* No I/O driver attached yet: nothing KGDB can do with this trap. */
	if (!kgdb_io_module_registered)
		return NOTIFY_DONE;

	/* A debug session is already active: check in as a secondary CPU. */
	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(smp_processor_id(), regs);

	if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
		return NOTIFY_DONE;

	/*
	 * If the debugger itself planted the compiled-in breakpoint
	 * (arch_kgdb_breakpoint), step the PC over it so resuming does
	 * not immediately re-trap.
	 */
	if (atomic_read(&kgdb_setting_breakpoint))
		if (regs->csr_era == (unsigned long)&kgdb_breakinst)
			regs->csr_era += LOONGARCH_INSN_SIZE;

	return NOTIFY_STOP;
}
  228. bool kgdb_breakpoint_handler(struct pt_regs *regs)
  229. {
  230. struct die_args args = {
  231. .regs = regs,
  232. .str = "Break",
  233. .err = BRK_KDB,
  234. .trapnr = read_csr_excode(),
  235. .signr = SIGTRAP,
  236. };
  237. return (kgdb_loongarch_notify(NULL, DIE_TRAP, &args) == NOTIFY_STOP) ? true : false;
  238. }
/* Die-chain hook: routes kernel traps/panics into kgdb_loongarch_notify(). */
static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_loongarch_notify,
};
  242. static inline void kgdb_arch_update_addr(struct pt_regs *regs,
  243. char *remcom_in_buffer)
  244. {
  245. unsigned long addr;
  246. char *ptr;
  247. ptr = &remcom_in_buffer[1];
  248. if (kgdb_hex2long(&ptr, &addr))
  249. regs->csr_era = addr;
  250. }
  251. /* Calculate the new address for after a step */
  252. static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
  253. {
  254. char cj_val;
  255. unsigned int si, si_l, si_h, rd, rj, cj;
  256. unsigned long pc = instruction_pointer(regs);
  257. union loongarch_instruction *ip = (union loongarch_instruction *)pc;
  258. if (pc & 3) {
  259. pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
  260. return -EINVAL;
  261. }
  262. *next_addr = pc + LOONGARCH_INSN_SIZE;
  263. si_h = ip->reg0i26_format.immediate_h;
  264. si_l = ip->reg0i26_format.immediate_l;
  265. switch (ip->reg0i26_format.opcode) {
  266. case b_op:
  267. *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
  268. return 0;
  269. case bl_op:
  270. *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
  271. regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
  272. return 0;
  273. }
  274. rj = ip->reg1i21_format.rj;
  275. cj = (rj & 0x07) + DBG_FCC_BASE;
  276. si_l = ip->reg1i21_format.immediate_l;
  277. si_h = ip->reg1i21_format.immediate_h;
  278. dbg_get_reg(cj, &cj_val, regs);
  279. switch (ip->reg1i21_format.opcode) {
  280. case beqz_op:
  281. if (regs->regs[rj] == 0)
  282. *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
  283. return 0;
  284. case bnez_op:
  285. if (regs->regs[rj] != 0)
  286. *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
  287. return 0;
  288. case bceqz_op: /* bceqz_op = bcnez_op */
  289. if (((rj & 0x18) == 0x00) && !cj_val) /* bceqz */
  290. *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
  291. if (((rj & 0x18) == 0x08) && cj_val) /* bcnez */
  292. *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
  293. return 0;
  294. }
  295. rj = ip->reg2i16_format.rj;
  296. rd = ip->reg2i16_format.rd;
  297. si = ip->reg2i16_format.immediate;
  298. switch (ip->reg2i16_format.opcode) {
  299. case beq_op:
  300. if (regs->regs[rj] == regs->regs[rd])
  301. *next_addr = pc + sign_extend64(si << 2, 17);
  302. return 0;
  303. case bne_op:
  304. if (regs->regs[rj] != regs->regs[rd])
  305. *next_addr = pc + sign_extend64(si << 2, 17);
  306. return 0;
  307. case blt_op:
  308. if ((long)regs->regs[rj] < (long)regs->regs[rd])
  309. *next_addr = pc + sign_extend64(si << 2, 17);
  310. return 0;
  311. case bge_op:
  312. if ((long)regs->regs[rj] >= (long)regs->regs[rd])
  313. *next_addr = pc + sign_extend64(si << 2, 17);
  314. return 0;
  315. case bltu_op:
  316. if (regs->regs[rj] < regs->regs[rd])
  317. *next_addr = pc + sign_extend64(si << 2, 17);
  318. return 0;
  319. case bgeu_op:
  320. if (regs->regs[rj] >= regs->regs[rd])
  321. *next_addr = pc + sign_extend64(si << 2, 17);
  322. return 0;
  323. case jirl_op:
  324. regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
  325. *next_addr = regs->regs[rj] + sign_extend64(si << 2, 17);
  326. return 0;
  327. }
  328. return 0;
  329. }
/*
 * Emulate a single step by planting a breakpoint at the address the
 * current instruction will transfer control to, then resuming.  The
 * saved opcode/address let undo_single_step() restore the text later.
 *
 * Returns 0 on success, or a negative error when the successor address
 * cannot be computed or the kernel text cannot be read/patched.
 */
static int do_single_step(struct pt_regs *regs)
{
	int error = 0;
	unsigned long addr = 0; /* Determine where the target instruction will send us to */

	error = get_step_address(regs, &addr);
	if (error)
		return error;

	/* Store the opcode in the stepped address */
	error = get_kernel_nofault(stepped_opcode, (void *)addr);
	if (error)
		return error;

	stepped_address = addr;

	/* Replace the opcode with the break instruction */
	error = copy_to_kernel_nofault((void *)stepped_address,
				       arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	/* Keep I-cache coherent with the (possibly) patched text. */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);

	if (error) {
		/* Patch failed: clear state so undo_single_step() is a no-op. */
		stepped_opcode = 0;
		stepped_address = 0;
	} else {
		kgdb_single_step = 1;
		atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
	}

	return error;
}
  355. /* Undo a single step */
  356. static void undo_single_step(struct pt_regs *regs)
  357. {
  358. if (stepped_opcode) {
  359. copy_to_kernel_nofault((void *)stepped_address,
  360. (void *)&stepped_opcode, BREAK_INSTR_SIZE);
  361. flush_icache_range(stepped_address, stepped_address + BREAK_INSTR_SIZE);
  362. }
  363. stepped_opcode = 0;
  364. stepped_address = 0;
  365. kgdb_single_step = 0;
  366. atomic_set(&kgdb_cpu_doing_single_step, -1);
  367. }
/*
 * Architecture hook for gdb remote protocol commands:
 *   'D'/'k' - detach/kill: resume with watchpoints disabled (PWE clear),
 *   'c'     - continue (optionally at a new address),
 *   's'     - single step (optionally from a new address).
 *
 * Returns 0 on success, -1 for an unhandled command so the generic stub
 * processes it.
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	int ret = 0;

	/* Remove any still-planted single-step breakpoint before resuming. */
	undo_single_step(regs);

	/* Default: re-enable watch exceptions (PWE) for the resumed context. */
	regs->csr_prmd |= CSR_PRMD_PWE;

	switch (remcom_in_buffer[0]) {
	case 'D':
	case 'k':
		/* Debugger is going away: leave watch exceptions off. */
		regs->csr_prmd &= ~CSR_PRMD_PWE;
		fallthrough;
	case 'c':
		kgdb_arch_update_addr(regs, remcom_in_buffer);
		break;
	case 's':
		kgdb_arch_update_addr(regs, remcom_in_buffer);
		ret = do_single_step(regs);
		break;
	default:
		ret = -1;
	}

	return ret;
}
/* Book-keeping for one hardware breakpoint/watchpoint slot. */
static struct hw_breakpoint {
	unsigned int enabled;			/* slot is in use */
	unsigned long addr;			/* watched address */
	int len;				/* HW_BREAKPOINT_LEN_* */
	int type;				/* HW_BREAKPOINT_X/R/W/RW */
	struct perf_event * __percpu *pev;	/* per-CPU dummy perf events */
} breakinfo[LOONGARCH_MAX_BRP];
/*
 * Reserve a debug-core breakpoint slot for breakinfo[breakno] on every
 * online CPU.  On failure the reservations made so far are rolled back.
 * Returns 0 on success, -1 on failure.
 */
static int hw_break_reserve_slot(int breakno)
{
	int cpu, cnt = 0;
	struct perf_event **pevent;

	for_each_online_cpu(cpu) {
		cnt++;	/* counts attempts, including the one that fails */
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		if (dbg_reserve_bp_slot(*pevent))
			goto fail;
	}

	return 0;

fail:
	/*
	 * Walk the CPUs again, releasing only the cnt-1 slots that were
	 * successfully reserved; the decrement-then-test stops before the
	 * CPU whose reservation failed.
	 */
	for_each_online_cpu(cpu) {
		cnt--;
		if (!cnt)
			break;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		dbg_release_bp_slot(*pevent);
	}

	return -1;
}
  420. static int hw_break_release_slot(int breakno)
  421. {
  422. int cpu;
  423. struct perf_event **pevent;
  424. if (dbg_is_early)
  425. return 0;
  426. for_each_online_cpu(cpu) {
  427. pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
  428. if (dbg_release_bp_slot(*pevent))
  429. /*
  430. * The debugger is responsible for handing the retry on
  431. * remove failure.
  432. */
  433. return -1;
  434. }
  435. return 0;
  436. }
  437. static int kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
  438. {
  439. int i;
  440. for (i = 0; i < LOONGARCH_MAX_BRP; i++)
  441. if (!breakinfo[i].enabled)
  442. break;
  443. if (i == LOONGARCH_MAX_BRP)
  444. return -1;
  445. switch (bptype) {
  446. case BP_HARDWARE_BREAKPOINT:
  447. breakinfo[i].type = HW_BREAKPOINT_X;
  448. break;
  449. case BP_READ_WATCHPOINT:
  450. breakinfo[i].type = HW_BREAKPOINT_R;
  451. break;
  452. case BP_WRITE_WATCHPOINT:
  453. breakinfo[i].type = HW_BREAKPOINT_W;
  454. break;
  455. case BP_ACCESS_WATCHPOINT:
  456. breakinfo[i].type = HW_BREAKPOINT_RW;
  457. break;
  458. default:
  459. return -1;
  460. }
  461. switch (len) {
  462. case 1:
  463. breakinfo[i].len = HW_BREAKPOINT_LEN_1;
  464. break;
  465. case 2:
  466. breakinfo[i].len = HW_BREAKPOINT_LEN_2;
  467. break;
  468. case 4:
  469. breakinfo[i].len = HW_BREAKPOINT_LEN_4;
  470. break;
  471. case 8:
  472. breakinfo[i].len = HW_BREAKPOINT_LEN_8;
  473. break;
  474. default:
  475. return -1;
  476. }
  477. breakinfo[i].addr = addr;
  478. if (hw_break_reserve_slot(i)) {
  479. breakinfo[i].addr = 0;
  480. return -1;
  481. }
  482. breakinfo[i].enabled = 1;
  483. return 0;
  484. }
  485. static int kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
  486. {
  487. int i;
  488. for (i = 0; i < LOONGARCH_MAX_BRP; i++)
  489. if (breakinfo[i].addr == addr && breakinfo[i].enabled)
  490. break;
  491. if (i == LOONGARCH_MAX_BRP)
  492. return -1;
  493. if (hw_break_release_slot(i)) {
  494. pr_err("Cannot remove hw breakpoint at %lx\n", addr);
  495. return -1;
  496. }
  497. breakinfo[i].enabled = 0;
  498. return 0;
  499. }
  500. static void kgdb_disable_hw_break(struct pt_regs *regs)
  501. {
  502. int i;
  503. int cpu = raw_smp_processor_id();
  504. struct perf_event *bp;
  505. for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
  506. if (!breakinfo[i].enabled)
  507. continue;
  508. bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
  509. if (bp->attr.disabled == 1)
  510. continue;
  511. arch_uninstall_hw_breakpoint(bp);
  512. bp->attr.disabled = 1;
  513. }
  514. /* Disable hardware debugging while we are in kgdb */
  515. csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
  516. }
/*
 * Tear down all hardware break/watchpoints (e.g. on debugger detach).
 *
 * Two-phase per slot: a breakpoint still installed on this CPU is first
 * uninstalled and marked disabled (and stays 'enabled' in breakinfo);
 * an already-disabled one has its debug-core slot released and is freed.
 * NOTE(review): this mirrors the x86 kgdb pattern — a freshly
 * uninstalled slot appears to keep its reservation until a later pass;
 * confirm against the generic debug core if changing.
 */
static void kgdb_remove_all_hw_break(void)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (!breakinfo[i].enabled)
			continue;

		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (!bp->attr.disabled) {
			/* Still live on this CPU: uninstall it first. */
			arch_uninstall_hw_breakpoint(bp);
			bp->attr.disabled = 1;
			continue;
		}

		if (hw_break_release_slot(i))
			pr_err("KGDB: hw bpt remove failed %lx\n", breakinfo[i].addr);
		breakinfo[i].enabled = 0;
	}

	/* No breakpoints left: mask watch exceptions on this CPU. */
	csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);

	kgdb_watch_activated = 0;
}
/*
 * (Re)install every enabled breakpoint on the current CPU when leaving
 * the debugger, then enable watch exceptions (CRMD.WE) if anything was
 * activated.  Counterpart of kgdb_disable_hw_break().
 */
static void kgdb_correct_hw_break(void)
{
	int i, activated = 0;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		struct perf_event *bp;
		int val;
		int cpu = raw_smp_processor_id();

		if (!breakinfo[i].enabled)
			continue;

		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (bp->attr.disabled != 1)	/* already installed */
			continue;

		/* Copy the slot's parameters into the dummy perf event. */
		bp->attr.bp_addr = breakinfo[i].addr;
		bp->attr.bp_len = breakinfo[i].len;
		bp->attr.bp_type = breakinfo[i].type;

		val = hw_breakpoint_arch_parse(bp, &bp->attr, counter_arch_bp(bp));
		if (val)
			return;

		val = arch_install_hw_breakpoint(bp);
		if (!val)
			bp->attr.disabled = 0;

		activated = 1;
	}

	/* Gate watch exceptions on whether any slot is active. */
	csr_xchg32(activated ? CSR_CRMD_WE : 0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);

	kgdb_watch_activated = activated;
}
/*
 * gdb_bpt_instr is the little-endian byte encoding of "break 2"
 * (BRK_KDB), i.e. instruction word 0x002a0002: break_op >> 1 yields the
 * 0x2a opcode byte.
 */
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr		= {0x02, 0x00, break_op >> 1, 0x00}, /* BRK_KDB = 2 */
	.flags			= KGDB_HW_BREAKPOINT,
	.set_hw_breakpoint	= kgdb_set_hw_break,
	.remove_hw_breakpoint	= kgdb_remove_hw_break,
	.disable_hw_break	= kgdb_disable_hw_break,
	.remove_all_hw_break	= kgdb_remove_all_hw_break,
	.correct_hw_break	= kgdb_correct_hw_break,
};
/* Early KGDB init: hook the die chain so traps reach the debugger. */
int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}
/*
 * Late KGDB init (perf is available by now): pre-allocate a disabled
 * per-CPU perf event for every breakpoint slot so breakpoints can later
 * be armed from KGDB's exception context, where normal perf event
 * creation is impossible.  The attr values used here (address of
 * kgdb_arch_init, LEN_4, HW_BREAKPOINT_W) are placeholders; the real
 * parameters are filled in by kgdb_correct_hw_break().
 */
void kgdb_arch_late(void)
{
	int i, cpu;
	struct perf_event_attr attr;
	struct perf_event **pevent;

	hw_breakpoint_init(&attr);

	attr.bp_addr = (unsigned long)kgdb_arch_init;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;
	attr.disabled = 1;

	for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
		if (breakinfo[i].pev)
			continue;	/* already allocated on a prior call */

		breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
		if (IS_ERR((void * __force)breakinfo[i].pev)) {
			pr_err("kgdb: Could not allocate hw breakpoints.\n");
			breakinfo[i].pev = NULL;
			return;
		}

		for_each_online_cpu(cpu) {
			pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
			/*
			 * KGDB manages slot reservations itself through
			 * dbg_reserve_bp_slot()/dbg_release_bp_slot():
			 * give back the slot taken at registration and
			 * clear the destructor so freeing the event later
			 * does not release the slot a second time.
			 */
			if (pevent[0]->destroy) {
				pevent[0]->destroy = NULL;
				release_bp_slot(*pevent);
			}
		}
	}
}
  605. void kgdb_arch_exit(void)
  606. {
  607. int i;
  608. for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
  609. if (breakinfo[i].pev) {
  610. unregister_wide_hw_breakpoint(breakinfo[i].pev);
  611. breakinfo[i].pev = NULL;
  612. }
  613. }
  614. unregister_die_notifier(&kgdb_notifier);
  615. }