kprobes.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Kernel Probes (KProbes)
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(dmainsn);
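
/*
 * Probes on kernel image addresses get their instruction slots from this
 * dedicated cache, which allocates GFP_DMA pages (below 2GB on s390).
 * That keeps the slot within the same 2GB area as the kernel text, which
 * copy_instruction() relies on when it re-targets pc-relative
 * instructions; see s390_get_insn_slot().
 */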
static void *alloc_dmainsn_page(void)
{
        void *page;

        page = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
        if (page)
                set_memory_x((unsigned long) page, 1);
        return page;
}

static void free_dmainsn_page(void *page)
{
        set_memory_nx((unsigned long) page, 1);
        free_page((unsigned long)page);
}

struct kprobe_insn_cache kprobe_dmainsn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
        .alloc = alloc_dmainsn_page,
        .free = free_dmainsn_page,
        .pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
        .insn_size = MAX_INSN_SIZE,
};

static void copy_instruction(struct kprobe *p)
{
        unsigned long ip = (unsigned long) p->addr;
        s64 disp, new_disp;
        u64 addr, new_addr;

        if (ftrace_location(ip) == ip) {
                /*
                 * If kprobes patches the instruction that is morphed by
                 * ftrace make sure that kprobes always sees the branch
                 * "jg .+24" that skips the mcount block or the "brcl 0,0"
                 * in case of hotpatch.
                 */
                ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
                p->ainsn.is_ftrace_insn = 1;
        } else
                memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
        p->opcode = p->ainsn.insn[0];
        if (!probe_is_insn_relative_long(p->ainsn.insn))
                return;
        /*
         * For pc-relative instructions in RIL-b or RIL-c format patch the
         * RI2 displacement field. We have already made sure that the insn
         * slot for the patched instruction is within the same 2GB area
         * as the original instruction (either kernel image or module area).
         * Therefore the new displacement will always fit.
         */
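        /*
         * Purely illustrative example: a RIL instruction at 0x10000 with
         * RI2 displacement 0x100 targets 0x10000 + 2 * 0x100 = 0x10200.
         * Copied to an insn slot at 0x20000, the displacement becomes
         * (0x10200 - 0x20000) / 2 = -0x7f00, so the copied instruction
         * still reaches the original target.
         */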
        disp = *(s32 *)&p->ainsn.insn[1];
        addr = (u64)(unsigned long)p->addr;
        new_addr = (u64)(unsigned long)p->ainsn.insn;
        new_disp = ((addr + (disp * 2)) - new_addr) / 2;
        *(s32 *)&p->ainsn.insn[1] = new_disp;
}
NOKPROBE_SYMBOL(copy_instruction);

static inline int is_kernel_addr(void *addr)
{
        return addr < (void *)_end;
}

static int s390_get_insn_slot(struct kprobe *p)
{
        /*
         * Get an insn slot that is within the same 2GB area as the original
         * instruction. That way instructions with a 32bit signed displacement
         * field can be patched and executed within the insn slot.
         */
        p->ainsn.insn = NULL;
        if (is_kernel_addr(p->addr))
                p->ainsn.insn = get_dmainsn_slot();
        else if (is_module_addr(p->addr))
                p->ainsn.insn = get_insn_slot();
        return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
        if (!p->ainsn.insn)
                return;
        if (is_kernel_addr(p->addr))
                free_dmainsn_slot(p->ainsn.insn, 0);
        else
                free_insn_slot(p->ainsn.insn, 0);
        p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int arch_prepare_kprobe(struct kprobe *p)
{
        if ((unsigned long) p->addr & 0x01)
                return -EINVAL;
        /* Make sure the probe isn't going on a difficult instruction */
        if (probe_is_prohibited_opcode(p->addr))
                return -EINVAL;
        if (s390_get_insn_slot(p))
                return -ENOMEM;
        copy_instruction(p);
        return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

int arch_check_ftrace_location(struct kprobe *p)
{
        return 0;
}
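
/*
 * Arming a kprobe overwrites the start of the probed instruction with the
 * breakpoint instruction. For a probe on an ftrace call site the whole
 * ftrace instruction is rewritten and its displacement field records (via
 * KPROBE_ON_FTRACE_NOP/KPROBE_ON_FTRACE_CALL) whether the site was a
 * disabled nop or an enabled call at arming time, so disarming can restore
 * the right variant. The write happens under stop_machine(), so no other
 * CPU executes the instruction while it is being patched.
 */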
struct swap_insn_args {
        struct kprobe *p;
        unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
        struct swap_insn_args *args = data;
        struct ftrace_insn new_insn, *insn;
        struct kprobe *p = args->p;
        size_t len;

        new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
        len = sizeof(new_insn.opc);
        if (!p->ainsn.is_ftrace_insn)
                goto skip_ftrace;
        len = sizeof(new_insn);
        insn = (struct ftrace_insn *) p->addr;
        if (args->arm_kprobe) {
                if (is_ftrace_nop(insn))
                        new_insn.disp = KPROBE_ON_FTRACE_NOP;
                else
                        new_insn.disp = KPROBE_ON_FTRACE_CALL;
        } else {
                ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
                if (insn->disp == KPROBE_ON_FTRACE_NOP)
                        ftrace_generate_nop_insn(&new_insn);
        }
skip_ftrace:
        s390_kernel_write(p->addr, &new_insn, len);
        return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
        struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

        stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
        struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

        stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
        s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);
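
/*
 * Single stepping on s390 uses the PER (Program Event Recording) hardware:
 * control registers %cr9-%cr11 are programmed for an instruction fetch
 * event whose address range covers exactly the copied instruction, so a
 * PER event is raised once that one instruction has executed.
 */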
static void enable_singlestep(struct kprobe_ctlblk *kcb,
                              struct pt_regs *regs,
                              unsigned long ip)
{
        struct per_regs per_kprobe;

        /* Set up the PER control registers %cr9-%cr11 */
        per_kprobe.control = PER_EVENT_IFETCH;
        per_kprobe.start = ip;
        per_kprobe.end = ip;

        /* Save control regs and psw mask */
        __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
        kcb->kprobe_saved_imask = regs->psw.mask &
                (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

        /* Set PER control regs, turns on single step for the given address */
        __ctl_load(per_kprobe, 9, 11);
        regs->psw.mask |= PSW_MASK_PER;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
        regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
                               struct pt_regs *regs,
                               unsigned long ip)
{
        /* Restore control regs and psw mask, set new psw address */
        __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
        regs->psw.mask &= ~PSW_MASK_PER;
        regs->psw.mask |= kcb->kprobe_saved_imask;
        regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
        kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
        kcb->prev_kprobe.status = kcb->kprobe_status;
        __this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);
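
/*
 * On s390 the caller passes the return address in %r14. arch_prepare_kretprobe
 * saves that address in the kretprobe instance and redirects %r14 to the
 * kretprobe trampoline, so the probed function "returns" into the trampoline
 * instead of its real caller.
 */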
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

        /* Replace the return addr with trampoline addr */
        regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
        case KPROBE_HIT_ACTIVE:
                kprobes_inc_nmissed_count(p);
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
        default:
                /*
                 * A kprobe on the code path to single step an instruction
                 * is a BUG. The code path resides in the .kprobes.text
                 * section and is executed with interrupts disabled.
                 */
                pr_err("Invalid kprobe detected.\n");
                dump_kprobe(p);
                BUG();
        }
}
NOKPROBE_SYMBOL(kprobe_reenter_check);
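
/*
 * Entry point for the breakpoint trap, called via kprobe_exceptions_notify()
 * for DIE_BPT. The PSW address already points behind the two byte breakpoint
 * instruction, hence the kprobe is looked up at psw.addr - 2.
 */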
static int kprobe_handler(struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb;
        struct kprobe *p;

        /*
         * We want to disable preemption for the entire duration of kprobe
         * processing. That includes the calls to the pre/post handlers
         * and single stepping the kprobe instruction.
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();
        p = get_kprobe((void *)(regs->psw.addr - 2));

        if (p) {
                if (kprobe_running()) {
                        /*
                         * We have hit a kprobe while another is still
                         * active. This can happen in the pre and post
                         * handler. Single step the instruction of the
                         * new probe but do not call any handler function
                         * of this secondary kprobe.
                         * push_kprobe and pop_kprobe saves and restores
                         * the currently active kprobe.
                         */
                        kprobe_reenter_check(kcb, p);
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_REENTER;
                } else {
                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with single stepping. If we have a
                         * pre-handler and it returned non-zero, it prepped
                         * for changing execution path, so get out doing
                         * nothing more here.
                         */
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                        if (p->pre_handler && p->pre_handler(p, regs)) {
                                pop_kprobe(kcb);
                                preempt_enable_no_resched();
                                return 1;
                        }
                        kcb->kprobe_status = KPROBE_HIT_SS;
                }
                enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
                return 1;
        } /* else:
           * No kprobe at this address and no active kprobe. The trap has
           * not been caused by a kprobe breakpoint. The race of breakpoint
           * vs. kprobe remove does not exist because on s390 we use
           * stop_machine to arm/disarm the breakpoints.
           */
        preempt_enable_no_resched();
        return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *      - init_kprobes() establishes a probepoint here
 *      - When the probed function returns, this probe
 *        causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address;
        unsigned long trampoline_address;
        kprobe_opcode_t *correct_ret_addr;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have a
         * return probe installed on them, and/or more than one return probe
         * was registered for a target function.
         *
         * We can handle this because:
         *      - instances are always inserted at the head of the list
         *      - when multiple return probes are registered for the same
         *        function, the first instance's ret_addr will point to the
         *        real return address, and all the rest will point to
         *        kretprobe_trampoline
         */
        ri = NULL;
        orig_ret_address = 0;
        correct_ret_addr = NULL;
        trampoline_address = (unsigned long) &kretprobe_trampoline;
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long) ri->ret_addr;

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);

        correct_ret_addr = ri->ret_addr;
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long) ri->ret_addr;

                if (ri->rp && ri->rp->handler) {
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
                }

                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        regs->psw.addr = orig_ret_address;

        kretprobe_hash_unlock(current, &flags);

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption)
         */
        return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
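/*
 * Depending on the stepped instruction, probe_get_fixup_type() requests up
 * to three corrections:
 *      - FIXUP_PSW_NORMAL: rebase the PSW address from the insn slot back
 *        to the original instruction address.
 *      - FIXUP_BRANCH_NOT_TAKEN: if a branch in the slot simply fell
 *        through, continue behind the original instruction rather than
 *        behind the slot.
 *      - FIXUP_RETURN_REGISTER: a branch-and-save style instruction stored
 *        an address relative to the insn slot in its R1 register; rebase
 *        it to the original location.
 */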
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long ip = regs->psw.addr;
        int fixup = probe_get_fixup_type(p->ainsn.insn);

        /* Check if the kprobes location is an enabled ftrace caller */
        if (p->ainsn.is_ftrace_insn) {
                struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
                struct ftrace_insn call_insn;

                ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
                /*
                 * A kprobe on an enabled ftrace call site actually single
                 * stepped an unconditional branch (ftrace nop equivalent).
                 * Now we need to fixup things and pretend that a brasl r0,...
                 * was executed instead.
                 */
                if (insn->disp == KPROBE_ON_FTRACE_CALL) {
                        ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
                        regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
                }
        }

        if (fixup & FIXUP_PSW_NORMAL)
                ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

        if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
                int ilen = insn_length(p->ainsn.insn[0] >> 8);
                if (ip - (unsigned long) p->ainsn.insn == ilen)
                        ip = (unsigned long) p->addr + ilen;
        }

        if (fixup & FIXUP_RETURN_REGISTER) {
                int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
                regs->gprs[reg] += (unsigned long) p->addr -
                                   (unsigned long) p->ainsn.insn;
        }

        disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);
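
/*
 * Counterpart of kprobe_handler(), called via kprobe_exceptions_notify() for
 * DIE_SSTEP once the PER event for the single stepped copy arrives: run the
 * post handler, fix up the PSW and registers via resume_execution(), and
 * drop the kprobe from the per-cpu state again.
 */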
static int post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        struct kprobe *p = kprobe_running();

        if (!p)
                return 0;

        if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                p->post_handler(p, regs, 0);
        }

        resume_execution(p, regs);
        pop_kprobe(kcb);
        preempt_enable_no_resched();

        /*
         * if somebody else is singlestepping across a probe point, psw mask
         * will have PER set, in which case, continue the remaining processing
         * of do_single_step, as if this is not a probe hit.
         */
        if (regs->psw.mask & PSW_MASK_PER)
                return 0;

        return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        struct kprobe *p = kprobe_running();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe and the nip points back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                disable_singlestep(kcb, regs, (unsigned long) p->addr);
                pop_kprobe(kcb);
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting,
                 * we can also use npre/npostfault count for accounting
                 * these specific fault cases.
                 */
                kprobes_inc_nmissed_count(p);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page_fault, this could happen
                 * if handler tries to access user space by
                 * copy_from_user(), get_user() etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (p->fault_handler && p->fault_handler(p, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                entry = search_exception_tables(regs->psw.addr);
                if (entry) {
                        regs->psw.addr = extable_fixup(entry);
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it,
                 * Let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        int ret;

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_disable();
        ret = kprobe_trap_handler(regs, trapnr);
        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
        return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
                             unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *) data;
        struct pt_regs *regs = args->regs;
        int ret = NOTIFY_DONE;

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_disable();

        switch (val) {
        case DIE_BPT:
                if (kprobe_handler(regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEP:
                if (post_kprobe_handler(regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_TRAP:
                if (!preemptible() && kprobe_running() &&
                    kprobe_trap_handler(regs, args->trapnr))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

        return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct kprobe trampoline = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
        return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);