perf_callchain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ARM callchain support
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the ARM OProfile backtrace code.
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
        struct frame_tail __user *fp;
        unsigned long sp;
        unsigned long lr;
} __attribute__((packed));
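
/*
 * Note: this layout assumes the user code was built with frame pointers
 * (APCS-style frame records). Each record's fp member then links to the
 * caller's frame tail, and lr holds the return address that gets reported
 * as a callchain entry.
 */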

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry_ctx *entry)
{
        struct frame_tail buftail;
        unsigned long err;

        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;
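
        /*
         * We can be called from the PMU overflow interrupt, where taking a
         * page fault is not allowed, so copy the frame tail with page
         * faults disabled and give up if the access would fault.
         */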
        pagefault_disable();
        err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
        pagefault_enable();

        if (err)
                return NULL;

        perf_callchain_store(entry, buftail.lr);

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail + 1 >= buftail.fp)
                return NULL;

        return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
        struct frame_tail __user *tail;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* We don't support guest OS callchains yet */
                return;
        }

        perf_callchain_store(entry, regs->ARM_pc);

        if (!current->mm)
                return;

        tail = (struct frame_tail __user *)regs->ARM_fp - 1;
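
        /*
         * Walk the chain of frame records until the sample's entry limit
         * is reached, the chain terminates, or the frame pointer loses its
         * required 4-byte alignment.
         */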
        while ((entry->nr < entry->max_stack) &&
               tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
                void *data)
{
        struct perf_callchain_entry_ctx *entry = data;

        perf_callchain_store(entry, fr->pc);
        return 0;
}
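
/*
 * Kernel-side unwind: arm_get_current_stackframe() seeds the unwind state
 * (fp/sp/lr/pc) from the sampled pt_regs, and walk_stackframe() then calls
 * callchain_trace() once per frame until the unwind terminates.
 */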
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
        struct stackframe fr;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* We don't support guest OS callchains yet */
                return;
        }

        arm_get_current_stackframe(regs, &fr);
        walk_stackframe(&fr, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
                return perf_guest_cbs->get_guest_ip();

        return instruction_pointer(regs);
}
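
/*
 * Tag each sample with the CPU mode it was taken in; the perf tooling uses
 * these PERF_RECORD_MISC_* flags to pick the right symbol namespace
 * (kernel, user, or guest).
 */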
unsigned long perf_misc_flags(struct pt_regs *regs)
{
        int misc = 0;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                if (perf_guest_cbs->is_user_mode())
                        misc |= PERF_RECORD_MISC_GUEST_USER;
                else
                        misc |= PERF_RECORD_MISC_GUEST_KERNEL;
        } else {
                if (user_mode(regs))
                        misc |= PERF_RECORD_MISC_USER;
                else
                        misc |= PERF_RECORD_MISC_KERNEL;
        }

        return misc;
}