  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * preemptoff and irqoff tracepoints
  4. *
  5. * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
  6. */
  7. #include <linux/kallsyms.h>
  8. #include <linux/uaccess.h>
  9. #include <linux/module.h>
  10. #include <linux/ftrace.h>
  11. #include <linux/kprobes.h>
  12. #include "trace.h"
  13. #define CREATE_TRACE_POINTS
  14. #include <trace/events/preemptirq.h>
/*
 * trace(point): expand to the tracepoint invocation appropriate for this
 * architecture.
 *
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, use the rcuidle tracing methods (which
 * aren't NMI-safe - so exclude NMI contexts):
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
#define trace(point) trace_##point
#else
#define trace(point) if (!in_nmi()) trace_##point##_rcuidle
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Per-cpu flag: set while this CPU has traced an irq-disable and not yet
 * the matching irq-enable. Prevents redundant enable/disable events when
 * IRQs are already in the recorded state.
 */
static DEFINE_PER_CPU(int, tracing_irq_cpu);
/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_on_prepare(void)
{
	/* Only emit an enable event if a disable was previously recorded. */
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		/* Clear the flag last, after both consumers have been told. */
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
/*
 * Full irq-enable tracing entry point: emits the irq_enable tracepoint and
 * notifies the irqsoff tracer (guarded by the per-cpu flag), then informs
 * lockdep. Lockdep is told *after* the tracing hooks on the enable path
 * (compare trace_hardirqs_off(), which tells lockdep first).
 */
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);
/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	/* Only emit a disable event if we are not already marked disabled. */
	if (!this_cpu_read(tracing_irq_cpu)) {
		/* Set the flag first so a recursive entry sees it. */
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);
/*
 * Full irq-disable tracing entry point: informs lockdep first, then emits
 * the tracer callback and the irq_disable tracepoint (guarded by the
 * per-cpu flag). Note the mirror-image ordering vs. trace_hardirqs_on():
 * lockdep runs before the tracing hooks on the disable path.
 */
void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		/* Set the flag first so a recursive entry sees it. */
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
  86. #endif /* CONFIG_TRACE_IRQFLAGS */
  87. #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
/*
 * Record a preemption-enable event.
 *
 * @a0: caller address (the site enabling preemption)
 * @a1: parent caller address
 *
 * Emits the preempt_enable tracepoint, then notifies the preemptoff tracer.
 */
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable)(a0, a1);
	tracer_preempt_on(a0, a1);
}
/*
 * Record a preemption-disable event.
 *
 * @a0: caller address (the site disabling preemption)
 * @a1: parent caller address
 *
 * Emits the preempt_disable tracepoint, then notifies the preemptoff tracer.
 */
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable)(a0, a1);
	tracer_preempt_off(a0, a1);
}
  98. #endif