/* mmap_lock.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #define CREATE_TRACE_POINTS
  3. #include <trace/events/mmap_lock.h>
  4. #include <linux/mm.h>
  5. #include <linux/cgroup.h>
  6. #include <linux/memcontrol.h>
  7. #include <linux/mmap_lock.h>
  8. #include <linux/mutex.h>
  9. #include <linux/percpu.h>
  10. #include <linux/rcupdate.h>
  11. #include <linux/smp.h>
  12. #include <linux/trace_events.h>
  13. #include <linux/local_lock.h>
  14. EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking);
  15. EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned);
  16. EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released);
  17. #ifdef CONFIG_MEMCG
/*
 * Number of currently-registered mmap_lock trace event users.
 * Incremented/decremented by trace_mmap_lock_reg()/unreg(); while it is
 * zero, get_mm_memcg_path() skips the cgroup path lookup entirely.
 */
static atomic_t reg_refcount;
/*
 * Size of the buffer for memcg path names. Ignoring stack trace support,
 * trace_events_hist.c uses MAX_FILTER_STR_VAL for this, so we also use it.
 */
#define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL
/*
 * Registration callback for the mmap_lock trace events (presumably invoked
 * by the tracing core when an event is enabled — declared alongside the
 * events in trace/events/mmap_lock.h). Arms the memcg path lookup by
 * bumping reg_refcount. Always succeeds.
 */
int trace_mmap_lock_reg(void)
{
	atomic_inc(&reg_refcount);
	return 0;
}
/*
 * Counterpart of trace_mmap_lock_reg(): drop one registration reference.
 * When the count reaches zero, get_mm_memcg_path() stops producing paths.
 */
void trace_mmap_lock_unreg(void)
{
	atomic_dec(&reg_refcount);
}
/*
 * Emit the mmap_lock_<type> tracepoint with the mm's memcg path resolved
 * into an on-stack buffer (empty string if unavailable or unregistered).
 * The do/while (0) wrapper keeps the macro safe as a single statement.
 */
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                   \
	do {                                                       \
		char buf[MEMCG_PATH_BUF_SIZE];                     \
		get_mm_memcg_path(mm, buf, sizeof(buf));           \
		trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__);    \
	} while (0)
  39. #else /* !CONFIG_MEMCG */
/*
 * !CONFIG_MEMCG stub: there is no memcg path lookup to arm, so
 * registration is a no-op that always succeeds.
 */
int trace_mmap_lock_reg(void)
{
	return 0;
}
/* !CONFIG_MEMCG stub: nothing to tear down. */
void trace_mmap_lock_unreg(void)
{
}
/* Without memcg support, emit the event with an empty path string. */
#define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                   \
	trace_mmap_lock_##type(mm, "", ##__VA_ARGS__)
  49. #endif /* CONFIG_MEMCG */
  50. #ifdef CONFIG_TRACING
  51. #ifdef CONFIG_MEMCG
  52. /*
  53. * Write the given mm_struct's memcg path to a buffer. If the path cannot be
  54. * determined or the trace event is being unregistered, empty string is written.
  55. */
  56. static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen)
  57. {
  58. struct mem_cgroup *memcg;
  59. buf[0] = '\0';
  60. /* No need to get path if no trace event is registered. */
  61. if (!atomic_read(&reg_refcount))
  62. return;
  63. memcg = get_mem_cgroup_from_mm(mm);
  64. if (memcg == NULL)
  65. return;
  66. if (memcg->css.cgroup)
  67. cgroup_path(memcg->css.cgroup, buf, buflen);
  68. css_put(&memcg->css);
  69. }
  70. #endif /* CONFIG_MEMCG */
  71. /*
  72. * Trace calls must be in a separate file, as otherwise there's a circular
  73. * dependency between linux/mmap_lock.h and trace/events/mmap_lock.h.
  74. */
/*
 * Slow-path helper behind the start_locking tracepoint: resolves the memcg
 * path (if CONFIG_MEMCG) and fires mmap_lock_start_locking.
 */
void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(start_locking, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_start_locking);
/*
 * Fires mmap_lock_acquire_returned after a lock attempt; @success records
 * whether the (possibly killable/trylock) acquisition succeeded.
 */
void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
					   bool success)
{
	TRACE_MMAP_LOCK_EVENT(acquire_returned, mm, write, success);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_acquire_returned);
/* Fires mmap_lock_released when the mmap lock is dropped. */
void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write)
{
	TRACE_MMAP_LOCK_EVENT(released, mm, write);
}
EXPORT_SYMBOL(__mmap_lock_do_trace_released);
  91. #endif /* CONFIG_TRACING */