  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Performance event support for s390x
  4. *
  5. * Copyright IBM Corp. 2012, 2013
  6. * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  7. */
  8. #define KMSG_COMPONENT "perf"
  9. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  10. #include <linux/kernel.h>
  11. #include <linux/perf_event.h>
  12. #include <linux/kvm_host.h>
  13. #include <linux/percpu.h>
  14. #include <linux/export.h>
  15. #include <linux/seq_file.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/uaccess.h>
  18. #include <linux/compat.h>
  19. #include <linux/sysfs.h>
  20. #include <asm/stacktrace.h>
  21. #include <asm/irq.h>
  22. #include <asm/cpu_mf.h>
  23. #include <asm/lowcore.h>
  24. #include <asm/processor.h>
  25. #include <asm/sysinfo.h>
  26. #include <asm/unwind.h>
  27. static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
  28. {
  29. struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];
  30. if (!stack)
  31. return NULL;
  32. return (struct kvm_s390_sie_block *)stack->sie_control_block;
  33. }
  34. static bool is_in_guest(struct pt_regs *regs)
  35. {
  36. if (user_mode(regs))
  37. return false;
  38. #if IS_ENABLED(CONFIG_KVM)
  39. return instruction_pointer(regs) == (unsigned long) &sie_exit;
  40. #else
  41. return false;
  42. #endif
  43. }
  44. static unsigned long guest_is_user_mode(struct pt_regs *regs)
  45. {
  46. return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
  47. }
  48. static unsigned long instruction_pointer_guest(struct pt_regs *regs)
  49. {
  50. return sie_block(regs)->gpsw.addr;
  51. }
  52. unsigned long perf_instruction_pointer(struct pt_regs *regs)
  53. {
  54. return is_in_guest(regs) ? instruction_pointer_guest(regs)
  55. : instruction_pointer(regs);
  56. }
  57. static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
  58. {
  59. return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
  60. : PERF_RECORD_MISC_GUEST_KERNEL;
  61. }
  62. static unsigned long perf_misc_flags_sf(struct pt_regs *regs)
  63. {
  64. struct perf_sf_sde_regs *sde_regs;
  65. unsigned long flags;
  66. sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long;
  67. if (sde_regs->in_guest)
  68. flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
  69. : PERF_RECORD_MISC_GUEST_KERNEL;
  70. else
  71. flags = user_mode(regs) ? PERF_RECORD_MISC_USER
  72. : PERF_RECORD_MISC_KERNEL;
  73. return flags;
  74. }
  75. unsigned long perf_misc_flags(struct pt_regs *regs)
  76. {
  77. /* Check if the cpum_sf PMU has created the pt_regs structure.
  78. * In this case, perf misc flags can be easily extracted. Otherwise,
  79. * do regular checks on the pt_regs content.
  80. */
  81. if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA)
  82. if (!regs->gprs[15])
  83. return perf_misc_flags_sf(regs);
  84. if (is_in_guest(regs))
  85. return perf_misc_guest_flags(regs);
  86. return user_mode(regs) ? PERF_RECORD_MISC_USER
  87. : PERF_RECORD_MISC_KERNEL;
  88. }
  89. static void print_debug_cf(void)
  90. {
  91. struct cpumf_ctr_info cf_info;
  92. int cpu = smp_processor_id();
  93. memset(&cf_info, 0, sizeof(cf_info));
  94. if (!qctri(&cf_info))
  95. pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
  96. cpu, cf_info.cfvn, cf_info.csvn,
  97. cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
  98. }
  99. static void print_debug_sf(void)
  100. {
  101. struct hws_qsi_info_block si;
  102. int cpu = smp_processor_id();
  103. memset(&si, 0, sizeof(si));
  104. if (qsi(&si))
  105. return;
  106. pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n",
  107. cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
  108. si.cpu_speed);
  109. if (si.as)
  110. pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i"
  111. " bsdes=%i tear=%016lx dear=%016lx\n", cpu,
  112. si.as, si.es, si.cs, si.bsdes, si.tear, si.dear);
  113. if (si.ad)
  114. pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i"
  115. " dsdes=%i tear=%016lx dear=%016lx\n", cpu,
  116. si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear);
  117. }
  118. void perf_event_print_debug(void)
  119. {
  120. unsigned long flags;
  121. local_irq_save(flags);
  122. if (cpum_cf_avail())
  123. print_debug_cf();
  124. if (cpum_sf_avail())
  125. print_debug_sf();
  126. local_irq_restore(flags);
  127. }
  128. /* Service level infrastructure */
  129. static void sl_print_counter(struct seq_file *m)
  130. {
  131. struct cpumf_ctr_info ci;
  132. memset(&ci, 0, sizeof(ci));
  133. if (qctri(&ci))
  134. return;
  135. seq_printf(m, "CPU-MF: Counter facility: version=%u.%u "
  136. "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl);
  137. }
  138. static void sl_print_sampling(struct seq_file *m)
  139. {
  140. struct hws_qsi_info_block si;
  141. memset(&si, 0, sizeof(si));
  142. if (qsi(&si))
  143. return;
  144. if (!si.as && !si.ad)
  145. return;
  146. seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu"
  147. " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate,
  148. si.cpu_speed);
  149. if (si.as)
  150. seq_printf(m, "CPU-MF: Sampling facility: mode=basic"
  151. " sample_size=%u\n", si.bsdes);
  152. if (si.ad)
  153. seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic"
  154. " sample_size=%u\n", si.dsdes);
  155. }
  156. static void service_level_perf_print(struct seq_file *m,
  157. struct service_level *sl)
  158. {
  159. if (cpum_cf_avail())
  160. sl_print_counter(m);
  161. if (cpum_sf_avail())
  162. sl_print_sampling(m);
  163. }
  164. static struct service_level service_level_perf = {
  165. .seq_print = service_level_perf_print,
  166. };
  167. static int __init service_level_perf_register(void)
  168. {
  169. return register_service_level(&service_level_perf);
  170. }
  171. arch_initcall(service_level_perf_register);
  172. void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
  173. struct pt_regs *regs)
  174. {
  175. struct unwind_state state;
  176. unsigned long addr;
  177. unwind_for_each_frame(&state, current, regs, 0) {
  178. addr = unwind_get_return_address(&state);
  179. if (!addr || perf_callchain_store(entry, addr))
  180. return;
  181. }
  182. }
  183. void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
  184. struct pt_regs *regs)
  185. {
  186. arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
  187. }
  188. /* Perf definitions for PMU event attributes in sysfs */
  189. ssize_t cpumf_events_sysfs_show(struct device *dev,
  190. struct device_attribute *attr, char *page)
  191. {
  192. struct perf_pmu_events_attr *pmu_attr;
  193. pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
  194. return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
  195. }