arm_pmu_acpi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI probing code for ARM performance counters.
 *
 * Copyright (C) 2017 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
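
/*
 * Per-CPU state built up during probing: the PMU associated with each CPU,
 * and the Linux IRQ mapped from that CPU's GICC performance interrupt
 * (0 if none).
 */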
static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus);
static DEFINE_PER_CPU(int, pmu_irqs);
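
/*
 * Map the performance interrupt GSI from this CPU's MADT GICC entry to a
 * Linux IRQ. Returns the IRQ number, 0 if no valid GSI is described, or a
 * negative errno on failure.
 */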
static int arm_pmu_acpi_register_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi, trigger;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;

	/*
	 * Per the ACPI spec, the MADT cannot describe a PMU that doesn't
	 * have an interrupt. QEMU advertises this by using a GSI of zero,
	 * which is not known to be valid on any hardware despite being
	 * valid per the spec. Take the pragmatic approach and reject a
	 * GSI of zero for now.
	 */
	if (!gsi)
		return 0;

	if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE)
		trigger = ACPI_EDGE_SENSITIVE;
	else
		trigger = ACPI_LEVEL_SENSITIVE;

	/*
	 * Helpfully, the MADT GICC doesn't have a polarity flag for the
	 * "performance interrupt". Luckily, on compliant GICs the polarity is
	 * a fixed value in HW (for both SPIs and PPIs) that we cannot change
	 * from SW.
	 *
	 * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This
	 * may not match the real polarity, but that should not matter.
	 *
	 * Other interrupt controllers are not supported with ACPI.
	 */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}
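
/* Undo arm_pmu_acpi_register_irq(): unregister this CPU's GSI, if it has one. */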
static void arm_pmu_acpi_unregister_irq(int cpu)
{
	struct acpi_madt_generic_interrupt *gicc;
	int gsi;

	gicc = acpi_cpu_get_madt_gicc(cpu);

	gsi = gicc->performance_interrupt;
	if (gsi)
		acpi_unregister_gsi(gsi);
}
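
/*
 * Common helper for the SPE and TRBE devices below: check that every GICC
 * entry describes the same interrupt (i.e. the system is homogeneous),
 * register that GSI, and register @pdev with the resulting IRQ as its sole
 * resource.
 */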
static int __maybe_unused
arm_acpi_register_pmu_device(struct platform_device *pdev, u8 len,
			     u16 (*parse_gsi)(struct acpi_madt_generic_interrupt *))
{
	int cpu, this_hetid, hetid, irq, ret;
	u16 this_gsi = 0, gsi = 0;

	/*
	 * Ensure the platform device has an IORESOURCE_IRQ resource to hold
	 * the GSI interrupt.
	 */
	if (pdev->num_resources != 1)
		return -ENXIO;

	if (pdev->resource[0].flags != IORESOURCE_IRQ)
		return -ENXIO;

	/*
	 * Sanity check all the GICC tables for the same interrupt
	 * number. For now, only support homogeneous ACPI machines.
	 */
	for_each_possible_cpu(cpu) {
		struct acpi_madt_generic_interrupt *gicc;

		gicc = acpi_cpu_get_madt_gicc(cpu);
		if (gicc->header.length < len)
			return gsi ? -ENXIO : 0;

		this_gsi = parse_gsi(gicc);
		this_hetid = find_acpi_cpu_topology_hetero_id(cpu);
		if (!gsi) {
			hetid = this_hetid;
			gsi = this_gsi;
		} else if (hetid != this_hetid || gsi != this_gsi) {
			pr_warn("ACPI: %s: must be homogeneous\n", pdev->name);
			return -ENXIO;
		}
	}

	if (!this_gsi)
		return 0;

	irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
	if (irq < 0) {
		pr_warn("ACPI: %s Unable to register interrupt: %d\n", pdev->name, gsi);
		return -ENXIO;
	}

	pdev->resource[0].start = irq;
	ret = platform_device_register(pdev);
	if (ret)
		acpi_unregister_gsi(gsi);

	return ret;
}

#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
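/*
 * Skeleton SPE platform device; arm_acpi_register_pmu_device() fills in its
 * IRQ resource from the MADT before registering it.
 */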
static struct resource spe_resources[] = {
	{
		/* irq */
		.flags = IORESOURCE_IRQ,
	}
};

static struct platform_device spe_dev = {
	.name = ARMV8_SPE_PDEV_NAME,
	.id = -1,
	.resource = spe_resources,
	.num_resources = ARRAY_SIZE(spe_resources)
};

static u16 arm_spe_parse_gsi(struct acpi_madt_generic_interrupt *gicc)
{
	return gicc->spe_interrupt;
}

/*
 * For lack of a better place, hook the normal PMU MADT walk
 * and create a SPE device if we detect a recent MADT with
 * a homogeneous PPI mapping.
 */
static void arm_spe_acpi_register_device(void)
{
	int ret = arm_acpi_register_pmu_device(&spe_dev, ACPI_MADT_GICC_SPE,
					       arm_spe_parse_gsi);
	if (ret)
		pr_warn("ACPI: SPE: Unable to register device\n");
}
#else
static inline void arm_spe_acpi_register_device(void)
{
}
#endif /* CONFIG_ARM_SPE_PMU */

#if IS_ENABLED(CONFIG_CORESIGHT_TRBE)
static struct resource trbe_resources[] = {
	{
		/* irq */
		.flags = IORESOURCE_IRQ,
	}
};

static struct platform_device trbe_dev = {
	.name = ARMV8_TRBE_PDEV_NAME,
	.id = -1,
	.resource = trbe_resources,
	.num_resources = ARRAY_SIZE(trbe_resources)
};

static u16 arm_trbe_parse_gsi(struct acpi_madt_generic_interrupt *gicc)
{
	return gicc->trbe_interrupt;
}
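
/*
 * As with SPE above, create a TRBE platform device if the MADT describes a
 * homogeneous TRBE interrupt.
 */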
static void arm_trbe_acpi_register_device(void)
{
	int ret = arm_acpi_register_pmu_device(&trbe_dev, ACPI_MADT_GICC_TRBE,
					       arm_trbe_parse_gsi);
	if (ret)
		pr_warn("ACPI: TRBE: Unable to register device\n");
}
#else
static inline void arm_trbe_acpi_register_device(void)
{
}
#endif /* CONFIG_CORESIGHT_TRBE */
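
/*
 * Map and request the PMU IRQ for every possible CPU, recording it in
 * pmu_irqs. On failure, unregister everything registered so far.
 */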
static int arm_pmu_acpi_parse_irqs(void)
{
	int irq, cpu, irq_cpu, err;

	for_each_possible_cpu(cpu) {
		irq = arm_pmu_acpi_register_irq(cpu);
		if (irq < 0) {
			err = irq;
			pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
				cpu, err);
			goto out_err;
		} else if (irq == 0) {
			pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
		}

		/*
		 * Log and request the IRQ so the core arm_pmu code can manage
		 * it. We'll have to sanity-check IRQs later when we associate
		 * them with their PMUs.
		 */
		per_cpu(pmu_irqs, cpu) = irq;
		err = armpmu_request_irq(irq, cpu);
		if (err)
			goto out_err;
	}

	return 0;

out_err:
	for_each_possible_cpu(cpu) {
		irq = per_cpu(pmu_irqs, cpu);
		if (!irq)
			continue;

		arm_pmu_acpi_unregister_irq(cpu);

		/*
		 * Blat all copies of the IRQ so that we only unregister the
		 * corresponding GSI once (e.g. when we have PPIs).
		 */
		for_each_possible_cpu(irq_cpu) {
			if (per_cpu(pmu_irqs, irq_cpu) == irq)
				per_cpu(pmu_irqs, irq_cpu) = 0;
		}
	}

	return err;
}
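
/*
 * Find an already-probed PMU whose acpi_cpuid matches the current CPU's MIDR,
 * if any. Used on the hotplug path to associate a late CPU with its PMU.
 */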
static struct arm_pmu *arm_pmu_acpi_find_pmu(void)
{
	unsigned long cpuid = read_cpuid_id();
	struct arm_pmu *pmu;
	int cpu;

	for_each_possible_cpu(cpu) {
		pmu = per_cpu(probed_pmus, cpu);
		if (!pmu || pmu->acpi_cpuid != cpuid)
			continue;

		return pmu;
	}

	return NULL;
}

/*
 * Check whether the new IRQ is compatible with those already associated with
 * the PMU (e.g. we don't have mismatched PPIs).
 */
static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	int cpu;

	if (!irq)
		return true;

	for_each_cpu(cpu, &pmu->supported_cpus) {
		int other_irq = per_cpu(hw_events->irq, cpu);

		if (!other_irq)
			continue;

		if (irq == other_irq)
			continue;
		if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
			continue;

		pr_warn("mismatched PPIs detected\n");
		return false;
	}

	return true;
}
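
/*
 * Associate @cpu with @pmu: record the PMU for the CPU, hand the CPU's parsed
 * IRQ to the PMU's hw_events (if compatible), and mark the CPU as supported.
 */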
static void arm_pmu_acpi_associate_pmu_cpu(struct arm_pmu *pmu,
					   unsigned int cpu)
{
	int irq = per_cpu(pmu_irqs, cpu);

	per_cpu(probed_pmus, cpu) = pmu;

	if (pmu_irq_matches(pmu, irq)) {
		struct pmu_hw_events __percpu *hw_events;

		hw_events = pmu->hw_events;
		per_cpu(hw_events->irq, cpu) = irq;
	}

	cpumask_set_cpu(cpu, &pmu->supported_cpus);
}

/*
 * This must run before the common arm_pmu hotplug logic, so that we can
 * associate a CPU and its interrupt before the common code tries to manage the
 * affinity and so on.
 *
 * Note that hotplug events are serialized, so we cannot race with another CPU
 * coming up. The perf core won't open events while a hotplug event is in
 * progress.
 */
static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
{
	struct arm_pmu *pmu;

	/* If we've already probed this CPU, we have nothing to do */
	if (per_cpu(probed_pmus, cpu))
		return 0;

	pmu = arm_pmu_acpi_find_pmu();
	if (!pmu) {
		pr_warn_ratelimited("Unable to associate CPU%d with a PMU\n",
				    cpu);
		return 0;
	}

	arm_pmu_acpi_associate_pmu_cpu(pmu, cpu);
	return 0;
}
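
/* Associate every online CPU whose MIDR matches @cpuid with @pmu. */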
static void arm_pmu_acpi_probe_matching_cpus(struct arm_pmu *pmu,
					     unsigned long cpuid)
{
	int cpu;

	for_each_online_cpu(cpu) {
		unsigned long cpu_cpuid = per_cpu(cpu_data, cpu).reg_midr;

		if (cpu_cpuid == cpuid)
			arm_pmu_acpi_associate_pmu_cpu(pmu, cpu);
	}
}
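
/*
 * Probe the PMUs for all online CPUs: parse and request the per-CPU IRQs,
 * install the hotplug callback for late CPUs, then allocate, initialise and
 * register one logical PMU per unique MIDR seen.
 */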
int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
{
	int pmu_idx = 0;
	unsigned int cpu;
	int ret;

	ret = arm_pmu_acpi_parse_irqs();
	if (ret)
		return ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_ACPI_STARTING,
					"perf/arm/pmu_acpi:starting",
					arm_pmu_acpi_cpu_starting, NULL);
	if (ret)
		return ret;

	/*
	 * Initialise and register the set of PMUs which we know about right
	 * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
	 * could handle late hotplug, but this may lead to deadlock since we
	 * might try to register a hotplug notifier instance from within a
	 * hotplug notifier.
	 *
	 * There's also the problem of having access to the right init_fn,
	 * without tying this too deeply into the "real" PMU driver.
	 *
	 * For the moment, as with the platform/DT case, we need at least one
	 * of a PMU's CPUs to be online at probe time.
	 */
	for_each_online_cpu(cpu) {
		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
		unsigned long cpuid;
		char *base_name;

		/* If we've already probed this CPU, we have nothing to do */
		if (pmu)
			continue;

		pmu = armpmu_alloc();
		if (!pmu) {
			pr_warn("Unable to allocate PMU for CPU%d\n",
				cpu);
			return -ENOMEM;
		}

		cpuid = per_cpu(cpu_data, cpu).reg_midr;
		pmu->acpi_cpuid = cpuid;

		arm_pmu_acpi_probe_matching_cpus(pmu, cpuid);

		ret = init_fn(pmu);
		if (ret == -ENODEV) {
			/* PMU not handled by this driver, or not present */
			continue;
		} else if (ret) {
			pr_warn("Unable to initialise PMU for CPU%d\n", cpu);
			return ret;
		}

		base_name = pmu->name;
		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
		if (!pmu->name) {
			pr_warn("Unable to allocate PMU name for CPU%d\n", cpu);
			return -ENOMEM;
		}

		ret = armpmu_register(pmu);
		if (ret) {
			pr_warn("Failed to register PMU for CPU%d\n", cpu);
			kfree(pmu->name);
			return ret;
		}
	}

	return ret;
}
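
/* Register the SPE and TRBE platform devices once we know ACPI is in use. */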
static int arm_pmu_acpi_init(void)
{
	if (acpi_disabled)
		return 0;

	arm_spe_acpi_register_device();
	arm_trbe_acpi_register_device();

	return 0;
}
subsys_initcall(arm_pmu_acpi_init)