core.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Zhaoxin PMU; like Intel Architectural PerfMon-v2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/apic.h>

#include "../perf_event.h"

/*
 * Zhaoxin PerfMon, used on zxc and later.
 */
static u64 zx_pmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = {

	[PERF_COUNT_HW_CPU_CYCLES]        = 0x0082,
	[PERF_COUNT_HW_INSTRUCTIONS]      = 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]  = 0x0515,
	[PERF_COUNT_HW_CACHE_MISSES]      = 0x051a,
	[PERF_COUNT_HW_BUS_CYCLES]        = 0x0083,
};
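
/*
 * Fixed counter constraints: retired instructions map to fixed counter 0,
 * unhalted core clock cycles to fixed counter 1 and unhalted bus clock
 * cycles to fixed counter 2 (the ZXC table only lists the core cycle entry).
 * zhaoxin_pmu_init() later widens each constraint so these events can also
 * be scheduled on the general purpose counters.
 */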
static struct event_constraint zxc_event_constraints[] __read_mostly = {

	FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */
	EVENT_CONSTRAINT_END
};

static struct event_constraint zxd_event_constraints[] __read_mostly = {

	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* retired instructions */
	FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */
	FIXED_EVENT_CONSTRAINT(0x0083, 2), /* unhalted bus clock cycles */
	EVENT_CONSTRAINT_END
};

static __initconst const u64 zxd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0042,
		[C(RESULT_MISS)] = 0x0538,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0x0043,
		[C(RESULT_MISS)] = 0x0562,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0300,
		[C(RESULT_MISS)] = 0x0301,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0x030a,
		[C(RESULT_MISS)] = 0x030b,
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0042,
		[C(RESULT_MISS)] = 0x052c,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0x0043,
		[C(RESULT_MISS)] = 0x0530,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0x0564,
		[C(RESULT_MISS)] = 0x0565,
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x00c0,
		[C(RESULT_MISS)] = 0x0534,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0700,
		[C(RESULT_MISS)] = 0x0709,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
},
};

static __initconst const u64 zxe_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0568,
		[C(RESULT_MISS)] = 0x054b,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0x0669,
		[C(RESULT_MISS)] = 0x0562,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0300,
		[C(RESULT_MISS)] = 0x0301,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0x030a,
		[C(RESULT_MISS)] = 0x030b,
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0,
		[C(RESULT_MISS)] = 0x0,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0x0,
		[C(RESULT_MISS)] = 0x0,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0x0,
		[C(RESULT_MISS)] = 0x0,
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0568,
		[C(RESULT_MISS)] = 0x052c,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = 0x0669,
		[C(RESULT_MISS)] = 0x0530,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = 0x0564,
		[C(RESULT_MISS)] = 0x0565,
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x00c0,
		[C(RESULT_MISS)] = 0x0534,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = 0x0028,
		[C(RESULT_MISS)] = 0x0029,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = -1,
		[C(RESULT_MISS)] = -1,
	},
},
};

static void zhaoxin_pmu_disable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}

static void zhaoxin_pmu_enable_all(int added)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}

static inline u64 zhaoxin_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void zhaoxin_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void zxc_pmu_ack_status(u64 ack)
{
	/*
	 * ZXC needs global control enabled in order to clear status bits.
	 */
	zhaoxin_pmu_enable_all(0);
	zhaoxin_pmu_ack_status(ack);
	zhaoxin_pmu_disable_all();
}
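
/*
 * Fixed counters are programmed through 4-bit fields in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (hwc->config_base for fixed events);
 * disabling a fixed counter simply clears its field.
 */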
static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void zhaoxin_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		zhaoxin_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);
}

static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void zhaoxin_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		zhaoxin_pmu_enable_fixed(hwc);
		return;
	}

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int zhaoxin_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int handled = 0;
	u64 status;
	int bit;

	cpuc = this_cpu_ptr(&cpu_hw_events);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	zhaoxin_pmu_disable_all();
	status = zhaoxin_pmu_get_status();
	if (!status)
		goto done;

again:
	if (x86_pmu.enabled_ack)
		zxc_pmu_ack_status(status);
	else
		zhaoxin_pmu_ack_status(status);

	inc_irq_stat(apic_perf_irqs);

	/*
	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
	 * and clear the bit.
	 */
	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
		if (!status)
			goto done;
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		x86_perf_event_update(event);
		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = zhaoxin_pmu_get_status();
	if (status)
		goto again;

done:
	zhaoxin_pmu_enable_all(0);
	return handled;
}

static u64 zhaoxin_pmu_event_map(int hw_event)
{
	return zx_pmon_event_map[hw_event];
}
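
/*
 * Match the event against the constraint table; events without an entry
 * there may be scheduled on any general purpose counter.
 */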
static struct event_constraint *
zhaoxin_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
			      struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}
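
/*
 * Layout of the raw config word exposed through sysfs. With these format
 * attributes a raw hardware event can be requested from user space, for
 * example (illustrative perf invocation counting event 0x82, umask 0x00):
 *
 *	perf stat -e cpu/event=0x82,umask=0x00/ -- <workload>
 */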
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(umask, "config:8-15");
PMU_FORMAT_ATTR(edge, "config:18");
PMU_FORMAT_ATTR(inv, "config:23");
PMU_FORMAT_ATTR(cmask, "config:24-31");

static struct attribute *zx_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};

static ssize_t zhaoxin_event_sysfs_show(char *page, u64 config)
{
	u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

	return x86_event_sysfs_show(page, config, event);
}

static const struct x86_pmu zhaoxin_pmu __initconst = {
	.name			= "zhaoxin",
	.handle_irq		= zhaoxin_pmu_handle_irq,
	.disable_all		= zhaoxin_pmu_disable_all,
	.enable_all		= zhaoxin_pmu_enable_all,
	.enable			= zhaoxin_pmu_enable_event,
	.disable		= zhaoxin_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= zhaoxin_pmu_event_map,
	.max_events		= ARRAY_SIZE(zx_pmon_event_map),
	.apic			= 1,
	/*
	 * For zxd/zxe, read/write operation for PMCx MSR is 48 bits.
	 */
	.max_period		= (1ULL << 47) - 1,
	.get_event_constraints	= zhaoxin_get_event_constraints,
	.format_attrs		= zx_arch_formats_attr,
	.events_sysfs_show	= zhaoxin_event_sysfs_show,
};
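
/*
 * Names of the architectural events that CPUID leaf 0xa may flag as
 * unavailable; the quirk below zeroes the corresponding zx_pmon_event_map
 * entries and warns about them.
 */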
static const struct { int id; char *name; } zx_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};

static __init void zhaoxin_arch_events_quirk(void)
{
	int bit;

	/* disable events reported as not present by CPUID */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) {
		zx_pmon_event_map[zx_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			zx_arch_events_map[bit].name);
	}
}
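
/*
 * Probe CPUID leaf 0xa, require PerfMon version 2, fill in the counter
 * geometry and then select the event tables for the detected family/model.
 */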
__init int zhaoxin_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	int version;

	pr_info("Welcome to zhaoxin pmu!\n");

	/*
	 * Check whether the Architectural PerfMon supports
	 * hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);

	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT - 1)
		return -ENODEV;

	version = eax.split.version_id;
	if (version != 2)
		return -ENODEV;

	x86_pmu = zhaoxin_pmu;
	pr_info("Version check pass!\n");

	x86_pmu.version			= version;
	x86_pmu.cntr_mask64		= GENMASK_ULL(eax.split.num_counters - 1, 0);
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;
	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	x86_pmu.fixed_cntr_mask64	= GENMASK_ULL(edx.split.num_counters_fixed - 1, 0);
	x86_add_quirk(zhaoxin_arch_events_quirk);

	switch (boot_cpu_data.x86) {
	case 0x06:
		/*
		 * Support Zhaoxin CPUs from the ZXC series onwards; exclude the
		 * Nano series by FMS.
		 * Nano FMS: Family=6, Model=F, Stepping=[0-A][C-D]
		 * ZXC FMS:  Family=6, Model=F, Stepping=E-F OR Family=6, Model=0x19, Stepping=0-3
		 */
		if ((boot_cpu_data.x86_model == 0x0f && boot_cpu_data.x86_stepping >= 0x0e) ||
		    boot_cpu_data.x86_model == 0x19) {

			x86_pmu.max_period = x86_pmu.cntval_mask >> 1;

			/* Clearing status works only if the global control is enabled on zxc. */
			x86_pmu.enabled_ack = 1;

			x86_pmu.event_constraints = zxc_event_constraints;
			zx_pmon_event_map[PERF_COUNT_HW_INSTRUCTIONS] = 0;
			zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0;
			zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0;
			zx_pmon_event_map[PERF_COUNT_HW_BUS_CYCLES] = 0;

			pr_cont("ZXC events, ");
			break;
		}
		return -ENODEV;

	case 0x07:
		zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event = 0x01, .umask = 0x01, .inv = 0x01, .cmask = 0x01);

		zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event = 0x0f, .umask = 0x04, .inv = 0, .cmask = 0);

		switch (boot_cpu_data.x86_model) {
		case 0x1b:
			memcpy(hw_cache_event_ids, zxd_hw_cache_event_ids,
			       sizeof(hw_cache_event_ids));

			x86_pmu.event_constraints = zxd_event_constraints;

			zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0700;
			zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0709;

			pr_cont("ZXD events, ");
			break;
		case 0x3b:
			memcpy(hw_cache_event_ids, zxe_hw_cache_event_ids,
			       sizeof(hw_cache_event_ids));

			x86_pmu.event_constraints = zxd_event_constraints;

			zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028;
			zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0029;

			pr_cont("ZXE events, ");
			break;
		default:
			return -ENODEV;
		}
		break;

	default:
		return -ENODEV;
	}
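
	/*
	 * Mask written to MSR_CORE_PERF_GLOBAL_CTRL by zhaoxin_pmu_enable_all():
	 * all general purpose counters plus the fixed counters shifted up to
	 * their bit positions.
	 */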
	x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
	x86_pmu.intel_ctrl |= x86_pmu.fixed_cntr_mask64 << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			c->idxmsk64 |= x86_pmu.cntr_mask64;
			c->weight += x86_pmu_num_counters(NULL);
		}
	}

	return 0;
}