/* Intel uncore performance-monitoring support — internal declarations. */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #include <linux/slab.h>
  3. #include <linux/pci.h>
  4. #include <asm/apicdef.h>
  5. #include <linux/perf_event.h>
  6. #include "../perf_event.h"
#define UNCORE_PMU_NAME_LEN		32
/* Interval for the periodic counter-update hrtimer (see uncore_pmu_start_hrtimer). */
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
/* SNB IMC counters need a much shorter update interval. */
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)
/* Pseudo event code shared by fixed and free-running counters. */
#define UNCORE_FIXED_EVENT		0xff
/* Counter-index space is laid out as: [generic 0..7][fixed][free-running]. */
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					 UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					 UNCORE_PMC_IDX_MAX_FREERUNNING)
  19. #define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) \
  20. ((dev << 24) | (func << 16) | (type << 8) | idx)
  21. #define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx)
  22. #define UNCORE_PCI_DEV_DEV(data) ((data >> 24) & 0xff)
  23. #define UNCORE_PCI_DEV_FUNC(data) ((data >> 16) & 0xff)
  24. #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
  25. #define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
/* Sentinel device id + slot count for auxiliary PCI devices some uncores need. */
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4
/* Uncore constraints match on the full 8-bit event code (cmask 0xff). */
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
/* Auxiliary PCI devices looked up for uncore types that need more than one. */
struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

/* Forward declarations — full definitions follow below. */
struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
/*
 * Static description of one kind of uncore PMU: how many counters it has,
 * where its control/counter registers live (MSR or PCI offsets), and the
 * ops/attributes used to drive it.
 */
struct intel_uncore_type {
	const char *name;
	int num_counters;		/* generic counters per box */
	int num_boxes;
	int perf_ctr_bits;		/* width of a generic counter */
	int fixed_ctr_bits;		/* width of the fixed counter */
	int num_freerunning_types;	/* entries in ->freerunning[] */
	/* Register bases; a value of 0 means "not present" (see the accessors). */
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;		/* per-box MSR stride (uniform case) */
	unsigned num_shared_regs:8;	/* extra regs appended to each box */
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;	/* ctl/ctr registers interleaved in pairs */
	unsigned *msr_offsets;		/* per-box offsets (non-uniform case) */
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu; /* for custom pmu ops */
};
/* Named shorthands for the fixed slots of intel_uncore_type.attr_groups[]. */
#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]
/*
 * Hardware access callbacks for one uncore type. init_box/exit_box and
 * hw_config/get_constraint/put_constraint are optional (NULL-checked by
 * callers); the enable/disable/read hooks are invoked unconditionally
 * through the wrappers below.
 */
struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};
/* One registered perf PMU instance of an uncore type (type->pmus array). */
struct intel_uncore_pmu {
	struct pmu			pmu;	/* embedded base perf PMU */
	char				name[UNCORE_PMU_NAME_LEN];
	int				pmu_idx;	/* index within the type */
	int				func_id;
	bool				registered;
	atomic_t			activeboxes;
	struct intel_uncore_type	*type;
	struct intel_uncore_box		**boxes;	/* presumably indexed by package — see uncore_pmu_to_box() */
};
/* A shared (per-box) extra register plus the lock/refcount guarding it. */
struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};
  96. struct intel_uncore_box {
  97. int pci_phys_id;
  98. int pkgid; /* Logical package ID */
  99. int n_active; /* number of active events */
  100. int n_events;
  101. int cpu; /* cpu to collect events */
  102. unsigned long flags;
  103. atomic_t refcnt;
  104. struct perf_event *events[UNCORE_PMC_IDX_MAX];
  105. struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
  106. struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
  107. unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
  108. u64 tags[UNCORE_PMC_IDX_MAX];
  109. struct pci_dev *pci_dev;
  110. struct intel_uncore_pmu *pmu;
  111. u64 hrtimer_duration; /* hrtimer timeout for this box */
  112. struct hrtimer hrtimer;
  113. struct list_head list;
  114. struct list_head active_list;
  115. void *io_addr;
  116. struct intel_uncore_extra_reg shared_regs[0];
  117. };
/* Bit numbers for intel_uncore_box.flags. */
#define UNCORE_BOX_FLAG_INITIATED 0
#define UNCORE_BOX_FLAG_CTL_OFFS8 1 /* event config registers are 8-byte apart */

/* Sysfs event description: attribute plus its "event=…,umask=…" config string. */
struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};

/* Register layout of one type of free-running counters. */
struct freerunning_counters {
	unsigned int counter_base;	/* address of the first counter */
	unsigned int counter_offset;	/* stride between counters of this type */
	unsigned int box_offset;	/* stride between boxes */
	unsigned int num_counters;
	unsigned int bits;		/* counter width */
};
/* Per-segment map from PCI bus number to physical package id. */
struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};

/* Look up (or create) the map for a PCI segment. */
struct pci2phy_map *__find_pci2phy_map(int segment);
/* sysfs show() for uncore_event_desc attributes. */
ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);
/* Build a uncore_event_desc entry: read-only sysfs attr + config string. */
#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

/*
 * Define a read-only format attribute whose show() simply prints the
 * literal format string (e.g. "config:0-7").
 */
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct device *dev,		\
				struct device_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
/* True if @idx is the fixed-counter slot (right after the generic counters). */
static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

/* True if @idx is the shared pseudo-index used by all free-running counters. */
static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}
  162. static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
  163. {
  164. return box->pmu->type->box_ctl;
  165. }
  166. static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
  167. {
  168. return box->pmu->type->fixed_ctl;
  169. }
  170. static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
  171. {
  172. return box->pmu->type->fixed_ctr;
  173. }
  174. static inline
  175. unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
  176. {
  177. if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
  178. return idx * 8 + box->pmu->type->event_ctl;
  179. return idx * 4 + box->pmu->type->event_ctl;
  180. }
  181. static inline
  182. unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
  183. {
  184. return idx * 8 + box->pmu->type->perf_ctr;
  185. }
  186. static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
  187. {
  188. struct intel_uncore_pmu *pmu = box->pmu;
  189. return pmu->type->msr_offsets ?
  190. pmu->type->msr_offsets[pmu->pmu_idx] :
  191. pmu->type->msr_offset * pmu->pmu_idx;
  192. }
  193. static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
  194. {
  195. if (!box->pmu->type->box_ctl)
  196. return 0;
  197. return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
  198. }
  199. static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
  200. {
  201. if (!box->pmu->type->fixed_ctl)
  202. return 0;
  203. return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
  204. }
  205. static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
  206. {
  207. return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
  208. }
  209. /*
  210. * In the uncore document, there is no event-code assigned to free running
  211. * counters. Some events need to be defined to indicate the free running
  212. * counters. The events are encoded as event-code + umask-code.
  213. *
  214. * The event-code for all free running counters is 0xff, which is the same as
  215. * the fixed counters.
  216. *
  217. * The umask-code is used to distinguish a fixed counter and a free running
  218. * counter, and different types of free running counters.
  219. * - For fixed counters, the umask-code is 0x0X.
  220. * X indicates the index of the fixed counter, which starts from 0.
  221. * - For free running counters, the umask-code uses the rest of the space.
* It would bear the format of 0xXY.
  223. * X stands for the type of free running counters, which starts from 1.
  224. * Y stands for the index of free running counters of same type, which
  225. * starts from 0.
  226. *
  227. * For example, there are three types of IIO free running counters on Skylake
  228. * server, IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
  229. * The event-code for all the free running counters is 0xff.
  230. * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
  231. * which umask-code starts from 0x10.
  232. * So 'ioclk' is encoded as event=0xff,umask=0x10
  233. * 'bw_in_port2' is the third counter of BANDWIDTH counters. BANDWIDTH is
  234. * the second type, which umask-code starts from 0x20.
  235. * So 'bw_in_port2' is encoded as event=0xff,umask=0x22
  236. */
  237. static inline unsigned int uncore_freerunning_idx(u64 config)
  238. {
  239. return ((config >> 8) & 0xf);
  240. }
  241. #define UNCORE_FREERUNNING_UMASK_START 0x10
  242. static inline unsigned int uncore_freerunning_type(u64 config)
  243. {
  244. return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
  245. }
  246. static inline
  247. unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
  248. struct perf_event *event)
  249. {
  250. unsigned int type = uncore_freerunning_type(event->hw.config);
  251. unsigned int idx = uncore_freerunning_idx(event->hw.config);
  252. struct intel_uncore_pmu *pmu = box->pmu;
  253. return pmu->type->freerunning[type].counter_base +
  254. pmu->type->freerunning[type].counter_offset * idx +
  255. pmu->type->freerunning[type].box_offset * pmu->pmu_idx;
  256. }
  257. static inline
  258. unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
  259. {
  260. return box->pmu->type->event_ctl +
  261. (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
  262. uncore_msr_box_offset(box);
  263. }
  264. static inline
  265. unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
  266. {
  267. return box->pmu->type->perf_ctr +
  268. (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
  269. uncore_msr_box_offset(box);
  270. }
  271. static inline
  272. unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
  273. {
  274. if (box->pci_dev)
  275. return uncore_pci_fixed_ctl(box);
  276. else
  277. return uncore_msr_fixed_ctl(box);
  278. }
  279. static inline
  280. unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
  281. {
  282. if (box->pci_dev)
  283. return uncore_pci_fixed_ctr(box);
  284. else
  285. return uncore_msr_fixed_ctr(box);
  286. }
  287. static inline
  288. unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
  289. {
  290. if (box->pci_dev)
  291. return uncore_pci_event_ctl(box, idx);
  292. else
  293. return uncore_msr_event_ctl(box, idx);
  294. }
  295. static inline
  296. unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
  297. {
  298. if (box->pci_dev)
  299. return uncore_pci_perf_ctr(box, idx);
  300. else
  301. return uncore_msr_perf_ctr(box, idx);
  302. }
  303. static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
  304. {
  305. return box->pmu->type->perf_ctr_bits;
  306. }
  307. static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
  308. {
  309. return box->pmu->type->fixed_ctr_bits;
  310. }
  311. static inline
  312. unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
  313. struct perf_event *event)
  314. {
  315. unsigned int type = uncore_freerunning_type(event->hw.config);
  316. return box->pmu->type->freerunning[type].bits;
  317. }
  318. static inline int uncore_num_freerunning(struct intel_uncore_box *box,
  319. struct perf_event *event)
  320. {
  321. unsigned int type = uncore_freerunning_type(event->hw.config);
  322. return box->pmu->type->freerunning[type].num_counters;
  323. }
  324. static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
  325. struct perf_event *event)
  326. {
  327. return box->pmu->type->num_freerunning_types;
  328. }
  329. static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
  330. struct perf_event *event)
  331. {
  332. unsigned int type = uncore_freerunning_type(event->hw.config);
  333. unsigned int idx = uncore_freerunning_idx(event->hw.config);
  334. return (type < uncore_num_freerunning_types(box, event)) &&
  335. (idx < uncore_num_freerunning(box, event));
  336. }
  337. static inline int uncore_num_counters(struct intel_uncore_box *box)
  338. {
  339. return box->pmu->type->num_counters;
  340. }
  341. static inline bool is_freerunning_event(struct perf_event *event)
  342. {
  343. u64 cfg = event->attr.config;
  344. return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
  345. (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
  346. }
  347. /* Check and reject invalid config */
  348. static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
  349. struct perf_event *event)
  350. {
  351. if (is_freerunning_event(event))
  352. return 0;
  353. return -EINVAL;
  354. }
  355. static inline void uncore_disable_event(struct intel_uncore_box *box,
  356. struct perf_event *event)
  357. {
  358. box->pmu->type->ops->disable_event(box, event);
  359. }
  360. static inline void uncore_enable_event(struct intel_uncore_box *box,
  361. struct perf_event *event)
  362. {
  363. box->pmu->type->ops->enable_event(box, event);
  364. }
  365. static inline u64 uncore_read_counter(struct intel_uncore_box *box,
  366. struct perf_event *event)
  367. {
  368. return box->pmu->type->ops->read_counter(box, event);
  369. }
  370. static inline void uncore_box_init(struct intel_uncore_box *box)
  371. {
  372. if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
  373. if (box->pmu->type->ops->init_box)
  374. box->pmu->type->ops->init_box(box);
  375. }
  376. }
  377. static inline void uncore_box_exit(struct intel_uncore_box *box)
  378. {
  379. if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
  380. if (box->pmu->type->ops->exit_box)
  381. box->pmu->type->ops->exit_box(box);
  382. }
  383. }
  384. static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
  385. {
  386. return (box->pkgid < 0);
  387. }
  388. static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
  389. {
  390. return container_of(event->pmu, struct intel_uncore_pmu, pmu);
  391. }
  392. static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
  393. {
  394. return event->pmu_private;
  395. }
/* Box lookup and counter maintenance (generic uncore core). */
struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
/* perf PMU callbacks shared by all uncore PMUs. */
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
/* Shared-register constraint handling. */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
/* Globals defined by the generic uncore core. */
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
/* Per-microarchitecture init hooks, grouped by implementation file. */
/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);
/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);