uncore.c

#include <linux/module.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;

static bool pcidrv_registered;
struct pci_driver *uncore_pci_driver;
/* pci bus to socket mapping */
DEFINE_RAW_SPINLOCK(pci2phy_map_lock);
struct list_head pci2phy_map_head = LIST_HEAD_INIT(pci2phy_map_head);
struct pci_extra_dev *uncore_extra_pci_dev;
static int max_packages;

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint uncore_constraint_fixed =
        EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
struct event_constraint uncore_constraint_empty =
        EVENT_CONSTRAINT(0, 0, 0);

MODULE_LICENSE("GPL");

static int uncore_pcibus_to_physid(struct pci_bus *bus)
{
        struct pci2phy_map *map;
        int phys_id = -1;

        raw_spin_lock(&pci2phy_map_lock);
        list_for_each_entry(map, &pci2phy_map_head, list) {
                if (map->segment == pci_domain_nr(bus)) {
                        phys_id = map->pbus_to_physid[bus->number];
                        break;
                }
        }
        raw_spin_unlock(&pci2phy_map_lock);

        return phys_id;
}

static void uncore_free_pcibus_map(void)
{
        struct pci2phy_map *map, *tmp;

        list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) {
                list_del(&map->list);
                kfree(map);
        }
}

struct pci2phy_map *__find_pci2phy_map(int segment)
{
        struct pci2phy_map *map, *alloc = NULL;
        int i;

        lockdep_assert_held(&pci2phy_map_lock);

lookup:
        list_for_each_entry(map, &pci2phy_map_head, list) {
                if (map->segment == segment)
                        goto end;
        }

        if (!alloc) {
                raw_spin_unlock(&pci2phy_map_lock);
                alloc = kmalloc(sizeof(struct pci2phy_map), GFP_KERNEL);
                raw_spin_lock(&pci2phy_map_lock);

                if (!alloc)
                        return NULL;

                goto lookup;
        }

        map = alloc;
        alloc = NULL;
        map->segment = segment;
        for (i = 0; i < 256; i++)
                map->pbus_to_physid[i] = -1;
        list_add_tail(&map->list, &pci2phy_map_head);

end:
        kfree(alloc);
        return map;
}

ssize_t uncore_event_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct uncore_event_desc *event =
                container_of(attr, struct uncore_event_desc, attr);
        return sprintf(buf, "%s", event->config);
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
        unsigned int pkgid = topology_logical_package_id(cpu);

        /*
         * The unsigned check also catches the '-1' return value for
         * non-existent mappings in the topology map.
         */
        return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
        u64 count;

        rdmsrl(event->hw.event_base, count);

        return count;
}

/*
 * generic get constraint function for shared match/mask registers.
 */
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
        struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
        unsigned long flags;
        bool ok = false;

        /*
         * reg->alloc can be set due to existing state, so for fake box we
         * need to ignore this, otherwise we might fail to allocate proper
         * fake state for this extra reg constraint.
         */
        if (reg1->idx == EXTRA_REG_NONE ||
            (!uncore_box_is_fake(box) && reg1->alloc))
                return NULL;

        er = &box->shared_regs[reg1->idx];
        raw_spin_lock_irqsave(&er->lock, flags);
        if (!atomic_read(&er->ref) ||
            (er->config1 == reg1->config && er->config2 == reg2->config)) {
                atomic_inc(&er->ref);
                er->config1 = reg1->config;
                er->config2 = reg2->config;
                ok = true;
        }
        raw_spin_unlock_irqrestore(&er->lock, flags);

        if (ok) {
                if (!uncore_box_is_fake(box))
                        reg1->alloc = 1;
                return NULL;
        }

        return &uncore_constraint_empty;
}

void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_extra_reg *er;
        struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;

        /*
         * Only put the constraint if the extra reg was actually allocated.
         * This also takes care of events which do not use an extra shared reg.
         *
         * Also, if this is a fake box we shouldn't touch any event state
         * (reg->alloc) and we don't care about leaving inconsistent box
         * state either since it will be thrown out.
         */
        if (uncore_box_is_fake(box) || !reg1->alloc)
                return;

        er = &box->shared_regs[reg1->idx];
        atomic_dec(&er->ref);
        reg1->alloc = 0;
}

u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
{
        struct intel_uncore_extra_reg *er;
        unsigned long flags;
        u64 config;

        er = &box->shared_regs[idx];

        raw_spin_lock_irqsave(&er->lock, flags);
        config = er->config;
        raw_spin_unlock_irqrestore(&er->lock, flags);

        return config;
}

static void uncore_assign_hw_event(struct intel_uncore_box *box,
                                   struct perf_event *event, int idx)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = idx;
        hwc->last_tag = ++box->tags[idx];

        if (uncore_pmc_fixed(hwc->idx)) {
                hwc->event_base = uncore_fixed_ctr(box);
                hwc->config_base = uncore_fixed_ctl(box);
                return;
        }

        hwc->config_base = uncore_event_ctl(box, hwc->idx);
        hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}

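/*
 * Fold the latest hardware counter value into the event count, shifting
 * by the counter width so that wrap-around of the narrower free-running,
 * fixed and generic counters is handled correctly.
 */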
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
        u64 prev_count, new_count, delta;
        int shift;

        if (uncore_pmc_freerunning(event->hw.idx))
                shift = 64 - uncore_freerunning_bits(box, event);
        else if (uncore_pmc_fixed(event->hw.idx))
                shift = 64 - uncore_fixed_ctr_bits(box);
        else
                shift = 64 - uncore_perf_ctr_bits(box);

        /* the hrtimer might modify the previous event value */
again:
        prev_count = local64_read(&event->hw.prev_count);
        new_count = uncore_read_counter(box, event);
        if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
                goto again;

        delta = (new_count << shift) - (prev_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
}

/*
 * The overflow interrupt is unavailable for SandyBridge-EP and is broken
 * for SandyBridge. So we use an hrtimer to periodically poll the counter
 * to avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
        struct intel_uncore_box *box;
        struct perf_event *event;
        unsigned long flags;
        int bit;

        box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
        if (!box->n_active || box->cpu != smp_processor_id())
                return HRTIMER_NORESTART;
        /*
         * disable local interrupts to prevent uncore_pmu_event_start/stop
         * from interrupting the update process
         */
        local_irq_save(flags);

        /*
         * handle boxes with an active event list as opposed to active
         * counters
         */
        list_for_each_entry(event, &box->active_list, active_entry) {
                uncore_perf_event_update(box, event);
        }

        for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
                uncore_perf_event_update(box, box->events[bit]);

        local_irq_restore(flags);

        hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
        return HRTIMER_RESTART;
}

void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
        hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
                      HRTIMER_MODE_REL_PINNED);
}

void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
        hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
        hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        box->hrtimer.function = uncore_pmu_hrtimer;
}

static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
                                                 int node)
{
        int i, size, numshared = type->num_shared_regs;
        struct intel_uncore_box *box;

        size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);

        box = kzalloc_node(size, GFP_KERNEL, node);
        if (!box)
                return NULL;

        for (i = 0; i < numshared; i++)
                raw_spin_lock_init(&box->shared_regs[i].lock);

        uncore_pmu_init_hrtimer(box);
        box->cpu = -1;
        box->pci_phys_id = -1;
        box->pkgid = -1;

        /* set default hrtimer timeout */
        box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;

        INIT_LIST_HEAD(&box->active_list);

        return box;
}

/*
 * Using uncore_pmu_event_init pmu event_init callback
 * as a detection point for uncore events.
 */
static int uncore_pmu_event_init(struct perf_event *event);

static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
        return &box->pmu->pmu == event->pmu;
}

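/*
 * Collect the leader (and, when dogrp is set, its siblings) into the box's
 * event_list, refusing to exceed the number of generic counters plus the
 * fixed counter if present. Returns the new number of collected events.
 */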
static int
uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
                      bool dogrp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = box->pmu->type->num_counters;
        if (box->pmu->type->fixed_ctl)
                max_count++;

        if (box->n_events >= max_count)
                return -EINVAL;

        n = box->n_events;

        if (is_box_event(box, leader)) {
                box->event_list[n] = leader;
                n++;
        }

        if (!dogrp)
                return n;

        for_each_sibling_event(event, leader) {
                if (!is_box_event(box, event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -EINVAL;

                box->event_list[n] = event;
                n++;
        }
        return n;
}

static struct event_constraint *
uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
        struct intel_uncore_type *type = box->pmu->type;
        struct event_constraint *c;

        if (type->ops->get_constraint) {
                c = type->ops->get_constraint(box, event);
                if (c)
                        return c;
        }

        if (event->attr.config == UNCORE_FIXED_EVENT)
                return &uncore_constraint_fixed;

        if (type->constraints) {
                for_each_event_constraint(c, type->constraints) {
                        if ((event->hw.config & c->cmask) == c->code)
                                return c;
                }
        }

        return &type->unconstrainted;
}

static void uncore_put_event_constraint(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        if (box->pmu->type->ops->put_constraint)
                box->pmu->type->ops->put_constraint(box, event);
}

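/*
 * Assign hardware counters to the n collected events: the fast path keeps
 * events on their previously assigned counters when the constraints still
 * allow it, otherwise fall back to the generic weight-based scheduler in
 * perf_assign_events().
 */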
static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
        unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
        struct event_constraint *c;
        int i, wmin, wmax, ret = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

        for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
                c = uncore_get_event_constraint(box, box->event_list[i]);
                box->event_constraint[i] = c;
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
        }

        /* fastpath, try to reuse previous register */
        for (i = 0; i < n; i++) {
                hwc = &box->event_list[i]->hw;
                c = box->event_constraint[i];

                /* never assigned */
                if (hwc->idx == -1)
                        break;

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;

                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;

                __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }

        /* slow path */
        if (i != n)
                ret = perf_assign_events(box->event_constraint, n,
                                         wmin, wmax, n, assign);

        if (!assign || ret) {
                for (i = 0; i < n; i++)
                        uncore_put_event_constraint(box, box->event_list[i]);
        }

        return ret ? -EINVAL : 0;
}

void uncore_pmu_event_start(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int idx = event->hw.idx;

        if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
                return;

        /*
         * Free running counters are read-only and always active.
         * Use the current counter value as the start point.
         * There is no overflow interrupt for free running counters,
         * so use the hrtimer to periodically poll the counter and
         * avoid overflow.
         */
        if (uncore_pmc_freerunning(event->hw.idx)) {
                list_add_tail(&event->active_entry, &box->active_list);
                local64_set(&event->hw.prev_count,
                            uncore_read_counter(box, event));
                if (box->n_active++ == 0)
                        uncore_pmu_start_hrtimer(box);
                return;
        }

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;
        box->events[idx] = event;
        box->n_active++;
        __set_bit(idx, box->active_mask);

        local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
        uncore_enable_event(box, event);

        if (box->n_active == 1)
                uncore_pmu_start_hrtimer(box);
}

void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;

        /* Cannot disable a free running counter, which is read-only */
        if (uncore_pmc_freerunning(hwc->idx)) {
                list_del(&event->active_entry);
                if (--box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
                uncore_perf_event_update(box, event);
                return;
        }

        if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
                uncore_disable_event(box, event);
                box->n_active--;
                box->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;

                if (box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                uncore_perf_event_update(box, event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

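/*
 * pmu::add callback: collect the event into the box, rerun counter
 * assignment, stop events that have to move, reprogram them on their new
 * counters and restart everything that was not explicitly left stopped.
 */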
int uncore_pmu_event_add(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        struct hw_perf_event *hwc = &event->hw;
        int assign[UNCORE_PMC_IDX_MAX];
        int i, n, ret;

        if (!box)
                return -ENODEV;

        /*
         * The free running counter is assigned in event_init().
         * The free running counter event and free running counter
         * are 1:1 mapped. It doesn't need to be tracked in event_list.
         */
        if (uncore_pmc_freerunning(hwc->idx)) {
                if (flags & PERF_EF_START)
                        uncore_pmu_event_start(event, 0);
                return 0;
        }

        ret = n = uncore_collect_events(box, event, false);
        if (ret < 0)
                return ret;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        ret = uncore_assign_events(box, assign, n);
        if (ret)
                return ret;

        /* save events moving to new counters */
        for (i = 0; i < box->n_events; i++) {
                event = box->event_list[i];
                hwc = &event->hw;

                if (hwc->idx == assign[i] &&
                    hwc->last_tag == box->tags[assign[i]])
                        continue;
                /*
                 * Ensure we don't accidentally enable a stopped
                 * counter simply because we rescheduled.
                 */
                if (hwc->state & PERF_HES_STOPPED)
                        hwc->state |= PERF_HES_ARCH;

                uncore_pmu_event_stop(event, PERF_EF_UPDATE);
        }

        /* reprogram moved events into new counters */
        for (i = 0; i < n; i++) {
                event = box->event_list[i];
                hwc = &event->hw;

                if (hwc->idx != assign[i] ||
                    hwc->last_tag != box->tags[assign[i]])
                        uncore_assign_hw_event(box, event, assign[i]);
                else if (i < box->n_events)
                        continue;

                if (hwc->state & PERF_HES_ARCH)
                        continue;

                uncore_pmu_event_start(event, 0);
        }
        box->n_events = n;

        return 0;
}

void uncore_pmu_event_del(struct perf_event *event, int flags)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);
        int i;

        uncore_pmu_event_stop(event, PERF_EF_UPDATE);

        /*
         * The event for a free running counter is not tracked by event_list.
         * It doesn't need to force event->hw.idx = -1 to reassign the counter,
         * because the event and the free running counter are 1:1 mapped.
         */
        if (uncore_pmc_freerunning(event->hw.idx))
                return;

        for (i = 0; i < box->n_events; i++) {
                if (event == box->event_list[i]) {
                        uncore_put_event_constraint(box, event);

                        for (++i; i < box->n_events; i++)
                                box->event_list[i - 1] = box->event_list[i];

                        --box->n_events;
                        break;
                }
        }

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
}

void uncore_pmu_event_read(struct perf_event *event)
{
        struct intel_uncore_box *box = uncore_event_to_box(event);

        uncore_perf_event_update(box, event);
}

/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
                                 struct perf_event *event)
{
        struct perf_event *leader = event->group_leader;
        struct intel_uncore_box *fake_box;
        int ret = -EINVAL, n;

        /* The free running counter is always active. */
        if (uncore_pmc_freerunning(event->hw.idx))
                return 0;

        fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
        if (!fake_box)
                return -ENOMEM;

        fake_box->pmu = pmu;
        /*
         * the event is not yet connected with its
         * siblings therefore we must first collect
         * existing siblings, then add the new event
         * before we can simulate the scheduling
         */
        n = uncore_collect_events(fake_box, leader, true);
        if (n < 0)
                goto out;

        fake_box->n_events = n;
        n = uncore_collect_events(fake_box, event, false);
        if (n < 0)
                goto out;

        fake_box->n_events = n;

        ret = uncore_assign_events(fake_box, NULL, n);
out:
        kfree(fake_box);
        return ret;
}

static int uncore_pmu_event_init(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        struct hw_perf_event *hwc = &event->hw;
        int ret;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        pmu = uncore_event_to_pmu(event);
        /* no device found for this pmu */
        if (pmu->func_id < 0)
                return -ENOENT;

        /*
         * The uncore PMU measures at all privilege levels all the time,
         * so it doesn't make sense to specify any exclude bits.
         */
        if (event->attr.exclude_user || event->attr.exclude_kernel ||
            event->attr.exclude_hv || event->attr.exclude_idle)
                return -EINVAL;

        /* Sampling not supported yet */
        if (hwc->sample_period)
                return -EINVAL;

        /*
         * Place all uncore events for a particular physical package
         * onto a single cpu
         */
        if (event->cpu < 0)
                return -EINVAL;
        box = uncore_pmu_to_box(pmu, event->cpu);
        if (!box || box->cpu < 0)
                return -EINVAL;
        event->cpu = box->cpu;
        event->pmu_private = box;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        event->hw.idx = -1;
        event->hw.last_tag = ~0ULL;
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;

        if (event->attr.config == UNCORE_FIXED_EVENT) {
                /* no fixed counter */
                if (!pmu->type->fixed_ctl)
                        return -EINVAL;
                /*
                 * if there is only one fixed counter, only the first pmu
                 * can access the fixed counter
                 */
                if (pmu->type->single_fixed && pmu->pmu_idx > 0)
                        return -EINVAL;

                /* fixed counters have event field hardcoded to zero */
                hwc->config = 0ULL;
        } else if (is_freerunning_event(event)) {
                hwc->config = event->attr.config;
                if (!check_valid_freerunning_event(box, event))
                        return -EINVAL;
                event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
                /*
                 * The free running counter event and free running counter
                 * are always 1:1 mapped.
                 * The free running counter is always active.
                 * Assign the free running counter here.
                 */
                event->hw.event_base = uncore_freerunning_counter(box, event);
        } else {
                hwc->config = event->attr.config &
                              (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
                if (pmu->type->ops->hw_config) {
                        ret = pmu->type->ops->hw_config(box, event);
                        if (ret)
                                return ret;
                }
        }

        if (event->group_leader != event)
                ret = uncore_validate_group(pmu, event);
        else
                ret = 0;

        return ret;
}

static void uncore_pmu_enable(struct pmu *pmu)
{
        struct intel_uncore_pmu *uncore_pmu;
        struct intel_uncore_box *box;

        uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
        if (!uncore_pmu)
                return;

        box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
        if (!box)
                return;

        if (uncore_pmu->type->ops->enable_box)
                uncore_pmu->type->ops->enable_box(box);
}

static void uncore_pmu_disable(struct pmu *pmu)
{
        struct intel_uncore_pmu *uncore_pmu;
        struct intel_uncore_box *box;

        uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
        if (!uncore_pmu)
                return;

        box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
        if (!box)
                return;

        if (uncore_pmu->type->ops->disable_box)
                uncore_pmu->type->ops->disable_box(box);
}

static ssize_t uncore_get_attr_cpumask(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL);

static struct attribute *uncore_pmu_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group uncore_pmu_attr_group = {
        .attrs = uncore_pmu_attrs,
};

static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
        int ret;

        if (!pmu->type->pmu) {
                pmu->pmu = (struct pmu) {
                        .attr_groups = pmu->type->attr_groups,
                        .task_ctx_nr = perf_invalid_context,
                        .pmu_enable = uncore_pmu_enable,
                        .pmu_disable = uncore_pmu_disable,
                        .event_init = uncore_pmu_event_init,
                        .add = uncore_pmu_event_add,
                        .del = uncore_pmu_event_del,
                        .start = uncore_pmu_event_start,
                        .stop = uncore_pmu_event_stop,
                        .read = uncore_pmu_event_read,
                        .module = THIS_MODULE,
                };
        } else {
                pmu->pmu = *pmu->type->pmu;
                pmu->pmu.attr_groups = pmu->type->attr_groups;
        }

        if (pmu->type->num_boxes == 1) {
                if (strlen(pmu->type->name) > 0)
                        sprintf(pmu->name, "uncore_%s", pmu->type->name);
                else
                        sprintf(pmu->name, "uncore");
        } else {
                sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
                        pmu->pmu_idx);
        }

        ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
        if (!ret)
                pmu->registered = true;
        return ret;
}

static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
{
        if (!pmu->registered)
                return;
        perf_pmu_unregister(&pmu->pmu);
        pmu->registered = false;
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
        int pkg;

        for (pkg = 0; pkg < max_packages; pkg++)
                kfree(pmu->boxes[pkg]);
        kfree(pmu->boxes);
}

static void uncore_type_exit(struct intel_uncore_type *type)
{
        struct intel_uncore_pmu *pmu = type->pmus;
        int i;

        if (pmu) {
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        uncore_pmu_unregister(pmu);
                        uncore_free_boxes(pmu);
                }
                kfree(type->pmus);
                type->pmus = NULL;
        }
        kfree(type->events_group);
        type->events_group = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
        for (; *types; types++)
                uncore_type_exit(*types);
}

static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
        struct intel_uncore_pmu *pmus;
        size_t size;
        int i, j;

        pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL);
        if (!pmus)
                return -ENOMEM;

        size = max_packages * sizeof(struct intel_uncore_box *);

        for (i = 0; i < type->num_boxes; i++) {
                pmus[i].func_id = setid ? i : -1;
                pmus[i].pmu_idx = i;
                pmus[i].type = type;
                pmus[i].boxes = kzalloc(size, GFP_KERNEL);
                if (!pmus[i].boxes)
                        goto err;
        }

        type->pmus = pmus;
        type->unconstrainted = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
                                   0, type->num_counters, 0, 0);

        if (type->event_descs) {
                struct {
                        struct attribute_group group;
                        struct attribute *attrs[];
                } *attr_group;

                for (i = 0; type->event_descs[i].attr.attr.name; i++);

                attr_group = kzalloc(struct_size(attr_group, attrs, i + 1),
                                     GFP_KERNEL);
                if (!attr_group)
                        goto err;

                attr_group->group.name = "events";
                attr_group->group.attrs = attr_group->attrs;

                for (j = 0; j < i; j++)
                        attr_group->attrs[j] = &type->event_descs[j].attr.attr;

                type->events_group = &attr_group->group;
        }

        type->pmu_group = &uncore_pmu_attr_group;

        return 0;

err:
        for (i = 0; i < type->num_boxes; i++)
                kfree(pmus[i].boxes);
        kfree(pmus);

        return -ENOMEM;
}

static int __init
uncore_types_init(struct intel_uncore_type **types, bool setid)
{
        int ret;

        for (; *types; types++) {
                ret = uncore_type_init(*types, setid);
                if (ret)
                        return ret;
        }
        return 0;
}

/*
 * add a pci uncore device
 */
static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu = NULL;
        struct intel_uncore_box *box;
        int phys_id, pkg, ret;

        phys_id = uncore_pcibus_to_physid(pdev->bus);
        if (phys_id < 0)
                return -ENODEV;

        pkg = topology_phys_to_logical_pkg(phys_id);
        if (pkg < 0)
                return -EINVAL;

        if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
                int idx = UNCORE_PCI_DEV_IDX(id->driver_data);

                uncore_extra_pci_dev[pkg].dev[idx] = pdev;
                pci_set_drvdata(pdev, NULL);
                return 0;
        }

        type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];

        /*
         * Some platforms, e.g. Knights Landing, use a common PCI device ID
         * for multiple instances of an uncore PMU device type. We should check
         * PCI slot and func to indicate the uncore box.
         */
        if (id->driver_data & ~0xffff) {
                struct pci_driver *pci_drv = pdev->driver;
                const struct pci_device_id *ids = pci_drv->id_table;
                unsigned int devfn;

                while (ids && ids->vendor) {
                        if ((ids->vendor == pdev->vendor) &&
                            (ids->device == pdev->device)) {
                                devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
                                                  UNCORE_PCI_DEV_FUNC(ids->driver_data));
                                if (devfn == pdev->devfn) {
                                        pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
                                        break;
                                }
                        }
                        ids++;
                }
                if (pmu == NULL)
                        return -ENODEV;
        } else {
                /*
                 * for performance monitoring units with multiple boxes,
                 * each box has a different function id.
                 */
                pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
        }

        if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
                return -EINVAL;

        box = uncore_alloc_box(type, NUMA_NO_NODE);
        if (!box)
                return -ENOMEM;

        if (pmu->func_id < 0)
                pmu->func_id = pdev->devfn;
        else
                WARN_ON_ONCE(pmu->func_id != pdev->devfn);

        atomic_inc(&box->refcnt);
        box->pci_phys_id = phys_id;
        box->pkgid = pkg;
        box->pci_dev = pdev;
        box->pmu = pmu;
        uncore_box_init(box);
        pci_set_drvdata(pdev, box);

        pmu->boxes[pkg] = box;
        if (atomic_inc_return(&pmu->activeboxes) > 1)
                return 0;

        /* First active box registers the pmu */
        ret = uncore_pmu_register(pmu);
        if (ret) {
                pci_set_drvdata(pdev, NULL);
                pmu->boxes[pkg] = NULL;
                uncore_box_exit(box);
                kfree(box);
        }
        return ret;
}

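/*
 * Undo uncore_pci_probe() for this device: either clear its slot in the
 * extra-device table or tear down its box, unregistering the PMU when its
 * last active box goes away.
 */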
static void uncore_pci_remove(struct pci_dev *pdev)
{
        struct intel_uncore_box *box;
        struct intel_uncore_pmu *pmu;
        int i, phys_id, pkg;

        phys_id = uncore_pcibus_to_physid(pdev->bus);

        box = pci_get_drvdata(pdev);
        if (!box) {
                pkg = topology_phys_to_logical_pkg(phys_id);
                for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
                        if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
                                uncore_extra_pci_dev[pkg].dev[i] = NULL;
                                break;
                        }
                }
                WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
                return;
        }

        pmu = box->pmu;
        if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
                return;

        pci_set_drvdata(pdev, NULL);
        pmu->boxes[box->pkgid] = NULL;
        if (atomic_dec_return(&pmu->activeboxes) == 0)
                uncore_pmu_unregister(pmu);
        uncore_box_exit(box);
        kfree(box);
}

static int __init uncore_pci_init(void)
{
        size_t size;
        int ret;

        size = max_packages * sizeof(struct pci_extra_dev);
        uncore_extra_pci_dev = kzalloc(size, GFP_KERNEL);
        if (!uncore_extra_pci_dev) {
                ret = -ENOMEM;
                goto err;
        }

        ret = uncore_types_init(uncore_pci_uncores, false);
        if (ret)
                goto errtype;

        uncore_pci_driver->probe = uncore_pci_probe;
        uncore_pci_driver->remove = uncore_pci_remove;

        ret = pci_register_driver(uncore_pci_driver);
        if (ret)
                goto errtype;

        pcidrv_registered = true;
        return 0;

errtype:
        uncore_types_exit(uncore_pci_uncores);
        kfree(uncore_extra_pci_dev);
        uncore_extra_pci_dev = NULL;
        uncore_free_pcibus_map();
err:
        uncore_pci_uncores = empty_uncore;
        return ret;
}

static void uncore_pci_exit(void)
{
        if (pcidrv_registered) {
                pcidrv_registered = false;
                pci_unregister_driver(uncore_pci_driver);
                uncore_types_exit(uncore_pci_uncores);
                kfree(uncore_extra_pci_dev);
                uncore_free_pcibus_map();
        }
}

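/*
 * Move all boxes of this type in the affected package from old_cpu to
 * new_cpu, migrating any active perf context so counting continues on the
 * new target CPU.
 */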
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
                                   int new_cpu)
{
        struct intel_uncore_pmu *pmu = type->pmus;
        struct intel_uncore_box *box;
        int i, pkg;

        pkg = topology_logical_package_id(old_cpu < 0 ? new_cpu : old_cpu);
        for (i = 0; i < type->num_boxes; i++, pmu++) {
                box = pmu->boxes[pkg];
                if (!box)
                        continue;

                if (old_cpu < 0) {
                        WARN_ON_ONCE(box->cpu != -1);
                        box->cpu = new_cpu;
                        continue;
                }

                WARN_ON_ONCE(box->cpu != old_cpu);
                box->cpu = -1;
                if (new_cpu < 0)
                        continue;

                uncore_pmu_cancel_hrtimer(box);
                perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
                box->cpu = new_cpu;
        }
}

static void uncore_change_context(struct intel_uncore_type **uncores,
                                  int old_cpu, int new_cpu)
{
        for (; *uncores; uncores++)
                uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
        struct intel_uncore_type *type, **types = uncore_msr_uncores;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i, pkg, target;

        /* Check if exiting cpu is used for collecting uncore events */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
                goto unref;
        /* Find a new cpu to collect uncore events */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

        /* Migrate uncore events to the new target */
        if (target < nr_cpu_ids)
                cpumask_set_cpu(target, &uncore_cpu_mask);
        else
                target = -1;

        uncore_change_context(uncore_msr_uncores, cpu, target);
        uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
        /* Clear the references */
        pkg = topology_logical_package_id(cpu);
        for (; *types; types++) {
                type = *types;
                pmu = type->pmus;
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        box = pmu->boxes[pkg];
                        if (box && atomic_dec_return(&box->refcnt) == 0)
                                uncore_box_exit(box);
                }
        }
        return 0;
}

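/*
 * Allocate any per-package boxes that are still missing for the MSR uncore
 * types when a CPU in the package comes online; on failure, free only the
 * boxes allocated by this call.
 */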
static int allocate_boxes(struct intel_uncore_type **types,
                          unsigned int pkg, unsigned int cpu)
{
        struct intel_uncore_box *box, *tmp;
        struct intel_uncore_type *type;
        struct intel_uncore_pmu *pmu;
        LIST_HEAD(allocated);
        int i;

        /* Try to allocate all required boxes */
        for (; *types; types++) {
                type = *types;
                pmu = type->pmus;
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        if (pmu->boxes[pkg])
                                continue;
                        box = uncore_alloc_box(type, cpu_to_node(cpu));
                        if (!box)
                                goto cleanup;
                        box->pmu = pmu;
                        box->pkgid = pkg;
                        list_add(&box->active_list, &allocated);
                }
        }
        /* Install them in the pmus */
        list_for_each_entry_safe(box, tmp, &allocated, active_list) {
                list_del_init(&box->active_list);
                box->pmu->boxes[pkg] = box;
        }
        return 0;

cleanup:
        list_for_each_entry_safe(box, tmp, &allocated, active_list) {
                list_del_init(&box->active_list);
                kfree(box);
        }
        return -ENOMEM;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
        struct intel_uncore_type *type, **types = uncore_msr_uncores;
        struct intel_uncore_pmu *pmu;
        struct intel_uncore_box *box;
        int i, ret, pkg, target;

        pkg = topology_logical_package_id(cpu);
        ret = allocate_boxes(types, pkg, cpu);
        if (ret)
                return ret;

        for (; *types; types++) {
                type = *types;
                pmu = type->pmus;
                for (i = 0; i < type->num_boxes; i++, pmu++) {
                        box = pmu->boxes[pkg];
                        if (box && atomic_inc_return(&box->refcnt) == 1)
                                uncore_box_init(box);
                }
        }

        /*
         * Check if there is an online cpu in the package
         * which collects uncore events already.
         */
        target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
        if (target < nr_cpu_ids)
                return 0;

        cpumask_set_cpu(cpu, &uncore_cpu_mask);

        uncore_change_context(uncore_msr_uncores, -1, cpu);
        uncore_change_context(uncore_pci_uncores, -1, cpu);
        return 0;
}

static int __init type_pmu_register(struct intel_uncore_type *type)
{
        int i, ret;

        for (i = 0; i < type->num_boxes; i++) {
                ret = uncore_pmu_register(&type->pmus[i]);
                if (ret)
                        return ret;
        }
        return 0;
}

static int __init uncore_msr_pmus_register(void)
{
        struct intel_uncore_type **types = uncore_msr_uncores;
        int ret;

        for (; *types; types++) {
                ret = type_pmu_register(*types);
                if (ret)
                        return ret;
        }
        return 0;
}

static int __init uncore_cpu_init(void)
{
        int ret;

        ret = uncore_types_init(uncore_msr_uncores, true);
        if (ret)
                goto err;

        ret = uncore_msr_pmus_register();
        if (ret)
                goto err;
        return 0;
err:
        uncore_types_exit(uncore_msr_uncores);
        uncore_msr_uncores = empty_uncore;
        return ret;
}

#define X86_UNCORE_MODEL_MATCH(model, init) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_uncore_init_fun {
        void (*cpu_init)(void);
        int (*pci_init)(void);
};

static const struct intel_uncore_init_fun nhm_uncore_init __initconst = {
        .cpu_init = nhm_uncore_cpu_init,
};

static const struct intel_uncore_init_fun snb_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = snb_uncore_pci_init,
};

static const struct intel_uncore_init_fun ivb_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = ivb_uncore_pci_init,
};

static const struct intel_uncore_init_fun hsw_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = hsw_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdw_uncore_init __initconst = {
        .cpu_init = snb_uncore_cpu_init,
        .pci_init = bdw_uncore_pci_init,
};

static const struct intel_uncore_init_fun snbep_uncore_init __initconst = {
        .cpu_init = snbep_uncore_cpu_init,
        .pci_init = snbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun nhmex_uncore_init __initconst = {
        .cpu_init = nhmex_uncore_cpu_init,
};

static const struct intel_uncore_init_fun ivbep_uncore_init __initconst = {
        .cpu_init = ivbep_uncore_cpu_init,
        .pci_init = ivbep_uncore_pci_init,
};

static const struct intel_uncore_init_fun hswep_uncore_init __initconst = {
        .cpu_init = hswep_uncore_cpu_init,
        .pci_init = hswep_uncore_pci_init,
};

static const struct intel_uncore_init_fun bdx_uncore_init __initconst = {
        .cpu_init = bdx_uncore_cpu_init,
        .pci_init = bdx_uncore_pci_init,
};

static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
        .cpu_init = knl_uncore_cpu_init,
        .pci_init = knl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
        .cpu_init = skl_uncore_cpu_init,
        .pci_init = skl_uncore_pci_init,
};

static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
        .cpu_init = skx_uncore_cpu_init,
        .pci_init = skx_uncore_pci_init,
};

static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE, nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP, nhm_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE, snb_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE, ivb_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT, hsw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX, nhmex_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX, nhmex_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, ivbep_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X, hswep_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
        X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
        {},
};

MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);

static int __init intel_uncore_init(void)
{
        const struct x86_cpu_id *id;
        struct intel_uncore_init_fun *uncore_init;
        int pret = 0, cret = 0, ret;

        id = x86_match_cpu(intel_uncore_match);
        if (!id)
                return -ENODEV;

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return -ENODEV;

        max_packages = topology_max_packages();

        uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
        if (uncore_init->pci_init) {
                pret = uncore_init->pci_init();
                if (!pret)
                        pret = uncore_pci_init();
        }

        if (uncore_init->cpu_init) {
                uncore_init->cpu_init();
                cret = uncore_cpu_init();
        }

        if (cret && pret)
                return -ENODEV;

        /* Install hotplug callbacks to setup the targets for each package */
        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
                                "perf/x86/intel/uncore:online",
                                uncore_event_cpu_online,
                                uncore_event_cpu_offline);
        if (ret)
                goto err;
        return 0;

err:
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
        return ret;
}
module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
        cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
}
module_exit(intel_uncore_exit);