- /*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
- *
- * Author: Jacob Shin <jacob.shin@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
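- /*
- * The PMUs registered below appear under /sys/bus/event_source/devices/
- * as amd_nb/amd_l2 (families below 17h) or amd_df/amd_l3 (family 17h)
- * and support system-wide counting only, e.g. (the event encoding here
- * is illustrative, not a real event):
- *
- *   perf stat -a -e amd_df/event=0x07,umask=0x00/ -- sleep 1
- */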
- #include <linux/perf_event.h>
- #include <linux/percpu.h>
- #include <linux/types.h>
- #include <linux/slab.h>
- #include <linux/init.h>
- #include <linux/cpu.h>
- #include <linux/cpumask.h>
- #include <asm/cpufeature.h>
- #include <asm/perf_event.h>
- #include <asm/msr.h>
- #include <asm/smp.h>
- #define NUM_COUNTERS_NB 4
- #define NUM_COUNTERS_L2 4
- #define NUM_COUNTERS_L3 6
- #define MAX_COUNTERS 6
- #define RDPMC_BASE_NB 6
- #define RDPMC_BASE_LLC 10
- #define COUNTER_SHIFT 16
- #undef pr_fmt
- #define pr_fmt(fmt) "amd_uncore: " fmt
- static int num_counters_llc;
- static int num_counters_nb;
- static bool l3_mask;
- static HLIST_HEAD(uncore_unused_list);
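- /*
- * One amd_uncore object is shared by every cpu in the same NB / LLC
- * domain: the per-cpu pointers below all alias a single instance,
- * refcnt counts the attached cpus, and cpu names the one cpu that
- * programs the MSRs and hosts the events for the whole domain.
- */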
- struct amd_uncore {
- int id;
- int refcnt;
- int cpu;
- int num_counters;
- int rdpmc_base;
- u32 msr_base;
- cpumask_t *active_mask;
- struct pmu *pmu;
- struct perf_event *events[MAX_COUNTERS];
- struct hlist_node node;
- };
- static struct amd_uncore * __percpu *amd_uncore_nb;
- static struct amd_uncore * __percpu *amd_uncore_llc;
- static struct pmu amd_nb_pmu;
- static struct pmu amd_llc_pmu;
- static cpumask_t amd_nb_active_mask;
- static cpumask_t amd_llc_active_mask;
- static bool is_nb_event(struct perf_event *event)
- {
- return event->pmu->type == amd_nb_pmu.type;
- }
- static bool is_llc_event(struct perf_event *event)
- {
- return event->pmu->type == amd_llc_pmu.type;
- }
- static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
- {
- if (is_nb_event(event) && amd_uncore_nb)
- return *per_cpu_ptr(amd_uncore_nb, event->cpu);
- else if (is_llc_event(event) && amd_uncore_llc)
- return *per_cpu_ptr(amd_uncore_llc, event->cpu);
- return NULL;
- }
- static void amd_uncore_read(struct perf_event *event)
- {
- struct hw_perf_event *hwc = &event->hw;
- u64 prev, new;
- s64 delta;
- /*
- * since we do not enable counter overflow interrupts,
- * we do not have to worry about prev_count changing on us
- */
- prev = local64_read(&hwc->prev_count);
- rdpmcl(hwc->event_base_rdpmc, new);
- local64_set(&hwc->prev_count, new);
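- /*
- * The hardware counters are 48 bits wide: shifting both values left
- * by COUNTER_SHIFT (16) and sign-extending back down with the
- * arithmetic right shift below yields a correct delta even when the
- * counter wraps around.
- */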
- delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
- delta >>= COUNTER_SHIFT;
- local64_add(delta, &event->count);
- }
- static void amd_uncore_start(struct perf_event *event, int flags)
- {
- struct hw_perf_event *hwc = &event->hw;
- if (flags & PERF_EF_RELOAD)
- wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
- hwc->state = 0;
- wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
- perf_event_update_userpage(event);
- }
- static void amd_uncore_stop(struct perf_event *event, int flags)
- {
- struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config);
- hwc->state |= PERF_HES_STOPPED;
- if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
- amd_uncore_read(event);
- hwc->state |= PERF_HES_UPTODATE;
- }
- }
- static int amd_uncore_add(struct perf_event *event, int flags)
- {
- int i;
- struct amd_uncore *uncore = event_to_amd_uncore(event);
- struct hw_perf_event *hwc = &event->hw;
- /* are we already assigned? */
- if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
- goto out;
- for (i = 0; i < uncore->num_counters; i++) {
- if (uncore->events[i] == event) {
- hwc->idx = i;
- goto out;
- }
- }
- /* if not, take the first available counter */
- hwc->idx = -1;
- for (i = 0; i < uncore->num_counters; i++) {
- if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
- hwc->idx = i;
- break;
- }
- }
- out:
- if (hwc->idx == -1)
- return -EBUSY;
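- /*
- * The PERF_CTL / PERF_CTR MSRs are laid out as adjacent pairs
- * starting at msr_base, hence the stride of 2 below: the control
- * register of counter idx sits at msr_base + 2 * idx and its count
- * register immediately after it.
- */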
- hwc->config_base = uncore->msr_base + (2 * hwc->idx);
- hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
- hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
- hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
- if (flags & PERF_EF_START)
- amd_uncore_start(event, PERF_EF_RELOAD);
- return 0;
- }
- static void amd_uncore_del(struct perf_event *event, int flags)
- {
- int i;
- struct amd_uncore *uncore = event_to_amd_uncore(event);
- struct hw_perf_event *hwc = &event->hw;
- amd_uncore_stop(event, PERF_EF_UPDATE);
- for (i = 0; i < uncore->num_counters; i++) {
- if (cmpxchg(&uncore->events[i], event, NULL) == event)
- break;
- }
- hwc->idx = -1;
- }
- static int amd_uncore_event_init(struct perf_event *event)
- {
- struct amd_uncore *uncore;
- struct hw_perf_event *hwc = &event->hw;
- if (event->attr.type != event->pmu->type)
- return -ENOENT;
- /*
- * NB and Last level cache counters (MSRs) are shared across all cores
- * that share the same NB / Last level cache. On family 16h and below,
- * interrupts can be directed to a single target core; however, event
- * counts generated by processes running on other cores cannot be masked
- * out. So we do not support sampling and per-thread events via
- * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
- */
- /* NB and Last level cache counters do not have usr/os/guest/host bits */
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_host || event->attr.exclude_guest)
- return -EINVAL;
- hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
- hwc->idx = -1;
- if (event->cpu < 0)
- return -EINVAL;
- /*
- * SliceMask and ThreadMask need to be set for certain L3 events on
- * family 17h. For other events, the two fields do not affect the count.
- */
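- /*
- * On family 17h the L3 PMC config carries ThreadMask in bits 63:56
- * and SliceMask in bits 51:48: counting is enabled on all slices and
- * restricted to the hardware thread the event's cpu maps onto.
- */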
- if (l3_mask && is_llc_event(event)) {
- int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4);
- if (smp_num_siblings > 1)
- thread += cpu_data(event->cpu).apicid & 1;
- hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) &
- AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK;
- }
- uncore = event_to_amd_uncore(event);
- if (!uncore)
- return -ENODEV;
- /*
- * since requests can come in on any of the shared cores, we remap
- * them all to a single common cpu.
- */
- event->cpu = uncore->cpu;
- return 0;
- }
- static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- cpumask_t *active_mask;
- struct pmu *pmu = dev_get_drvdata(dev);
- if (pmu->type == amd_nb_pmu.type)
- active_mask = &amd_nb_active_mask;
- else if (pmu->type == amd_llc_pmu.type)
- active_mask = &amd_llc_active_mask;
- else
- return 0;
- return cpumap_print_to_pagebuf(true, buf, active_mask);
- }
- static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);
- static struct attribute *amd_uncore_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
- };
- static struct attribute_group amd_uncore_attr_group = {
- .attrs = amd_uncore_attrs,
- };
- /*
- * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
- * on family
- */
- #define AMD_FORMAT_ATTR(_dev, _name, _format) \
- static ssize_t \
- _dev##_show##_name(struct device *dev, \
- struct device_attribute *attr, \
- char *page) \
- { \
- BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
- return sprintf(page, _format "\n"); \
- } \
- static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);
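- /*
- * For example, AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60")
- * expands to an event_show_df() helper plus a format_attr_event_df
- * device attribute; which helper a given attribute's ->show points at
- * is selected at init time based on family.
- */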
- /* Used for each uncore counter type */
- #define AMD_ATTRIBUTE(_name) \
- static struct attribute *amd_uncore_format_attr_##_name[] = { \
- &format_attr_event_##_name.attr, \
- &format_attr_umask.attr, \
- NULL, \
- }; \
- static struct attribute_group amd_uncore_format_group_##_name = { \
- .name = "format", \
- .attrs = amd_uncore_format_attr_##_name, \
- }; \
- static const struct attribute_group *amd_uncore_attr_groups_##_name[] = { \
- &amd_uncore_attr_group, \
- &amd_uncore_format_group_##_name, \
- NULL, \
- };
- AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
- AMD_FORMAT_ATTR(umask, , "config:8-15");
- AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
- AMD_FORMAT_ATTR(event, _l3, "config:0-7");
- AMD_ATTRIBUTE(df);
- AMD_ATTRIBUTE(l3);
- static struct pmu amd_nb_pmu = {
- .task_ctx_nr = perf_invalid_context,
- .event_init = amd_uncore_event_init,
- .add = amd_uncore_add,
- .del = amd_uncore_del,
- .start = amd_uncore_start,
- .stop = amd_uncore_stop,
- .read = amd_uncore_read,
- .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
- };
- static struct pmu amd_llc_pmu = {
- .task_ctx_nr = perf_invalid_context,
- .event_init = amd_uncore_event_init,
- .add = amd_uncore_add,
- .del = amd_uncore_del,
- .start = amd_uncore_start,
- .stop = amd_uncore_stop,
- .read = amd_uncore_read,
- .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
- };
- static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
- {
- return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
- cpu_to_node(cpu));
- }
- static int amd_uncore_cpu_up_prepare(unsigned int cpu)
- {
- struct amd_uncore *uncore_nb = NULL, *uncore_llc;
- if (amd_uncore_nb) {
- uncore_nb = amd_uncore_alloc(cpu);
- if (!uncore_nb)
- goto fail;
- uncore_nb->cpu = cpu;
- uncore_nb->num_counters = num_counters_nb;
- uncore_nb->rdpmc_base = RDPMC_BASE_NB;
- uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
- uncore_nb->active_mask = &amd_nb_active_mask;
- uncore_nb->pmu = &amd_nb_pmu;
- uncore_nb->id = -1;
- *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
- }
- if (amd_uncore_llc) {
- uncore_llc = amd_uncore_alloc(cpu);
- if (!uncore_llc)
- goto fail;
- uncore_llc->cpu = cpu;
- uncore_llc->num_counters = num_counters_llc;
- uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
- uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
- uncore_llc->active_mask = &amd_llc_active_mask;
- uncore_llc->pmu = &amd_llc_pmu;
- uncore_llc->id = -1;
- *per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
- }
- return 0;
- fail:
- if (amd_uncore_nb)
- *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
- kfree(uncore_nb);
- return -ENOMEM;
- }
- static struct amd_uncore *
- amd_uncore_find_online_sibling(struct amd_uncore *this,
- struct amd_uncore * __percpu *uncores)
- {
- unsigned int cpu;
- struct amd_uncore *that;
- for_each_online_cpu(cpu) {
- that = *per_cpu_ptr(uncores, cpu);
- if (!that)
- continue;
- if (this == that)
- continue;
- if (this->id == that->id) {
- hlist_add_head(&this->node, &uncore_unused_list);
- this = that;
- break;
- }
- }
- this->refcnt++;
- return this;
- }
- static int amd_uncore_cpu_starting(unsigned int cpu)
- {
- unsigned int eax, ebx, ecx, edx;
- struct amd_uncore *uncore;
- if (amd_uncore_nb) {
- uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
- cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
- uncore->id = ecx & 0xff;
- uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
- *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
- }
- if (amd_uncore_llc) {
- uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
- uncore->id = per_cpu(cpu_llc_id, cpu);
- uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
- *per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
- }
- return 0;
- }
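- /*
- * amd_uncore_cpu_starting() runs with interrupts disabled, where
- * kfree() must not be called; the duplicates it parks on
- * uncore_unused_list are therefore reaped here, from the regular
- * online callback, instead.
- */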
- static void uncore_clean_online(void)
- {
- struct amd_uncore *uncore;
- struct hlist_node *n;
- hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
- hlist_del(&uncore->node);
- kfree(uncore);
- }
- }
- static void uncore_online(unsigned int cpu,
- struct amd_uncore * __percpu *uncores)
- {
- struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
- uncore_clean_online();
- if (cpu == uncore->cpu)
- cpumask_set_cpu(cpu, uncore->active_mask);
- }
- static int amd_uncore_cpu_online(unsigned int cpu)
- {
- if (amd_uncore_nb)
- uncore_online(cpu, amd_uncore_nb);
- if (amd_uncore_llc)
- uncore_online(cpu, amd_uncore_llc);
- return 0;
- }
- static void uncore_down_prepare(unsigned int cpu,
- struct amd_uncore * __percpu *uncores)
- {
- unsigned int i;
- struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
- if (this->cpu != cpu)
- return;
- /* this cpu is going down; migrate to a shared sibling if possible */
- for_each_online_cpu(i) {
- struct amd_uncore *that = *per_cpu_ptr(uncores, i);
- if (cpu == i)
- continue;
- if (this == that) {
- perf_pmu_migrate_context(this->pmu, cpu, i);
- cpumask_clear_cpu(cpu, that->active_mask);
- cpumask_set_cpu(i, that->active_mask);
- that->cpu = i;
- break;
- }
- }
- }
- static int amd_uncore_cpu_down_prepare(unsigned int cpu)
- {
- if (amd_uncore_nb)
- uncore_down_prepare(cpu, amd_uncore_nb);
- if (amd_uncore_llc)
- uncore_down_prepare(cpu, amd_uncore_llc);
- return 0;
- }
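- /*
- * Drop the dying cpu's reference; the shared structure itself is
- * freed only once the last cpu in the NB / LLC domain has gone away.
- */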
- static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
- {
- struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
- if (cpu == uncore->cpu)
- cpumask_clear_cpu(cpu, uncore->active_mask);
- if (!--uncore->refcnt)
- kfree(uncore);
- *per_cpu_ptr(uncores, cpu) = NULL;
- }
- static int amd_uncore_cpu_dead(unsigned int cpu)
- {
- if (amd_uncore_nb)
- uncore_dead(cpu, amd_uncore_nb);
- if (amd_uncore_llc)
- uncore_dead(cpu, amd_uncore_llc);
- return 0;
- }
- static int __init amd_uncore_init(void)
- {
- int ret = -ENODEV;
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
- return -ENODEV;
- if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
- return -ENODEV;
- if (boot_cpu_data.x86 == 0x17) {
- /*
- * On family 17h, the Northbridge counters are repurposed as Data
- * Fabric counters, and L3 counters are supported as well. The PMUs
- * are exported based on family as either L2 or L3 and NB or DF.
- */
- num_counters_nb = NUM_COUNTERS_NB;
- num_counters_llc = NUM_COUNTERS_L3;
- amd_nb_pmu.name = "amd_df";
- amd_llc_pmu.name = "amd_l3";
- format_attr_event_df.show = &event_show_df;
- format_attr_event_l3.show = &event_show_l3;
- l3_mask = true;
- } else {
- num_counters_nb = NUM_COUNTERS_NB;
- num_counters_llc = NUM_COUNTERS_L2;
- amd_nb_pmu.name = "amd_nb";
- amd_llc_pmu.name = "amd_l2";
- format_attr_event_df = format_attr_event;
- format_attr_event_l3 = format_attr_event;
- l3_mask = false;
- }
- amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
- amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;
- if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
- amd_uncore_nb = alloc_percpu(struct amd_uncore *);
- if (!amd_uncore_nb) {
- ret = -ENOMEM;
- goto fail_nb;
- }
- ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
- if (ret)
- goto fail_nb;
- pr_info("AMD NB counters detected\n");
- ret = 0;
- }
- if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
- amd_uncore_llc = alloc_percpu(struct amd_uncore *);
- if (!amd_uncore_llc) {
- ret = -ENOMEM;
- goto fail_llc;
- }
- ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
- if (ret)
- goto fail_llc;
- pr_info("AMD LLC counters detected\n");
- ret = 0;
- }
- /*
- * Install callbacks. Core will call them for each online cpu.
- */
- if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
- "perf/x86/amd/uncore:prepare",
- amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
- goto fail_llc;
- if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
- "perf/x86/amd/uncore:starting",
- amd_uncore_cpu_starting, NULL))
- goto fail_prep;
- if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
- "perf/x86/amd/uncore:online",
- amd_uncore_cpu_online,
- amd_uncore_cpu_down_prepare))
- goto fail_start;
- return 0;
- fail_start:
- cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
- fail_prep:
- cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
- fail_llc:
- if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
- perf_pmu_unregister(&amd_nb_pmu);
- if (amd_uncore_llc)
- free_percpu(amd_uncore_llc);
- fail_nb:
- if (amd_uncore_nb)
- free_percpu(amd_uncore_nb);
- return ret;
- }
- device_initcall(amd_uncore_init);