linux/arch/x86/events/intel/rapl.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support Intel RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * Intel RAPL interface is specified in the IA-32 Manual Vol3b
 * section 14.7.1 (September 2013)
 *
 * RAPL provides more controls than just reporting energy consumption;
 * however, here we only expose the free running energy consumption
 * counters (pp0, pkg, dram, pp1/gpu, psys).
 *
 * Each of those counters increments in a power unit defined by the
 * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
 * but it can vary.
 *
 * Counter to rapl events mappings:
 *
 *  pp0 counter: consumption of all physical cores (power plane 0)
 *        event: rapl_energy_cores
 *    perf code: 0x1
 *
 *  pkg counter: consumption of the whole processor package
 *        event: rapl_energy_pkg
 *    perf code: 0x2
 *
 * dram counter: consumption of the dram domain (servers only)
 *        event: rapl_energy_dram
 *    perf code: 0x3
 *
 *  gpu counter: consumption of the builtin-gpu domain (client only)
 *        event: rapl_energy_gpu
 *    perf code: 0x4
 *
 * psys counter: consumption of the builtin-psys domain (client only)
 *        event: rapl_energy_psys
 *    perf code: 0x5
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed point arithmetic (32.32).
 * Tools must convert the raw counts to Joules, for instance with
 * ldexp(raw_count, -32), and divide by the duration of the
 * measurement to obtain Watts.
 */
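
/*
 * Illustrative sketch (not part of the driver): a userspace tool that
 * has read a raw count from this PMU could convert it like so; the
 * variable names here are hypothetical:
 *
 *	#include <math.h>
 *	double joules = ldexp((double)raw_count, -32);
 *	double watts  = joules / elapsed_seconds;
 */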

#define pr_fmt(fmt) "RAPL PMU: " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");

/*
 * RAPL energy status counters
 */
#define RAPL_IDX_PP0_NRG_STAT   0       /* all cores */
#define INTEL_RAPL_PP0          0x1     /* pseudo-encoding */
#define RAPL_IDX_PKG_NRG_STAT   1       /* entire package */
#define INTEL_RAPL_PKG          0x2     /* pseudo-encoding */
#define RAPL_IDX_RAM_NRG_STAT   2       /* DRAM */
#define INTEL_RAPL_RAM          0x3     /* pseudo-encoding */
#define RAPL_IDX_PP1_NRG_STAT   3       /* gpu */
#define INTEL_RAPL_PP1          0x4     /* pseudo-encoding */
#define RAPL_IDX_PSYS_NRG_STAT  4       /* psys */
#define INTEL_RAPL_PSYS         0x5     /* pseudo-encoding */

#define NR_RAPL_DOMAINS         0x5
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
        "pp0-core",
        "package",
        "dram",
        "pp1-gpu",
        "psys",
};

/* Clients have PP0, PKG, PP1 */
#define RAPL_IDX_CLN    (1<<RAPL_IDX_PP0_NRG_STAT|\
                         1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_PP1_NRG_STAT)

/* Servers have PP0, PKG, RAM */
#define RAPL_IDX_SRV    (1<<RAPL_IDX_PP0_NRG_STAT|\
                         1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_RAM_NRG_STAT)

/* Haswell and later clients have PP0, PKG, RAM, PP1 */
#define RAPL_IDX_HSW    (1<<RAPL_IDX_PP0_NRG_STAT|\
                         1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_RAM_NRG_STAT|\
                         1<<RAPL_IDX_PP1_NRG_STAT)

/* SKL clients have PP0, PKG, RAM, PP1, PSYS */
#define RAPL_IDX_SKL_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
                          1<<RAPL_IDX_PKG_NRG_STAT|\
                          1<<RAPL_IDX_RAM_NRG_STAT|\
                          1<<RAPL_IDX_PP1_NRG_STAT|\
                          1<<RAPL_IDX_PSYS_NRG_STAT)

/* Knights Landing has PKG, RAM */
#define RAPL_IDX_KNL    (1<<RAPL_IDX_PKG_NRG_STAT|\
                         1<<RAPL_IDX_RAM_NRG_STAT)
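
/*
 * Example: on a Skylake client rapl_cntr_mask ends up as
 * RAPL_IDX_SKL_CLN, i.e. bits 0-4 (0x1f), so all five events pass the
 * (rapl_cntr_mask & (1 << bit)) check in rapl_pmu_event_init(); on
 * Knights Landing only pkg and ram (bits 1 and 2, mask 0x6) do.
 */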

/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK 0xFFULL

#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format)           \
static ssize_t __rapl_##_var##_show(struct kobject *kobj,       \
                                struct kobj_attribute *attr,    \
                                char *page)                     \
{                                                               \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);             \
        return sprintf(page, _format "\n");                     \
}                                                               \
static struct kobj_attribute format_attr_##_var =               \
        __ATTR(_name, 0444, __rapl_##_var##_show, NULL)

#define RAPL_CNTR_WIDTH 32

#define RAPL_EVENT_ATTR_STR(_name, v, str)                                      \
static struct perf_pmu_events_attr event_attr_##v = {                           \
        .attr           = __ATTR(_name, 0444, perf_event_sysfs_show, NULL),     \
        .id             = 0,                                                    \
        .event_str      = str,                                                  \
};
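
/*
 * For reference, RAPL_EVENT_ATTR_STR(energy-pkg, rapl_pkg, "event=0x02")
 * further down expands to roughly:
 *
 *	static struct perf_pmu_events_attr event_attr_rapl_pkg = {
 *		.attr		= __ATTR(energy-pkg, 0444,
 *					 perf_event_sysfs_show, NULL),
 *		.id		= 0,
 *		.event_str	= "event=0x02",
 *	};
 *
 * which perf_event_sysfs_show() surfaces as the sysfs file
 * events/energy-pkg containing "event=0x02".
 */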

struct rapl_pmu {
        raw_spinlock_t          lock;
        int                     n_active;
        int                     cpu;
        struct list_head        active_list;
        struct pmu              *pmu;
        ktime_t                 timer_interval;
        struct hrtimer          hrtimer;
};

struct rapl_pmus {
        struct pmu              pmu;
        unsigned int            maxpkg;
        struct rapl_pmu         *pmus[];
};

/* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;

static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
        unsigned int pkgid = topology_logical_package_id(cpu);

        /*
         * The unsigned check also catches the '-1' return value for non
         * existent mappings in the topology map.
         */
        return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}
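
/*
 * Example: on a two-socket system, CPUs in the second package map to
 * logical package id 1 and share rapl_pmus->pmus[1]; a CPU with no
 * topology mapping returns (unsigned)-1, fails the bounds check above
 * and yields NULL.
 */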

static inline u64 rapl_read_counter(struct perf_event *event)
{
        u64 raw;

        rdmsrl(event->hw.event_base, raw);
        return raw;
}

static inline u64 rapl_scale(u64 v, int cfg)
{
        if (cfg < 1 || cfg > NR_RAPL_DOMAINS) {
                pr_warn("Invalid domain %d, failed to scale data\n", cfg);
                return v;
        }
        /*
         * scale delta to smallest unit (1/2^32)
         * users must then scale back: count * 2^-32 to get Joules,
         * or use ldexp(count, -32).
         * Watts = Joules/Time delta
         */
        return v << (32 - rapl_hw_unit[cfg - 1]);
}
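
/*
 * Worked example: on a part whose energy unit is 2^-16 Joules
 * (rapl_hw_unit[cfg - 1] == 16), a raw delta of 1 is shifted left by
 * 32 - 16 = 16, i.e. becomes 65536 units of 2^-32 Joules, which is
 * again exactly 2^-16 Joules.
 */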

static u64 rapl_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev_raw_count, new_raw_count;
        s64 delta, sdelta;
        int shift = RAPL_CNTR_WIDTH;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        rdmsrl(event->hw.event_base, new_raw_count);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count) {
                cpu_relax();
                goto again;
        }

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        sdelta = rapl_scale(delta, event->hw.config);

        local64_add(sdelta, &event->count);

        return new_raw_count;
}
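
/*
 * The shift pair above restricts the difference to the counter's 32
 * hardware bits and sign-extends it, so wraparound is handled: e.g.
 * prev_raw_count == 0xffffffff and new_raw_count == 0x00000001 gives
 * a delta of 2 rather than a huge negative number.
 */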

static void rapl_start_hrtimer(struct rapl_pmu *pmu)
{
        hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
                      HRTIMER_MODE_REL_PINNED);
}

static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
        struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
        struct perf_event *event;
        unsigned long flags;

        if (!pmu->n_active)
                return HRTIMER_NORESTART;

        raw_spin_lock_irqsave(&pmu->lock, flags);

        list_for_each_entry(event, &pmu->active_list, active_entry)
                rapl_event_update(event);

        raw_spin_unlock_irqrestore(&pmu->lock, flags);

        hrtimer_forward_now(hrtimer, pmu->timer_interval);

        return HRTIMER_RESTART;
}

static void rapl_hrtimer_init(struct rapl_pmu *pmu)
{
        struct hrtimer *hr = &pmu->hrtimer;

        hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hr->function = rapl_hrtimer_handle;
}
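
/*
 * The timer exists to fold each active counter into the 64-bit
 * software count before the 32-bit MSR can wrap;
 * rapl_check_hw_unit() below sizes timer_interval from the energy
 * unit and a 200 W reference load.
 */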

static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
                                   struct perf_event *event)
{
        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        event->hw.state = 0;

        list_add_tail(&event->active_entry, &pmu->active_list);

        local64_set(&event->hw.prev_count, rapl_read_counter(event));

        pmu->n_active++;
        if (pmu->n_active == 1)
                rapl_start_hrtimer(pmu);
}

static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
        struct rapl_pmu *pmu = event->pmu_private;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmu->lock, flags);
        __rapl_pmu_event_start(pmu, event);
        raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
        struct rapl_pmu *pmu = event->pmu_private;
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmu->lock, flags);

        /* mark event as deactivated and stopped */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                WARN_ON_ONCE(pmu->n_active <= 0);
                pmu->n_active--;
                if (pmu->n_active == 0)
                        hrtimer_cancel(&pmu->hrtimer);

                list_del(&event->active_entry);

                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        /* check if update of sw counter is necessary */
        if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                rapl_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }

        raw_spin_unlock_irqrestore(&pmu->lock, flags);
}

static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
        struct rapl_pmu *pmu = event->pmu_private;
        struct hw_perf_event *hwc = &event->hw;
        unsigned long flags;

        raw_spin_lock_irqsave(&pmu->lock, flags);

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (mode & PERF_EF_START)
                __rapl_pmu_event_start(pmu, event);

        raw_spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}

static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
        rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int rapl_pmu_event_init(struct perf_event *event)
{
        u64 cfg = event->attr.config & RAPL_EVENT_MASK;
        int bit, msr, ret = 0;
        struct rapl_pmu *pmu;

        /* only look at RAPL events */
        if (event->attr.type != rapl_pmus->pmu.type)
                return -ENOENT;

        /* check only supported bits are set */
        if (event->attr.config & ~RAPL_EVENT_MASK)
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

        /*
         * check event is known (determines counter)
         */
        switch (cfg) {
        case INTEL_RAPL_PP0:
                bit = RAPL_IDX_PP0_NRG_STAT;
                msr = MSR_PP0_ENERGY_STATUS;
                break;
        case INTEL_RAPL_PKG:
                bit = RAPL_IDX_PKG_NRG_STAT;
                msr = MSR_PKG_ENERGY_STATUS;
                break;
        case INTEL_RAPL_RAM:
                bit = RAPL_IDX_RAM_NRG_STAT;
                msr = MSR_DRAM_ENERGY_STATUS;
                break;
        case INTEL_RAPL_PP1:
                bit = RAPL_IDX_PP1_NRG_STAT;
                msr = MSR_PP1_ENERGY_STATUS;
                break;
        case INTEL_RAPL_PSYS:
                bit = RAPL_IDX_PSYS_NRG_STAT;
                msr = MSR_PLATFORM_ENERGY_STATUS;
                break;
        default:
                return -EINVAL;
        }
        /* check event supported */
        if (!(rapl_cntr_mask & (1 << bit)))
                return -EINVAL;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        /* must be done before validate_group */
        pmu = cpu_to_rapl_pmu(event->cpu);
        if (!pmu)
                return -EINVAL;
        event->cpu = pmu->cpu;
        event->pmu_private = pmu;
        event->hw.event_base = msr;
        event->hw.config = cfg;
        event->hw.idx = bit;

        return ret;
}
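
/*
 * Example: a tool opening the pkg event sets attr.type to the value
 * read from /sys/bus/event_source/devices/power/type and attr.config
 * to 0x02; the switch above then selects MSR_PKG_ENERGY_STATUS as the
 * counter to read.
 */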

static void rapl_pmu_event_read(struct perf_event *event)
{
        rapl_event_update(event);
}

static ssize_t rapl_get_attr_cpumask(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);

static struct attribute *rapl_pmu_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static struct attribute_group rapl_pmu_attr_group = {
        .attrs = rapl_pmu_attrs,
};

RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg  ,   rapl_pkg, "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram  ,   rapl_ram, "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu  ,   rapl_gpu, "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys,   rapl_psys, "event=0x05");

RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit  ,   rapl_pkg_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit  ,   rapl_ram_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit  ,   rapl_gpu_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit,   rapl_psys_unit, "Joules");

/*
 * we compute in 0.23 nJ increments regardless of MSR
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale,     rapl_pkg_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale,     rapl_ram_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale,     rapl_gpu_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale,   rapl_psys_scale, "2.3283064365386962890625e-10");
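
/*
 * With the unit and scale strings above, perf can convert raw counts
 * to Joules on its own; an illustrative invocation:
 *
 *	# perf stat -a -e power/energy-pkg/ -- sleep 1
 *
 * would report something like "N.NN Joules power/energy-pkg/".
 */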

static struct attribute *rapl_events_srv_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_ram),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_ram_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_ram_scale),
        NULL,
};

static struct attribute *rapl_events_cln_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_gpu),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_gpu_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_gpu_scale),
        NULL,
};

static struct attribute *rapl_events_hsw_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_gpu),
        EVENT_PTR(rapl_ram),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_gpu_unit),
        EVENT_PTR(rapl_ram_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_gpu_scale),
        EVENT_PTR(rapl_ram_scale),
        NULL,
};

static struct attribute *rapl_events_skl_attr[] = {
        EVENT_PTR(rapl_cores),
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_gpu),
        EVENT_PTR(rapl_ram),
        EVENT_PTR(rapl_psys),

        EVENT_PTR(rapl_cores_unit),
        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_gpu_unit),
        EVENT_PTR(rapl_ram_unit),
        EVENT_PTR(rapl_psys_unit),

        EVENT_PTR(rapl_cores_scale),
        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_gpu_scale),
        EVENT_PTR(rapl_ram_scale),
        EVENT_PTR(rapl_psys_scale),
        NULL,
};

static struct attribute *rapl_events_knl_attr[] = {
        EVENT_PTR(rapl_pkg),
        EVENT_PTR(rapl_ram),

        EVENT_PTR(rapl_pkg_unit),
        EVENT_PTR(rapl_ram_unit),

        EVENT_PTR(rapl_pkg_scale),
        EVENT_PTR(rapl_ram_scale),
        NULL,
};

static struct attribute_group rapl_pmu_events_group = {
        .name = "events",
        .attrs = NULL, /* patched at runtime */
};

DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group rapl_pmu_format_group = {
        .name = "format",
        .attrs = rapl_formats_attr,
};

static const struct attribute_group *rapl_attr_groups[] = {
        &rapl_pmu_attr_group,
        &rapl_pmu_format_group,
        &rapl_pmu_events_group,
        NULL,
};
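
/*
 * The groups above populate sysfs roughly as follows (sketch, paths
 * abbreviated):
 *
 *	/sys/bus/event_source/devices/power/
 *		cpumask
 *		format/event            -> "config:0-7"
 *		events/energy-pkg       -> "event=0x02"
 *		events/energy-pkg.unit  -> "Joules"
 *		events/energy-pkg.scale -> "2.3283064365386962890625e-10"
 */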

static int rapl_cpu_offline(unsigned int cpu)
{
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;

        /* Check if exiting cpu is used for collecting rapl events */
        if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
                return 0;

        pmu->cpu = -1;
        /* Find a new cpu to collect rapl events */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

        /* Migrate rapl events to the new target */
        if (target < nr_cpu_ids) {
                cpumask_set_cpu(target, &rapl_cpu_mask);
                pmu->cpu = target;
                perf_pmu_migrate_context(pmu->pmu, cpu, target);
        }
        return 0;
}

static int rapl_cpu_online(unsigned int cpu)
{
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;

        if (!pmu) {
                pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
                if (!pmu)
                        return -ENOMEM;

                raw_spin_lock_init(&pmu->lock);
                INIT_LIST_HEAD(&pmu->active_list);
                pmu->pmu = &rapl_pmus->pmu;
                pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
                rapl_hrtimer_init(pmu);

                rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
        }

        /*
         * Check if there is an online cpu in the package which collects rapl
         * events already.
         */
        target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
        if (target < nr_cpu_ids)
                return 0;

        cpumask_set_cpu(cpu, &rapl_cpu_mask);
        pmu->cpu = cpu;
        return 0;
}

static int rapl_check_hw_unit(bool apply_quirk)
{
        u64 msr_rapl_power_unit_bits;
        int i;

        /* protect rdmsrl() to handle virtualization */
        if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
                return -1;
        for (i = 0; i < NR_RAPL_DOMAINS; i++)
                rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;

        /*
         * The DRAM domain on HSW servers and KNL has a fixed energy unit
         * which can differ from the unit in the power unit MSR. See
         * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
         * of 2. Datasheet, September 2014, Reference Number: 330784-001"
         */
        if (apply_quirk)
                rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;

        /*
         * Calculate the timer rate:
         * Use a reference of 200W for scaling the timeout to avoid counter
         * overflows. 200W = 200 Joules/sec
         * Divide the interval by 2 to avoid lockstep (2 * 100)
         * if the hw unit is 32, then we use 2 ms 1/200/2
         */
        rapl_timer_ms = 2;
        if (rapl_hw_unit[0] < 32) {
                rapl_timer_ms = (1000 / (2 * 100));
                rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
        }
        return 0;
}
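
/*
 * Worked example: with an energy unit of 2^-16 Joules the base
 * interval is 1000 / (2 * 100) = 5 ms, scaled by 2^(32 - 16 - 1) =
 * 32768, giving rapl_timer_ms = 163840 ms; that is half of the ~327 s
 * a 32-bit counter would take to wrap at the 200 W reference.
 */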

static void __init rapl_advertise(void)
{
        int i;

        pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
                hweight32(rapl_cntr_mask), rapl_timer_ms);

        for (i = 0; i < NR_RAPL_DOMAINS; i++) {
                if (rapl_cntr_mask & (1 << i)) {
                        pr_info("hw unit of domain %s 2^-%d Joules\n",
                                rapl_domain_names[i], rapl_hw_unit[i]);
                }
        }
}

static void cleanup_rapl_pmus(void)
{
        int i;

        for (i = 0; i < rapl_pmus->maxpkg; i++)
                kfree(rapl_pmus->pmus[i]);
        kfree(rapl_pmus);
}

static int __init init_rapl_pmus(void)
{
        int maxpkg = topology_max_packages();
        size_t size;

        size = sizeof(*rapl_pmus) + maxpkg * sizeof(struct rapl_pmu *);
        rapl_pmus = kzalloc(size, GFP_KERNEL);
        if (!rapl_pmus)
                return -ENOMEM;

        rapl_pmus->maxpkg               = maxpkg;
        rapl_pmus->pmu.attr_groups      = rapl_attr_groups;
        rapl_pmus->pmu.task_ctx_nr      = perf_invalid_context;
        rapl_pmus->pmu.event_init       = rapl_pmu_event_init;
        rapl_pmus->pmu.add              = rapl_pmu_event_add;
        rapl_pmus->pmu.del              = rapl_pmu_event_del;
        rapl_pmus->pmu.start            = rapl_pmu_event_start;
        rapl_pmus->pmu.stop             = rapl_pmu_event_stop;
        rapl_pmus->pmu.read             = rapl_pmu_event_read;
        rapl_pmus->pmu.module           = THIS_MODULE;
        rapl_pmus->pmu.capabilities     = PERF_PMU_CAP_NO_EXCLUDE;
        return 0;
}

#define X86_RAPL_MODEL_MATCH(model, init)       \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }

struct intel_rapl_init_fun {
        bool apply_quirk;
        int cntr_mask;
        struct attribute **attrs;
};

static const struct intel_rapl_init_fun snb_rapl_init __initconst = {
        .apply_quirk = false,
        .cntr_mask = RAPL_IDX_CLN,
        .attrs = rapl_events_cln_attr,
};

static const struct intel_rapl_init_fun hsx_rapl_init __initconst = {
        .apply_quirk = true,
        .cntr_mask = RAPL_IDX_SRV,
        .attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun hsw_rapl_init __initconst = {
        .apply_quirk = false,
        .cntr_mask = RAPL_IDX_HSW,
        .attrs = rapl_events_hsw_attr,
};

static const struct intel_rapl_init_fun snbep_rapl_init __initconst = {
        .apply_quirk = false,
        .cntr_mask = RAPL_IDX_SRV,
        .attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun knl_rapl_init __initconst = {
        .apply_quirk = true,
        .cntr_mask = RAPL_IDX_KNL,
        .attrs = rapl_events_knl_attr,
};

static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
        .apply_quirk = false,
        .cntr_mask = RAPL_IDX_SKL_CLN,
        .attrs = rapl_events_skl_attr,
};

static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,   snb_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,   snb_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,    hsx_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,  hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsx_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,       hsx_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE,  skl_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE,  skl_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),

        X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE,  skl_rapl_init),
        {},
};

MODULE_DEVICE_TABLE(x86cpu, rapl_cpu_match);

static int __init rapl_pmu_init(void)
{
        const struct x86_cpu_id *id;
        struct intel_rapl_init_fun *rapl_init;
        bool apply_quirk;
        int ret;

        id = x86_match_cpu(rapl_cpu_match);
        if (!id)
                return -ENODEV;

        rapl_init = (struct intel_rapl_init_fun *)id->driver_data;
        apply_quirk = rapl_init->apply_quirk;
        rapl_cntr_mask = rapl_init->cntr_mask;
        rapl_pmu_events_group.attrs = rapl_init->attrs;

        ret = rapl_check_hw_unit(apply_quirk);
        if (ret)
                return ret;

        ret = init_rapl_pmus();
        if (ret)
                return ret;

        /*
         * Install callbacks. Core will call them for each online cpu.
         */
        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
                                "perf/x86/rapl:online",
                                rapl_cpu_online, rapl_cpu_offline);
        if (ret)
                goto out;

        ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
        if (ret)
                goto out1;

        rapl_advertise();
        return 0;

out1:
        cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
        pr_warn("Initialization failed (%d), disabled\n", ret);
        cleanup_rapl_pmus();
        return ret;
}
module_init(rapl_pmu_init);

static void __exit intel_rapl_exit(void)
{
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
        perf_pmu_unregister(&rapl_pmus->pmu);
        cleanup_rapl_pmus();
}
module_exit(intel_rapl_exit);