linux/drivers/perf/thunderx2_pmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * CAVIUM THUNDERX2 SoC PMU UNCORE
 * Copyright (C) 2018 Cavium Inc.
 * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
 */

#include <linux/acpi.h>
#include <linux/cpuhotplug.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>

/*
 * Each ThunderX2 (TX2) socket has an L3C and a DMC UNCORE PMU device.
 * Each UNCORE PMU device consists of 4 independent programmable counters.
 * Counters are 32 bit and do not support an overflow interrupt; they must
 * be sampled before they overflow (i.e. at least every 2 seconds).
 */

#define TX2_PMU_MAX_COUNTERS            4
#define TX2_PMU_DMC_CHANNELS            8
#define TX2_PMU_L3_TILES                16

#define TX2_PMU_HRTIMER_INTERVAL        (2 * NSEC_PER_SEC)
#define GET_EVENTID(ev)                 ((ev->hw.config) & 0x1f)
#define GET_COUNTERID(ev)               ((ev->hw.idx) & 0x3)
/*
 * 1 byte per counter (4 counters).
 * The event id is encoded in bits [5:1] of each counter's byte.
 */
#define DMC_EVENT_CFG(idx, val)         ((val) << (((idx) * 8) + 1))

#define L3C_COUNTER_CTL                 0xA8
#define L3C_COUNTER_DATA                0xAC
#define DMC_COUNTER_CTL                 0x234
#define DMC_COUNTER_DATA                0x240

/* L3C event IDs */
#define L3_EVENT_READ_REQ               0xD
#define L3_EVENT_WRITEBACK_REQ          0xE
#define L3_EVENT_INV_N_WRITE_REQ        0xF
#define L3_EVENT_INV_REQ                0x10
#define L3_EVENT_EVICT_REQ              0x13
#define L3_EVENT_INV_N_WRITE_HIT        0x14
#define L3_EVENT_INV_HIT                0x15
#define L3_EVENT_READ_HIT               0x17
#define L3_EVENT_MAX                    0x18

/* DMC event IDs */
#define DMC_EVENT_COUNT_CYCLES          0x1
#define DMC_EVENT_WRITE_TXNS            0xB
#define DMC_EVENT_DATA_TRANSFERS        0xD
#define DMC_EVENT_READ_TXNS             0xF
#define DMC_EVENT_MAX                   0x10

enum tx2_uncore_type {
        PMU_TYPE_L3C,
        PMU_TYPE_DMC,
        PMU_TYPE_INVALID,
};

/*
 * Each socket has two uncore PMU devices (DMC and L3C);
 * each device has 4 counters.
 */
struct tx2_uncore_pmu {
        struct hlist_node hpnode;
        struct list_head  entry;
        struct pmu pmu;
        char *name;
        int node;
        int cpu;
        u32 max_counters;
        u32 prorate_factor;
        u32 max_events;
        u64 hrtimer_interval;
        void __iomem *base;
        DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
        struct perf_event *events[TX2_PMU_MAX_COUNTERS];
        struct device *dev;
        struct hrtimer hrtimer;
        const struct attribute_group **attr_groups;
        enum tx2_uncore_type type;
        void (*init_cntr_base)(struct perf_event *event,
                        struct tx2_uncore_pmu *tx2_pmu);
        void (*stop_event)(struct perf_event *event);
        void (*start_event)(struct perf_event *event, int flags);
};

static LIST_HEAD(tx2_pmus);

static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
{
        return container_of(pmu, struct tx2_uncore_pmu, pmu);
}

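/* The 5-bit event id lives in bits [4:0] of the perf config (see GET_EVENTID) */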
PMU_FORMAT_ATTR(event,  "config:0-4");

static struct attribute *l3c_pmu_format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute *dmc_pmu_format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};

static const struct attribute_group l3c_pmu_format_attr_group = {
        .name = "format",
        .attrs = l3c_pmu_format_attrs,
};

static const struct attribute_group dmc_pmu_format_attr_group = {
        .name = "format",
        .attrs = dmc_pmu_format_attrs,
};

/*
 * sysfs event attributes
 */
static ssize_t tx2_pmu_event_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct dev_ext_attribute *eattr;

        eattr = container_of(attr, struct dev_ext_attribute, attr);
        return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
}

#define TX2_EVENT_ATTR(name, config) \
        PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \
                        config, tx2_pmu_event_show)

TX2_EVENT_ATTR(read_request, L3_EVENT_READ_REQ);
TX2_EVENT_ATTR(writeback_request, L3_EVENT_WRITEBACK_REQ);
TX2_EVENT_ATTR(inv_nwrite_request, L3_EVENT_INV_N_WRITE_REQ);
TX2_EVENT_ATTR(inv_request, L3_EVENT_INV_REQ);
TX2_EVENT_ATTR(evict_request, L3_EVENT_EVICT_REQ);
TX2_EVENT_ATTR(inv_nwrite_hit, L3_EVENT_INV_N_WRITE_HIT);
TX2_EVENT_ATTR(inv_hit, L3_EVENT_INV_HIT);
TX2_EVENT_ATTR(read_hit, L3_EVENT_READ_HIT);

static struct attribute *l3c_pmu_events_attrs[] = {
        &tx2_pmu_event_attr_read_request.attr.attr,
        &tx2_pmu_event_attr_writeback_request.attr.attr,
        &tx2_pmu_event_attr_inv_nwrite_request.attr.attr,
        &tx2_pmu_event_attr_inv_request.attr.attr,
        &tx2_pmu_event_attr_evict_request.attr.attr,
        &tx2_pmu_event_attr_inv_nwrite_hit.attr.attr,
        &tx2_pmu_event_attr_inv_hit.attr.attr,
        &tx2_pmu_event_attr_read_hit.attr.attr,
        NULL,
};

TX2_EVENT_ATTR(cnt_cycles, DMC_EVENT_COUNT_CYCLES);
TX2_EVENT_ATTR(write_txns, DMC_EVENT_WRITE_TXNS);
TX2_EVENT_ATTR(data_transfers, DMC_EVENT_DATA_TRANSFERS);
TX2_EVENT_ATTR(read_txns, DMC_EVENT_READ_TXNS);

static struct attribute *dmc_pmu_events_attrs[] = {
        &tx2_pmu_event_attr_cnt_cycles.attr.attr,
        &tx2_pmu_event_attr_write_txns.attr.attr,
        &tx2_pmu_event_attr_data_transfers.attr.attr,
        &tx2_pmu_event_attr_read_txns.attr.attr,
        NULL,
};

static const struct attribute_group l3c_pmu_events_attr_group = {
        .name = "events",
        .attrs = l3c_pmu_events_attrs,
};

static const struct attribute_group dmc_pmu_events_attr_group = {
        .name = "events",
        .attrs = dmc_pmu_events_attrs,
};

/*
 * sysfs cpumask attributes
 */
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct tx2_uncore_pmu *tx2_pmu;

        tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
        return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *tx2_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group pmu_cpumask_attr_group = {
        .attrs = tx2_pmu_cpumask_attrs,
};

/*
 * Per PMU device attribute groups
 */
static const struct attribute_group *l3c_pmu_attr_groups[] = {
        &l3c_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &l3c_pmu_events_attr_group,
        NULL
};

static const struct attribute_group *dmc_pmu_attr_groups[] = {
        &dmc_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &dmc_pmu_events_attr_group,
        NULL
};

static inline u32 reg_readl(unsigned long addr)
{
        return readl((void __iomem *)addr);
}

static inline void reg_writel(u32 val, unsigned long addr)
{
        writel(val, (void __iomem *)addr);
}

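/* Claim the first free counter in the PMU's active-counter bitmap */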
static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
{
        int counter;

        counter = find_first_zero_bit(tx2_pmu->active_counters,
                                tx2_pmu->max_counters);
        if (counter == tx2_pmu->max_counters)
                return -ENOSPC;

        set_bit(counter, tx2_pmu->active_counters);
        return counter;
}

static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
{
        clear_bit(counter, tx2_pmu->active_counters);
}

static void init_cntr_base_l3c(struct perf_event *event,
                struct tx2_uncore_pmu *tx2_pmu)
{
        struct hw_perf_event *hwc = &event->hw;

        /* counter ctrl/data regs are spaced 8 bytes apart per counter */
        hwc->config_base = (unsigned long)tx2_pmu->base
                + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event));
        hwc->event_base = (unsigned long)tx2_pmu->base
                + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event));
}

static void init_cntr_base_dmc(struct perf_event *event,
                struct tx2_uncore_pmu *tx2_pmu)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->config_base = (unsigned long)tx2_pmu->base
                + DMC_COUNTER_CTL;
        /* counter data regs are spaced 0xc bytes apart per counter */
        hwc->event_base = (unsigned long)tx2_pmu->base
                + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event));
}

static void uncore_start_event_l3c(struct perf_event *event, int flags)
{
        u32 val;
        struct hw_perf_event *hwc = &event->hw;

        /* event id encoded in bits [07:03] */
        val = GET_EVENTID(event) << 3;
        reg_writel(val, hwc->config_base);
        local64_set(&hwc->prev_count, 0);
        reg_writel(0, hwc->event_base);
}

static inline void uncore_stop_event_l3c(struct perf_event *event)
{
        reg_writel(0, event->hw.config_base);
}

static void uncore_start_event_dmc(struct perf_event *event, int flags)
{
        u32 val;
        struct hw_perf_event *hwc = &event->hw;
        int idx = GET_COUNTERID(event);
        int event_id = GET_EVENTID(event);

        /*
         * Enable and start the counter.
         * Each counter has one byte in the control register; bits [5:1]
         * of that byte select the event type.
         */
        val = reg_readl(hwc->config_base);
        val &= ~DMC_EVENT_CFG(idx, 0x1f);
        val |= DMC_EVENT_CFG(idx, event_id);
        reg_writel(val, hwc->config_base);
        local64_set(&hwc->prev_count, 0);
        reg_writel(0, hwc->event_base);
}

static void uncore_stop_event_dmc(struct perf_event *event)
{
        u32 val;
        struct hw_perf_event *hwc = &event->hw;
        int idx = GET_COUNTERID(event);

        /* clear the event type (bits [5:1]) to stop the counter */
        val = reg_readl(hwc->config_base);
        val &= ~DMC_EVENT_CFG(idx, 0x1f);
        reg_writel(val, hwc->config_base);
}

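/*
 * Read the current hardware count and accumulate the delta since the
 * last read, handling 32-bit rollover and per-channel proration.
 */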
static void tx2_uncore_event_update(struct perf_event *event)
{
        s64 prev, delta, new = 0;
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;
        enum tx2_uncore_type type;
        u32 prorate_factor;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        type = tx2_pmu->type;
        prorate_factor = tx2_pmu->prorate_factor;

        new = reg_readl(hwc->event_base);
        prev = local64_xchg(&hwc->prev_count, new);

        /* handle rollover of the 32 bit counter */
        delta = (u32)(((1UL << 32) - prev) + new);

        /* DMC data_transfers events count 16 byte beats; convert to 64 byte units */
        if (type == PMU_TYPE_DMC &&
                        GET_EVENTID(event) == DMC_EVENT_DATA_TRANSFERS)
                delta = delta/4;

        /*
         * The L3C and DMC have 16 and 8 interleaved channels respectively.
         * The sampled value is for channel 0 only; multiply it by
         * prorate_factor to get the count for the whole device.
         */
        local64_add(delta * prorate_factor, &event->count);
}

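/* Map the ACPI _HID of an uncore device to its PMU type */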
static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
{
        int i = 0;
        struct acpi_tx2_pmu_device {
                __u8 id[ACPI_ID_LEN];
                enum tx2_uncore_type type;
        } devices[] = {
                {"CAV901D", PMU_TYPE_L3C},
                {"CAV901F", PMU_TYPE_DMC},
                {"", PMU_TYPE_INVALID}
        };

        while (devices[i].type != PMU_TYPE_INVALID) {
                if (!strcmp(acpi_device_hid(adev), devices[i].id))
                        break;
                i++;
        }

        return devices[i].type;
}

static bool tx2_uncore_validate_event(struct pmu *pmu,
                                  struct perf_event *event, int *counters)
{
        if (is_software_event(event))
                return true;
        /* Reject groups spanning multiple HW PMUs. */
        if (event->pmu != pmu)
                return false;

        *counters = *counters + 1;
        return true;
}

/*
 * Make sure the group of events can be scheduled at once
 * on the PMU.
 */
static bool tx2_uncore_validate_event_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        int counters = 0;

        if (event->group_leader == event)
                return true;

        if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
                return false;

        for_each_sibling_event(sibling, leader) {
                if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
                        return false;
        }

        if (!tx2_uncore_validate_event(event->pmu, event, &counters))
                return false;

        /*
         * If the group requires more counters than the HW has,
         * it cannot ever be scheduled.
         */
        return counters <= TX2_PMU_MAX_COUNTERS;
}

static int tx2_uncore_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;

        /* Test the event attr type for PMU enumeration */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * SOC PMU counters are shared across all cores.
         * Therefore, it does not support per-process mode.
         * Also, it does not support event sampling mode.
         */
        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        if (tx2_pmu->cpu >= nr_cpu_ids)
                return -EINVAL;
        event->cpu = tx2_pmu->cpu;

        if (event->attr.config >= tx2_pmu->max_events)
                return -EINVAL;

        /* store the event id */
        hwc->config = event->attr.config;

        /* Validate the group */
        if (!tx2_uncore_validate_event_group(event))
                return -EINVAL;

        return 0;
}

static void tx2_uncore_event_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;

        hwc->state = 0;
        tx2_pmu = pmu_to_tx2_pmu(event->pmu);

        tx2_pmu->start_event(event, flags);
        perf_event_update_userpage(event);

        /* Start timer for first event */
        if (bitmap_weight(tx2_pmu->active_counters,
                                tx2_pmu->max_counters) == 1) {
                hrtimer_start(&tx2_pmu->hrtimer,
                        ns_to_ktime(tx2_pmu->hrtimer_interval),
                        HRTIMER_MODE_REL_PINNED);
        }
}

static void tx2_uncore_event_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;

        if (hwc->state & PERF_HES_UPTODATE)
                return;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        tx2_pmu->stop_event(event);
        WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
        hwc->state |= PERF_HES_STOPPED;
        if (flags & PERF_EF_UPDATE) {
                tx2_uncore_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static int tx2_uncore_event_add(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct tx2_uncore_pmu *tx2_pmu;

        tx2_pmu = pmu_to_tx2_pmu(event->pmu);

        /* Allocate a free counter */
        hwc->idx = alloc_counter(tx2_pmu);
        if (hwc->idx < 0)
                return -EAGAIN;

        tx2_pmu->events[hwc->idx] = event;
        /* set counter control and data registers base address */
        tx2_pmu->init_cntr_base(event, tx2_pmu);

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (flags & PERF_EF_START)
                tx2_uncore_event_start(event, flags);

        return 0;
}

static void tx2_uncore_event_del(struct perf_event *event, int flags)
{
        struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        tx2_uncore_event_stop(event, PERF_EF_UPDATE);

        /* clear the assigned counter */
        free_counter(tx2_pmu, GET_COUNTERID(event));

        perf_event_update_userpage(event);
        tx2_pmu->events[hwc->idx] = NULL;
        hwc->idx = -1;
}

static void tx2_uncore_event_read(struct perf_event *event)
{
        tx2_uncore_event_update(event);
}

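/*
 * Periodic hrtimer callback: update every active event so the 32-bit
 * counters are sampled before they can wrap around.
 */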
static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
{
        struct tx2_uncore_pmu *tx2_pmu;
        int max_counters, idx;

        tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
        max_counters = tx2_pmu->max_counters;

        if (bitmap_empty(tx2_pmu->active_counters, max_counters))
                return HRTIMER_NORESTART;

        for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
                struct perf_event *event = tx2_pmu->events[idx];

                tx2_uncore_event_update(event);
        }
        hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
        return HRTIMER_RESTART;
}

static int tx2_uncore_pmu_register(
                struct tx2_uncore_pmu *tx2_pmu)
{
        struct device *dev = tx2_pmu->dev;
        char *name = tx2_pmu->name;

        /* Perf event registration */
        tx2_pmu->pmu = (struct pmu) {
                .module         = THIS_MODULE,
                .attr_groups    = tx2_pmu->attr_groups,
                .task_ctx_nr    = perf_invalid_context,
                .event_init     = tx2_uncore_event_init,
                .add            = tx2_uncore_event_add,
                .del            = tx2_uncore_event_del,
                .start          = tx2_uncore_event_start,
                .stop           = tx2_uncore_event_stop,
                .read           = tx2_uncore_event_read,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
        };

        tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
                        "%s", name);

        return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
}

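/*
 * Bind the PMU to an online CPU from its node, set up the sampling
 * hrtimer, register with perf and hook into CPU hotplug.
 */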
static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
{
        int ret, cpu;

        cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
                        cpu_online_mask);

        tx2_pmu->cpu = cpu;
        hrtimer_init(&tx2_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        tx2_pmu->hrtimer.function = tx2_hrtimer_callback;

        ret = tx2_uncore_pmu_register(tx2_pmu);
        if (ret) {
                dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
                                tx2_pmu->name);
                return -ENODEV;
        }

        /* register hotplug callback for the pmu */
        ret = cpuhp_state_add_instance(
                        CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
                        &tx2_pmu->hpnode);
        if (ret) {
                dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
                return ret;
        }

        /* Add to list */
        list_add(&tx2_pmu->entry, &tx2_pmus);

        dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
                        tx2_pmu->pmu.name);
        return ret;
}

static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
                acpi_handle handle, struct acpi_device *adev, u32 type)
{
        struct tx2_uncore_pmu *tx2_pmu;
        void __iomem *base;
        struct resource res;
        struct resource_entry *rentry;
        struct list_head list;
        int ret;

        INIT_LIST_HEAD(&list);
        ret = acpi_dev_get_resources(adev, &list, NULL, NULL);
        if (ret <= 0) {
                dev_err(dev, "failed to parse _CRS method, error %d\n", ret);
                return NULL;
        }

        list_for_each_entry(rentry, &list, node) {
                if (resource_type(rentry->res) == IORESOURCE_MEM) {
                        res = *rentry->res;
                        break;
                }
        }

        if (!rentry->res)
                return NULL;

        acpi_dev_free_resource_list(&list);
        base = devm_ioremap_resource(dev, &res);
        if (IS_ERR(base)) {
                dev_err(dev, "PMU type %d: Fail to map resource\n", type);
                return NULL;
        }

        tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
        if (!tx2_pmu)
                return NULL;

        tx2_pmu->dev = dev;
        tx2_pmu->type = type;
        tx2_pmu->base = base;
        tx2_pmu->node = dev_to_node(dev);
        INIT_LIST_HEAD(&tx2_pmu->entry);

        switch (tx2_pmu->type) {
        case PMU_TYPE_L3C:
                tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
                tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
                tx2_pmu->max_events = L3_EVENT_MAX;
                tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
                tx2_pmu->attr_groups = l3c_pmu_attr_groups;
                tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
                                "uncore_l3c_%d", tx2_pmu->node);
                tx2_pmu->init_cntr_base = init_cntr_base_l3c;
                tx2_pmu->start_event = uncore_start_event_l3c;
                tx2_pmu->stop_event = uncore_stop_event_l3c;
                break;
        case PMU_TYPE_DMC:
                tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
                tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
                tx2_pmu->max_events = DMC_EVENT_MAX;
                tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
                tx2_pmu->attr_groups = dmc_pmu_attr_groups;
                tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
                                "uncore_dmc_%d", tx2_pmu->node);
                tx2_pmu->init_cntr_base = init_cntr_base_dmc;
                tx2_pmu->start_event = uncore_start_event_dmc;
                tx2_pmu->stop_event = uncore_stop_event_dmc;
                break;
        case PMU_TYPE_INVALID:
                devm_kfree(dev, tx2_pmu);
                return NULL;
        }

        return tx2_pmu;
}

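/*
 * acpi_walk_namespace() callback: create and register one uncore PMU
 * for each present L3C/DMC device found under the parent device.
 */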
static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
                                    void *data, void **return_value)
{
        struct tx2_uncore_pmu *tx2_pmu;
        struct acpi_device *adev;
        enum tx2_uncore_type type;

        if (acpi_bus_get_device(handle, &adev))
                return AE_OK;
        if (acpi_bus_get_status(adev) || !adev->status.present)
                return AE_OK;

        type = get_tx2_pmu_type(adev);
        if (type == PMU_TYPE_INVALID)
                return AE_OK;

        tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
                        handle, adev, type);

        if (!tx2_pmu)
                return AE_ERROR;

        if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
                /* Can't add the PMU device, abort */
                return AE_ERROR;
        }
        return AE_OK;
}

static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
                struct hlist_node *hpnode)
{
        struct tx2_uncore_pmu *tx2_pmu;

        tx2_pmu = hlist_entry_safe(hpnode,
                        struct tx2_uncore_pmu, hpnode);

        /*
         * Pick this CPU if the PMU has no CPU associated yet and both
         * are from the same node.
         */
        if ((tx2_pmu->cpu >= nr_cpu_ids) &&
                (tx2_pmu->node == cpu_to_node(cpu)))
                tx2_pmu->cpu = cpu;

        return 0;
}

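/*
 * If the CPU going offline owns this PMU, cancel the sampling timer and
 * migrate the perf context to another online CPU on the same node, if any.
 */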
static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
                struct hlist_node *hpnode)
{
        int new_cpu;
        struct tx2_uncore_pmu *tx2_pmu;
        struct cpumask cpu_online_mask_temp;

        tx2_pmu = hlist_entry_safe(hpnode,
                        struct tx2_uncore_pmu, hpnode);

        if (cpu != tx2_pmu->cpu)
                return 0;

        hrtimer_cancel(&tx2_pmu->hrtimer);
        cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
        cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
        new_cpu = cpumask_any_and(
                        cpumask_of_node(tx2_pmu->node),
                        &cpu_online_mask_temp);

        tx2_pmu->cpu = new_cpu;
        if (new_cpu >= nr_cpu_ids)
                return 0;
        perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);

        return 0;
}

static const struct acpi_device_id tx2_uncore_acpi_match[] = {
        {"CAV901C", 0},
        {},
};
MODULE_DEVICE_TABLE(acpi, tx2_uncore_acpi_match);

static int tx2_uncore_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        acpi_handle handle;
        acpi_status status;

        set_dev_node(dev, acpi_get_node(ACPI_HANDLE(dev)));

        if (!has_acpi_companion(dev))
                return -ENODEV;

        handle = ACPI_HANDLE(dev);
        if (!handle)
                return -EINVAL;

        /* Walk through the tree for all PMU UNCORE devices */
        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
                                     tx2_uncore_pmu_add,
                                     NULL, dev, NULL);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "failed to probe PMU devices\n");
                return_ACPI_STATUS(status);
        }

        dev_info(dev, "node%d: pmu uncore registered\n", dev_to_node(dev));
        return 0;
}

static int tx2_uncore_remove(struct platform_device *pdev)
{
        struct tx2_uncore_pmu *tx2_pmu, *temp;
        struct device *dev = &pdev->dev;

        if (!list_empty(&tx2_pmus)) {
                list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
                        if (tx2_pmu->node == dev_to_node(dev)) {
                                cpuhp_state_remove_instance_nocalls(
                                        CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
                                        &tx2_pmu->hpnode);
                                perf_pmu_unregister(&tx2_pmu->pmu);
                                list_del(&tx2_pmu->entry);
                        }
                }
        }
        return 0;
}

static struct platform_driver tx2_uncore_driver = {
        .driver = {
                .name           = "tx2-uncore-pmu",
                .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
        },
        .probe = tx2_uncore_probe,
        .remove = tx2_uncore_remove,
};

static int __init tx2_uncore_driver_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
                                      "perf/tx2/uncore:online",
                                      tx2_uncore_pmu_online_cpu,
                                      tx2_uncore_pmu_offline_cpu);
        if (ret) {
                pr_err("TX2 PMU: setup hotplug failed(%d)\n", ret);
                return ret;
        }
        ret = platform_driver_register(&tx2_uncore_driver);
        if (ret)
                cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);

        return ret;
}
module_init(tx2_uncore_driver_init);

static void __exit tx2_uncore_driver_exit(void)
{
        platform_driver_unregister(&tx2_uncore_driver);
        cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
}
module_exit(tx2_uncore_driver_exit);

MODULE_DESCRIPTION("ThunderX2 UNCORE PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ganapatrao Kulkarni <gkulkarni@cavium.com>");