linux/drivers/perf/thunderx2_pmu.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * CAVIUM THUNDERX2 SoC PMU UNCORE
   4 * Copyright (C) 2018 Cavium Inc.
   5 * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
   6 */
   7
   8#include <linux/acpi.h>
   9#include <linux/cpuhotplug.h>
  10#include <linux/perf_event.h>
  11#include <linux/platform_device.h>
  12
/*
 * Each ThunderX2 (TX2) socket has an L3C and a DMC uncore PMU device.
 * Each of these uncore PMU devices consists of 4 independent programmable
 * counters. The counters are 32 bit and do not support an overflow
 * interrupt, so they have to be sampled before they overflow (i.e. every
 * 2 seconds, via an hrtimer).
 */
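
/*
 * The counters are exposed through perf sysfs (see the "events" and
 * "format" attribute groups below). A typical counting session, assuming
 * the PMUs of node 0, looks like:
 *
 *   perf stat -a -e uncore_l3c_0/read_request/,uncore_dmc_0/read_txns/ sleep 1
 *
 * PMU instance names have the form "uncore_<l3c|dmc|ccpi2>_<node>", so the
 * "_0" suffix above is only an example.
 */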
  18
  19#define TX2_PMU_DMC_L3C_MAX_COUNTERS    4
  20#define TX2_PMU_CCPI2_MAX_COUNTERS      8
  21#define TX2_PMU_MAX_COUNTERS            TX2_PMU_CCPI2_MAX_COUNTERS
  22
  23
  24#define TX2_PMU_DMC_CHANNELS            8
  25#define TX2_PMU_L3_TILES                16
  26
  27#define TX2_PMU_HRTIMER_INTERVAL        (2 * NSEC_PER_SEC)
  28#define GET_EVENTID(ev, mask)           ((ev->hw.config) & mask)
  29#define GET_COUNTERID(ev, mask)         ((ev->hw.idx) & mask)
/*
 * DMC counter control register: 1 byte per counter (4 counters).
 * The event id is encoded in bits [5:1] of each counter's byte.
 */
  33#define DMC_EVENT_CFG(idx, val)         ((val) << (((idx) * 8) + 1))
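
/*
 * For example, DMC_EVENT_CFG(2, DMC_EVENT_READ_TXNS) shifts the event id
 * 0xF left by 17, i.e. into bits [5:1] of counter 2's byte (bits [23:16]
 * of the control register).
 */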
  34
/*
 * The CCPI2 counter select field (bits [3:0]) indexes the counters
 * from 8 to 15.
 */
  36#define CCPI2_COUNTER_OFFSET            8
  37
  38#define L3C_COUNTER_CTL                 0xA8
  39#define L3C_COUNTER_DATA                0xAC
  40#define DMC_COUNTER_CTL                 0x234
  41#define DMC_COUNTER_DATA                0x240
  42
  43#define CCPI2_PERF_CTL                  0x108
  44#define CCPI2_COUNTER_CTL               0x10C
  45#define CCPI2_COUNTER_SEL               0x12c
  46#define CCPI2_COUNTER_DATA_L            0x130
  47#define CCPI2_COUNTER_DATA_H            0x134
  48
  49/* L3C event IDs */
  50#define L3_EVENT_READ_REQ               0xD
  51#define L3_EVENT_WRITEBACK_REQ          0xE
  52#define L3_EVENT_INV_N_WRITE_REQ        0xF
  53#define L3_EVENT_INV_REQ                0x10
  54#define L3_EVENT_EVICT_REQ              0x13
  55#define L3_EVENT_INV_N_WRITE_HIT        0x14
  56#define L3_EVENT_INV_HIT                0x15
  57#define L3_EVENT_READ_HIT               0x17
  58#define L3_EVENT_MAX                    0x18
  59
  60/* DMC event IDs */
  61#define DMC_EVENT_COUNT_CYCLES          0x1
  62#define DMC_EVENT_WRITE_TXNS            0xB
  63#define DMC_EVENT_DATA_TRANSFERS        0xD
  64#define DMC_EVENT_READ_TXNS             0xF
  65#define DMC_EVENT_MAX                   0x10
  66
  67#define CCPI2_EVENT_REQ_PKT_SENT        0x3D
  68#define CCPI2_EVENT_SNOOP_PKT_SENT      0x65
  69#define CCPI2_EVENT_DATA_PKT_SENT       0x105
  70#define CCPI2_EVENT_GIC_PKT_SENT        0x12D
  71#define CCPI2_EVENT_MAX                 0x200
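
/*
 * CCPI2 event ids are 10 bits wide (format "config:0-9"), so they can also
 * be requested raw, e.g. uncore_ccpi2_0/event=0x3d/ is the same event as
 * the named "req_pktsent" attribute (node 0 used only as an example).
 */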
  72
  73#define CCPI2_PERF_CTL_ENABLE           BIT(0)
  74#define CCPI2_PERF_CTL_START            BIT(1)
  75#define CCPI2_PERF_CTL_RESET            BIT(4)
  76#define CCPI2_EVENT_LEVEL_RISING_EDGE   BIT(10)
  77#define CCPI2_EVENT_TYPE_EDGE_SENSITIVE BIT(11)
  78
  79enum tx2_uncore_type {
  80        PMU_TYPE_L3C,
  81        PMU_TYPE_DMC,
  82        PMU_TYPE_CCPI2,
  83        PMU_TYPE_INVALID,
  84};
  85
  86/*
  87 * Each socket has 3 uncore devices associated with a PMU. The DMC and
  88 * L3C have 4 32-bit counters and the CCPI2 has 8 64-bit counters.
  89 */
  90struct tx2_uncore_pmu {
  91        struct hlist_node hpnode;
  92        struct list_head  entry;
  93        struct pmu pmu;
  94        char *name;
  95        int node;
  96        int cpu;
  97        u32 max_counters;
  98        u32 counters_mask;
  99        u32 prorate_factor;
 100        u32 max_events;
 101        u32 events_mask;
 102        u64 hrtimer_interval;
 103        void __iomem *base;
 104        DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
 105        struct perf_event *events[TX2_PMU_MAX_COUNTERS];
 106        struct device *dev;
 107        struct hrtimer hrtimer;
 108        const struct attribute_group **attr_groups;
 109        enum tx2_uncore_type type;
 110        enum hrtimer_restart (*hrtimer_callback)(struct hrtimer *cb);
 111        void (*init_cntr_base)(struct perf_event *event,
 112                        struct tx2_uncore_pmu *tx2_pmu);
 113        void (*stop_event)(struct perf_event *event);
 114        void (*start_event)(struct perf_event *event, int flags);
 115};
 116
 117static LIST_HEAD(tx2_pmus);
 118
 119static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
 120{
 121        return container_of(pmu, struct tx2_uncore_pmu, pmu);
 122}
 123
 124#define TX2_PMU_FORMAT_ATTR(_var, _name, _format)                       \
 125static ssize_t                                                          \
 126__tx2_pmu_##_var##_show(struct device *dev,                             \
 127                               struct device_attribute *attr,           \
 128                               char *page)                              \
 129{                                                                       \
 130        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
 131        return sprintf(page, _format "\n");                             \
 132}                                                                       \
 133                                                                        \
 134static struct device_attribute format_attr_##_var =                     \
 135        __ATTR(_name, 0444, __tx2_pmu_##_var##_show, NULL)
 136
 137TX2_PMU_FORMAT_ATTR(event, event, "config:0-4");
 138TX2_PMU_FORMAT_ATTR(event_ccpi2, event, "config:0-9");
 139
 140static struct attribute *l3c_pmu_format_attrs[] = {
 141        &format_attr_event.attr,
 142        NULL,
 143};
 144
 145static struct attribute *dmc_pmu_format_attrs[] = {
 146        &format_attr_event.attr,
 147        NULL,
 148};
 149
 150static struct attribute *ccpi2_pmu_format_attrs[] = {
 151        &format_attr_event_ccpi2.attr,
 152        NULL,
 153};
 154
 155static const struct attribute_group l3c_pmu_format_attr_group = {
 156        .name = "format",
 157        .attrs = l3c_pmu_format_attrs,
 158};
 159
 160static const struct attribute_group dmc_pmu_format_attr_group = {
 161        .name = "format",
 162        .attrs = dmc_pmu_format_attrs,
 163};
 164
 165static const struct attribute_group ccpi2_pmu_format_attr_group = {
 166        .name = "format",
 167        .attrs = ccpi2_pmu_format_attrs,
 168};
 169
 170/*
 171 * sysfs event attributes
 172 */
 173static ssize_t tx2_pmu_event_show(struct device *dev,
 174                                    struct device_attribute *attr, char *buf)
 175{
 176        struct dev_ext_attribute *eattr;
 177
 178        eattr = container_of(attr, struct dev_ext_attribute, attr);
 179        return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
 180}
 181
 182#define TX2_EVENT_ATTR(name, config) \
 183        PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \
 184                        config, tx2_pmu_event_show)
 185
 186TX2_EVENT_ATTR(read_request, L3_EVENT_READ_REQ);
 187TX2_EVENT_ATTR(writeback_request, L3_EVENT_WRITEBACK_REQ);
 188TX2_EVENT_ATTR(inv_nwrite_request, L3_EVENT_INV_N_WRITE_REQ);
 189TX2_EVENT_ATTR(inv_request, L3_EVENT_INV_REQ);
 190TX2_EVENT_ATTR(evict_request, L3_EVENT_EVICT_REQ);
 191TX2_EVENT_ATTR(inv_nwrite_hit, L3_EVENT_INV_N_WRITE_HIT);
 192TX2_EVENT_ATTR(inv_hit, L3_EVENT_INV_HIT);
 193TX2_EVENT_ATTR(read_hit, L3_EVENT_READ_HIT);
 194
 195static struct attribute *l3c_pmu_events_attrs[] = {
 196        &tx2_pmu_event_attr_read_request.attr.attr,
 197        &tx2_pmu_event_attr_writeback_request.attr.attr,
 198        &tx2_pmu_event_attr_inv_nwrite_request.attr.attr,
 199        &tx2_pmu_event_attr_inv_request.attr.attr,
 200        &tx2_pmu_event_attr_evict_request.attr.attr,
 201        &tx2_pmu_event_attr_inv_nwrite_hit.attr.attr,
 202        &tx2_pmu_event_attr_inv_hit.attr.attr,
 203        &tx2_pmu_event_attr_read_hit.attr.attr,
 204        NULL,
 205};
 206
 207TX2_EVENT_ATTR(cnt_cycles, DMC_EVENT_COUNT_CYCLES);
 208TX2_EVENT_ATTR(write_txns, DMC_EVENT_WRITE_TXNS);
 209TX2_EVENT_ATTR(data_transfers, DMC_EVENT_DATA_TRANSFERS);
 210TX2_EVENT_ATTR(read_txns, DMC_EVENT_READ_TXNS);
 211
 212static struct attribute *dmc_pmu_events_attrs[] = {
 213        &tx2_pmu_event_attr_cnt_cycles.attr.attr,
 214        &tx2_pmu_event_attr_write_txns.attr.attr,
 215        &tx2_pmu_event_attr_data_transfers.attr.attr,
 216        &tx2_pmu_event_attr_read_txns.attr.attr,
 217        NULL,
 218};
 219
 220TX2_EVENT_ATTR(req_pktsent, CCPI2_EVENT_REQ_PKT_SENT);
 221TX2_EVENT_ATTR(snoop_pktsent, CCPI2_EVENT_SNOOP_PKT_SENT);
 222TX2_EVENT_ATTR(data_pktsent, CCPI2_EVENT_DATA_PKT_SENT);
 223TX2_EVENT_ATTR(gic_pktsent, CCPI2_EVENT_GIC_PKT_SENT);
 224
 225static struct attribute *ccpi2_pmu_events_attrs[] = {
 226        &tx2_pmu_event_attr_req_pktsent.attr.attr,
 227        &tx2_pmu_event_attr_snoop_pktsent.attr.attr,
 228        &tx2_pmu_event_attr_data_pktsent.attr.attr,
 229        &tx2_pmu_event_attr_gic_pktsent.attr.attr,
 230        NULL,
 231};
 232
 233static const struct attribute_group l3c_pmu_events_attr_group = {
 234        .name = "events",
 235        .attrs = l3c_pmu_events_attrs,
 236};
 237
 238static const struct attribute_group dmc_pmu_events_attr_group = {
 239        .name = "events",
 240        .attrs = dmc_pmu_events_attrs,
 241};
 242
 243static const struct attribute_group ccpi2_pmu_events_attr_group = {
 244        .name = "events",
 245        .attrs = ccpi2_pmu_events_attrs,
 246};
 247
 248/*
 249 * sysfs cpumask attributes
 250 */
 251static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
 252                char *buf)
 253{
 254        struct tx2_uncore_pmu *tx2_pmu;
 255
 256        tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
 257        return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
 258}
 259static DEVICE_ATTR_RO(cpumask);
 260
 261static struct attribute *tx2_pmu_cpumask_attrs[] = {
 262        &dev_attr_cpumask.attr,
 263        NULL,
 264};
 265
 266static const struct attribute_group pmu_cpumask_attr_group = {
 267        .attrs = tx2_pmu_cpumask_attrs,
 268};
 269
 270/*
 271 * Per PMU device attribute groups
 272 */
 273static const struct attribute_group *l3c_pmu_attr_groups[] = {
 274        &l3c_pmu_format_attr_group,
 275        &pmu_cpumask_attr_group,
 276        &l3c_pmu_events_attr_group,
 277        NULL
 278};
 279
 280static const struct attribute_group *dmc_pmu_attr_groups[] = {
 281        &dmc_pmu_format_attr_group,
 282        &pmu_cpumask_attr_group,
 283        &dmc_pmu_events_attr_group,
 284        NULL
 285};
 286
 287static const struct attribute_group *ccpi2_pmu_attr_groups[] = {
 288        &ccpi2_pmu_format_attr_group,
 289        &pmu_cpumask_attr_group,
 290        &ccpi2_pmu_events_attr_group,
 291        NULL
 292};
 293
 294static inline u32 reg_readl(unsigned long addr)
 295{
 296        return readl((void __iomem *)addr);
 297}
 298
 299static inline void reg_writel(u32 val, unsigned long addr)
 300{
 301        writel(val, (void __iomem *)addr);
 302}
 303
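/*
 * Claim the first free counter of this PMU and return its index, or
 * -ENOSPC when all counters are already in use.
 */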
 304static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
 305{
 306        int counter;
 307
 308        counter = find_first_zero_bit(tx2_pmu->active_counters,
 309                                tx2_pmu->max_counters);
 310        if (counter == tx2_pmu->max_counters)
 311                return -ENOSPC;
 312
 313        set_bit(counter, tx2_pmu->active_counters);
 314        return counter;
 315}
 316
 317static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
 318{
 319        clear_bit(counter, tx2_pmu->active_counters);
 320}
 321
 322static void init_cntr_base_l3c(struct perf_event *event,
 323                struct tx2_uncore_pmu *tx2_pmu)
 324{
 325        struct hw_perf_event *hwc = &event->hw;
 326        u32 cmask;
 327
 328        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 329        cmask = tx2_pmu->counters_mask;
 330
 331        /* counter ctrl/data reg offset at 8 */
 332        hwc->config_base = (unsigned long)tx2_pmu->base
 333                + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event, cmask));
 334        hwc->event_base =  (unsigned long)tx2_pmu->base
 335                + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event, cmask));
 336}
 337
 338static void init_cntr_base_dmc(struct perf_event *event,
 339                struct tx2_uncore_pmu *tx2_pmu)
 340{
 341        struct hw_perf_event *hwc = &event->hw;
 342        u32 cmask;
 343
 344        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 345        cmask = tx2_pmu->counters_mask;
 346
 347        hwc->config_base = (unsigned long)tx2_pmu->base
 348                + DMC_COUNTER_CTL;
 349        /* counter data reg offset at 0xc */
 350        hwc->event_base = (unsigned long)tx2_pmu->base
 351                + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event, cmask));
 352}
 353
 354static void init_cntr_base_ccpi2(struct perf_event *event,
 355                struct tx2_uncore_pmu *tx2_pmu)
 356{
 357        struct hw_perf_event *hwc = &event->hw;
 358        u32 cmask;
 359
 360        cmask = tx2_pmu->counters_mask;
 361
 362        hwc->config_base = (unsigned long)tx2_pmu->base
 363                + CCPI2_COUNTER_CTL + (4 * GET_COUNTERID(event, cmask));
 364        hwc->event_base =  (unsigned long)tx2_pmu->base;
 365}
 366
 367static void uncore_start_event_l3c(struct perf_event *event, int flags)
 368{
 369        u32 val, emask;
 370        struct hw_perf_event *hwc = &event->hw;
 371        struct tx2_uncore_pmu *tx2_pmu;
 372
 373        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 374        emask = tx2_pmu->events_mask;
 375
 376        /* event id encoded in bits [07:03] */
 377        val = GET_EVENTID(event, emask) << 3;
 378        reg_writel(val, hwc->config_base);
 379        local64_set(&hwc->prev_count, 0);
 380        reg_writel(0, hwc->event_base);
 381}
 382
 383static inline void uncore_stop_event_l3c(struct perf_event *event)
 384{
 385        reg_writel(0, event->hw.config_base);
 386}
 387
 388static void uncore_start_event_dmc(struct perf_event *event, int flags)
 389{
 390        u32 val, cmask, emask;
 391        struct hw_perf_event *hwc = &event->hw;
 392        struct tx2_uncore_pmu *tx2_pmu;
 393        int idx, event_id;
 394
 395        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 396        cmask = tx2_pmu->counters_mask;
 397        emask = tx2_pmu->events_mask;
 398
 399        idx = GET_COUNTERID(event, cmask);
 400        event_id = GET_EVENTID(event, emask);
 401
        /*
         * Enable and start the counter: each counter has an 8-bit field in
         * the control register, and bits [05:01] of that field select the
         * event type.
         */
 405        val = reg_readl(hwc->config_base);
 406        val &= ~DMC_EVENT_CFG(idx, 0x1f);
 407        val |= DMC_EVENT_CFG(idx, event_id);
 408        reg_writel(val, hwc->config_base);
 409        local64_set(&hwc->prev_count, 0);
 410        reg_writel(0, hwc->event_base);
 411}
 412
 413static void uncore_stop_event_dmc(struct perf_event *event)
 414{
 415        u32 val, cmask;
 416        struct hw_perf_event *hwc = &event->hw;
 417        struct tx2_uncore_pmu *tx2_pmu;
 418        int idx;
 419
 420        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 421        cmask = tx2_pmu->counters_mask;
 422        idx = GET_COUNTERID(event, cmask);
 423
        /* clear the event type (bits [05:01]) to stop the counter */
 425        val = reg_readl(hwc->config_base);
 426        val &= ~DMC_EVENT_CFG(idx, 0x1f);
 427        reg_writel(val, hwc->config_base);
 428}
 429
 430static void uncore_start_event_ccpi2(struct perf_event *event, int flags)
 431{
 432        u32 emask;
 433        struct hw_perf_event *hwc = &event->hw;
 434        struct tx2_uncore_pmu *tx2_pmu;
 435
 436        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 437        emask = tx2_pmu->events_mask;
 438
        /*
         * Bits [09:00] set the event id.
         * Bit [10] sets the level to rising edge.
         * Bit [11] sets the type to edge sensitive.
         */
 443        reg_writel((CCPI2_EVENT_TYPE_EDGE_SENSITIVE |
 444                        CCPI2_EVENT_LEVEL_RISING_EDGE |
 445                        GET_EVENTID(event, emask)), hwc->config_base);
 446
 447        /* reset[4], enable[0] and start[1] counters */
 448        reg_writel(CCPI2_PERF_CTL_RESET |
 449                        CCPI2_PERF_CTL_START |
 450                        CCPI2_PERF_CTL_ENABLE,
 451                        hwc->event_base + CCPI2_PERF_CTL);
 452        local64_set(&event->hw.prev_count, 0ULL);
 453}
 454
 455static void uncore_stop_event_ccpi2(struct perf_event *event)
 456{
 457        struct hw_perf_event *hwc = &event->hw;
 458
 459        /* disable and stop counter */
 460        reg_writel(0, hwc->event_base + CCPI2_PERF_CTL);
 461}
 462
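/*
 * Read the current hardware count and fold the delta since the previous
 * read into event->count. CCPI2 counters are 64 bit and are read through
 * the COUNTER_SEL/DATA_L/DATA_H window; L3C and DMC counters are 32 bit,
 * so the delta arithmetic below also absorbs a single wrap-around, which
 * is why those counters are resampled from the hrtimer every 2 seconds.
 */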
 463static void tx2_uncore_event_update(struct perf_event *event)
 464{
 465        u64 prev, delta, new = 0;
 466        struct hw_perf_event *hwc = &event->hw;
 467        struct tx2_uncore_pmu *tx2_pmu;
 468        enum tx2_uncore_type type;
 469        u32 prorate_factor;
 470        u32 cmask, emask;
 471
 472        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 473        type = tx2_pmu->type;
 474        cmask = tx2_pmu->counters_mask;
 475        emask = tx2_pmu->events_mask;
 476        prorate_factor = tx2_pmu->prorate_factor;
 477        if (type == PMU_TYPE_CCPI2) {
 478                reg_writel(CCPI2_COUNTER_OFFSET +
 479                                GET_COUNTERID(event, cmask),
 480                                hwc->event_base + CCPI2_COUNTER_SEL);
 481                new = reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_H);
 482                new = (new << 32) +
 483                        reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_L);
 484                prev = local64_xchg(&hwc->prev_count, new);
 485                delta = new - prev;
 486        } else {
 487                new = reg_readl(hwc->event_base);
 488                prev = local64_xchg(&hwc->prev_count, new);
                /* handle rollover of the 32-bit counter */
 490                delta = (u32)(((1UL << 32) - prev) + new);
 491        }
 492
        /* DMC data_transfers granularity is 16 bytes; convert it to 64 bytes */
 494        if (type == PMU_TYPE_DMC &&
 495                        GET_EVENTID(event, emask) == DMC_EVENT_DATA_TRANSFERS)
 496                delta = delta/4;
 497
        /*
         * The L3C and DMC have 16 and 8 interleaved channels respectively.
         * The sampled value is for channel 0, so it is multiplied by
         * prorate_factor to get the count for the whole device.
         */
 502        local64_add(delta * prorate_factor, &event->count);
 503}
 504
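/*
 * Map the ACPI _HID of a child device to the uncore PMU type it describes;
 * unknown HIDs map to PMU_TYPE_INVALID and are skipped by the caller.
 */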
 505static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
 506{
 507        int i = 0;
 508        struct acpi_tx2_pmu_device {
 509                __u8 id[ACPI_ID_LEN];
 510                enum tx2_uncore_type type;
 511        } devices[] = {
 512                {"CAV901D", PMU_TYPE_L3C},
 513                {"CAV901F", PMU_TYPE_DMC},
 514                {"CAV901E", PMU_TYPE_CCPI2},
 515                {"", PMU_TYPE_INVALID}
 516        };
 517
 518        while (devices[i].type != PMU_TYPE_INVALID) {
 519                if (!strcmp(acpi_device_hid(adev), devices[i].id))
 520                        break;
 521                i++;
 522        }
 523
 524        return devices[i].type;
 525}
 526
 527static bool tx2_uncore_validate_event(struct pmu *pmu,
 528                                  struct perf_event *event, int *counters)
 529{
 530        if (is_software_event(event))
 531                return true;
 532        /* Reject groups spanning multiple HW PMUs. */
 533        if (event->pmu != pmu)
 534                return false;
 535
 536        *counters = *counters + 1;
 537        return true;
 538}
 539
 540/*
 541 * Make sure the group of events can be scheduled at once
 542 * on the PMU.
 543 */
 544static bool tx2_uncore_validate_event_group(struct perf_event *event,
 545                int max_counters)
 546{
 547        struct perf_event *sibling, *leader = event->group_leader;
 548        int counters = 0;
 549
 550        if (event->group_leader == event)
 551                return true;
 552
 553        if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
 554                return false;
 555
 556        for_each_sibling_event(sibling, leader) {
 557                if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
 558                        return false;
 559        }
 560
 561        if (!tx2_uncore_validate_event(event->pmu, event, &counters))
 562                return false;
 563
 564        /*
 565         * If the group requires more counters than the HW has,
 566         * it cannot ever be scheduled.
 567         */
 568        return counters <= max_counters;
 569}
 570
 571
 572static int tx2_uncore_event_init(struct perf_event *event)
 573{
 574        struct hw_perf_event *hwc = &event->hw;
 575        struct tx2_uncore_pmu *tx2_pmu;
 576
        /* Test the event attr type for PMU enumeration */
 578        if (event->attr.type != event->pmu->type)
 579                return -ENOENT;
 580
        /*
         * SoC PMU counters are shared across all cores, so per-process
         * (per-task) mode is not supported. Event sampling mode is not
         * supported either.
         */
 586        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
 587                return -EINVAL;
 588
 589        /* We have no filtering of any kind */
 590        if (event->attr.exclude_user    ||
 591            event->attr.exclude_kernel  ||
 592            event->attr.exclude_hv      ||
 593            event->attr.exclude_idle    ||
 594            event->attr.exclude_host    ||
 595            event->attr.exclude_guest)
 596                return -EINVAL;
 597
 598        if (event->cpu < 0)
 599                return -EINVAL;
 600
 601        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 602        if (tx2_pmu->cpu >= nr_cpu_ids)
 603                return -EINVAL;
 604        event->cpu = tx2_pmu->cpu;
 605
 606        if (event->attr.config >= tx2_pmu->max_events)
 607                return -EINVAL;
 608
 609        /* store event id */
 610        hwc->config = event->attr.config;
 611
 612        /* Validate the group */
 613        if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters))
 614                return -EINVAL;
 615
 616        return 0;
 617}
 618
 619static void tx2_uncore_event_start(struct perf_event *event, int flags)
 620{
 621        struct hw_perf_event *hwc = &event->hw;
 622        struct tx2_uncore_pmu *tx2_pmu;
 623
 624        hwc->state = 0;
 625        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 626
 627        tx2_pmu->start_event(event, flags);
 628        perf_event_update_userpage(event);
 629
 630        /* No hrtimer needed for CCPI2, 64-bit counters */
 631        if (!tx2_pmu->hrtimer_callback)
 632                return;
 633
 634        /* Start timer for first event */
 635        if (bitmap_weight(tx2_pmu->active_counters,
 636                                tx2_pmu->max_counters) == 1) {
 637                hrtimer_start(&tx2_pmu->hrtimer,
 638                        ns_to_ktime(tx2_pmu->hrtimer_interval),
 639                        HRTIMER_MODE_REL_PINNED);
 640        }
 641}
 642
 643static void tx2_uncore_event_stop(struct perf_event *event, int flags)
 644{
 645        struct hw_perf_event *hwc = &event->hw;
 646        struct tx2_uncore_pmu *tx2_pmu;
 647
 648        if (hwc->state & PERF_HES_UPTODATE)
 649                return;
 650
 651        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 652        tx2_pmu->stop_event(event);
 653        WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
 654        hwc->state |= PERF_HES_STOPPED;
 655        if (flags & PERF_EF_UPDATE) {
 656                tx2_uncore_event_update(event);
 657                hwc->state |= PERF_HES_UPTODATE;
 658        }
 659}
 660
 661static int tx2_uncore_event_add(struct perf_event *event, int flags)
 662{
 663        struct hw_perf_event *hwc = &event->hw;
 664        struct tx2_uncore_pmu *tx2_pmu;
 665
 666        tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 667
 668        /* Allocate a free counter */
 669        hwc->idx  = alloc_counter(tx2_pmu);
 670        if (hwc->idx < 0)
 671                return -EAGAIN;
 672
 673        tx2_pmu->events[hwc->idx] = event;
 674        /* set counter control and data registers base address */
 675        tx2_pmu->init_cntr_base(event, tx2_pmu);
 676
 677        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
 678        if (flags & PERF_EF_START)
 679                tx2_uncore_event_start(event, flags);
 680
 681        return 0;
 682}
 683
 684static void tx2_uncore_event_del(struct perf_event *event, int flags)
 685{
 686        struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
 687        struct hw_perf_event *hwc = &event->hw;
 688        u32 cmask;
 689
 690        cmask = tx2_pmu->counters_mask;
 691        tx2_uncore_event_stop(event, PERF_EF_UPDATE);
 692
 693        /* clear the assigned counter */
 694        free_counter(tx2_pmu, GET_COUNTERID(event, cmask));
 695
 696        perf_event_update_userpage(event);
 697        tx2_pmu->events[hwc->idx] = NULL;
 698        hwc->idx = -1;
 699
 700        if (!tx2_pmu->hrtimer_callback)
 701                return;
 702
 703        if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters))
 704                hrtimer_cancel(&tx2_pmu->hrtimer);
 705}
 706
 707static void tx2_uncore_event_read(struct perf_event *event)
 708{
 709        tx2_uncore_event_update(event);
 710}
 711
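/*
 * Periodic resampling of the 32-bit L3C/DMC counters. The timer is started
 * when the first counter of a PMU becomes active and cancelled when its
 * last event is deleted or its owning CPU goes offline.
 */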
 712static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
 713{
 714        struct tx2_uncore_pmu *tx2_pmu;
 715        int max_counters, idx;
 716
 717        tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
 718        max_counters = tx2_pmu->max_counters;
 719
 720        if (bitmap_empty(tx2_pmu->active_counters, max_counters))
 721                return HRTIMER_NORESTART;
 722
 723        for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
 724                struct perf_event *event = tx2_pmu->events[idx];
 725
 726                tx2_uncore_event_update(event);
 727        }
 728        hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
 729        return HRTIMER_RESTART;
 730}
 731
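/*
 * Wire up the perf core callbacks and register the PMU under the name
 * chosen in tx2_uncore_pmu_init_dev() (e.g. "uncore_dmc_0").
 */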
 732static int tx2_uncore_pmu_register(
 733                struct tx2_uncore_pmu *tx2_pmu)
 734{
 735        struct device *dev = tx2_pmu->dev;
 736        char *name = tx2_pmu->name;
 737
 738        /* Perf event registration */
 739        tx2_pmu->pmu = (struct pmu) {
 740                .module         = THIS_MODULE,
 741                .attr_groups    = tx2_pmu->attr_groups,
 742                .task_ctx_nr    = perf_invalid_context,
 743                .event_init     = tx2_uncore_event_init,
 744                .add            = tx2_uncore_event_add,
 745                .del            = tx2_uncore_event_del,
 746                .start          = tx2_uncore_event_start,
 747                .stop           = tx2_uncore_event_stop,
 748                .read           = tx2_uncore_event_read,
 749        };
 750
 751        tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
 752                        "%s", name);
 753
 754        return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
 755}
 756
 757static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
 758{
 759        int ret, cpu;
 760
 761        cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
 762                        cpu_online_mask);
 763
 764        tx2_pmu->cpu = cpu;
 765
 766        if (tx2_pmu->hrtimer_callback) {
 767                hrtimer_init(&tx2_pmu->hrtimer,
 768                                CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 769                tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
 770        }
 771
 772        ret = tx2_uncore_pmu_register(tx2_pmu);
 773        if (ret) {
 774                dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
 775                                tx2_pmu->name);
 776                return -ENODEV;
 777        }
 778
 779        /* register hotplug callback for the pmu */
 780        ret = cpuhp_state_add_instance(
 781                        CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
 782                        &tx2_pmu->hpnode);
 783        if (ret) {
                dev_err(tx2_pmu->dev, "Error %d registering hotplug\n", ret);
 785                return ret;
 786        }
 787
 788        /* Add to list */
 789        list_add(&tx2_pmu->entry, &tx2_pmus);
 790
 791        dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
 792                        tx2_pmu->pmu.name);
 793        return ret;
 794}
 795
 796static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
 797                acpi_handle handle, struct acpi_device *adev, u32 type)
 798{
 799        struct tx2_uncore_pmu *tx2_pmu;
 800        void __iomem *base;
 801        struct resource res;
 802        struct resource_entry *rentry;
 803        struct list_head list;
 804        int ret;
 805
 806        INIT_LIST_HEAD(&list);
 807        ret = acpi_dev_get_resources(adev, &list, NULL, NULL);
 808        if (ret <= 0) {
 809                dev_err(dev, "failed to parse _CRS method, error %d\n", ret);
 810                return NULL;
 811        }
 812
        list_for_each_entry(rentry, &list, node) {
                if (resource_type(rentry->res) == IORESOURCE_MEM) {
                        res = *rentry->res;
                        rentry = NULL;
                        break;
                }
        }
        acpi_dev_free_resource_list(&list);

        /*
         * rentry is set to NULL above once a memory resource has been
         * found, so a non-NULL rentry here means _CRS provided none.
         */
        if (rentry) {
                dev_err(dev, "PMU type %d: No memory resource found\n", type);
                return NULL;
        }

 824        base = devm_ioremap_resource(dev, &res);
 825        if (IS_ERR(base)) {
                dev_err(dev, "PMU type %d: Failed to map resource\n", type);
 827                return NULL;
 828        }
 829
 830        tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
 831        if (!tx2_pmu)
 832                return NULL;
 833
 834        tx2_pmu->dev = dev;
 835        tx2_pmu->type = type;
 836        tx2_pmu->base = base;
 837        tx2_pmu->node = dev_to_node(dev);
 838        INIT_LIST_HEAD(&tx2_pmu->entry);
 839
 840        switch (tx2_pmu->type) {
 841        case PMU_TYPE_L3C:
 842                tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
 843                tx2_pmu->counters_mask = 0x3;
 844                tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
 845                tx2_pmu->max_events = L3_EVENT_MAX;
 846                tx2_pmu->events_mask = 0x1f;
 847                tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
 848                tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
 849                tx2_pmu->attr_groups = l3c_pmu_attr_groups;
 850                tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
 851                                "uncore_l3c_%d", tx2_pmu->node);
 852                tx2_pmu->init_cntr_base = init_cntr_base_l3c;
 853                tx2_pmu->start_event = uncore_start_event_l3c;
 854                tx2_pmu->stop_event = uncore_stop_event_l3c;
 855                break;
 856        case PMU_TYPE_DMC:
 857                tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS;
 858                tx2_pmu->counters_mask = 0x3;
 859                tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
 860                tx2_pmu->max_events = DMC_EVENT_MAX;
 861                tx2_pmu->events_mask = 0x1f;
 862                tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
 863                tx2_pmu->hrtimer_callback = tx2_hrtimer_callback;
 864                tx2_pmu->attr_groups = dmc_pmu_attr_groups;
 865                tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
 866                                "uncore_dmc_%d", tx2_pmu->node);
 867                tx2_pmu->init_cntr_base = init_cntr_base_dmc;
 868                tx2_pmu->start_event = uncore_start_event_dmc;
 869                tx2_pmu->stop_event = uncore_stop_event_dmc;
 870                break;
 871        case PMU_TYPE_CCPI2:
 872                /* CCPI2 has 8 counters */
 873                tx2_pmu->max_counters = TX2_PMU_CCPI2_MAX_COUNTERS;
 874                tx2_pmu->counters_mask = 0x7;
 875                tx2_pmu->prorate_factor = 1;
 876                tx2_pmu->max_events = CCPI2_EVENT_MAX;
 877                tx2_pmu->events_mask = 0x1ff;
 878                tx2_pmu->attr_groups = ccpi2_pmu_attr_groups;
 879                tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
 880                                "uncore_ccpi2_%d", tx2_pmu->node);
 881                tx2_pmu->init_cntr_base = init_cntr_base_ccpi2;
 882                tx2_pmu->start_event = uncore_start_event_ccpi2;
 883                tx2_pmu->stop_event = uncore_stop_event_ccpi2;
 884                tx2_pmu->hrtimer_callback = NULL;
 885                break;
 886        case PMU_TYPE_INVALID:
 887                devm_kfree(dev, tx2_pmu);
 888                return NULL;
 889        }
 890
 891        return tx2_pmu;
 892}
 893
 894static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
 895                                    void *data, void **return_value)
 896{
 897        struct tx2_uncore_pmu *tx2_pmu;
 898        struct acpi_device *adev;
 899        enum tx2_uncore_type type;
 900
 901        if (acpi_bus_get_device(handle, &adev))
 902                return AE_OK;
 903        if (acpi_bus_get_status(adev) || !adev->status.present)
 904                return AE_OK;
 905
 906        type = get_tx2_pmu_type(adev);
 907        if (type == PMU_TYPE_INVALID)
 908                return AE_OK;
 909
 910        tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
 911                        handle, adev, type);
 912
 913        if (!tx2_pmu)
 914                return AE_ERROR;
 915
 916        if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
 917                /* Can't add the PMU device, abort */
 918                return AE_ERROR;
 919        }
 920        return AE_OK;
 921}
 922
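/*
 * Hotplug callbacks: each PMU is bound to a single CPU on its own node and
 * all of its events run there. If that CPU goes offline, the events are
 * migrated to another online CPU of the same node, when one exists.
 */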
 923static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
 924                struct hlist_node *hpnode)
 925{
 926        struct tx2_uncore_pmu *tx2_pmu;
 927
 928        tx2_pmu = hlist_entry_safe(hpnode,
 929                        struct tx2_uncore_pmu, hpnode);
 930
        /*
         * Pick this CPU if there is no CPU/PMU association yet and both
         * are from the same node.
         */
 934        if ((tx2_pmu->cpu >= nr_cpu_ids) &&
 935                (tx2_pmu->node == cpu_to_node(cpu)))
 936                tx2_pmu->cpu = cpu;
 937
 938        return 0;
 939}
 940
 941static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
 942                struct hlist_node *hpnode)
 943{
 944        int new_cpu;
 945        struct tx2_uncore_pmu *tx2_pmu;
 946        struct cpumask cpu_online_mask_temp;
 947
 948        tx2_pmu = hlist_entry_safe(hpnode,
 949                        struct tx2_uncore_pmu, hpnode);
 950
 951        if (cpu != tx2_pmu->cpu)
 952                return 0;
 953
 954        if (tx2_pmu->hrtimer_callback)
 955                hrtimer_cancel(&tx2_pmu->hrtimer);
 956
 957        cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
 958        cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
 959        new_cpu = cpumask_any_and(
 960                        cpumask_of_node(tx2_pmu->node),
 961                        &cpu_online_mask_temp);
 962
 963        tx2_pmu->cpu = new_cpu;
 964        if (new_cpu >= nr_cpu_ids)
 965                return 0;
 966        perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
 967
 968        return 0;
 969}
 970
 971static const struct acpi_device_id tx2_uncore_acpi_match[] = {
 972        {"CAV901C", 0},
 973        {},
 974};
 975MODULE_DEVICE_TABLE(acpi, tx2_uncore_acpi_match);
 976
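/*
 * The matched platform device ("CAV901C") acts as a container; the
 * per-socket L3C, DMC and CCPI2 PMU devices are its children in the ACPI
 * namespace, so probe walks one level below it and registers each child.
 */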
 977static int tx2_uncore_probe(struct platform_device *pdev)
 978{
 979        struct device *dev = &pdev->dev;
 980        acpi_handle handle;
 981        acpi_status status;
 982
 983        set_dev_node(dev, acpi_get_node(ACPI_HANDLE(dev)));
 984
 985        if (!has_acpi_companion(dev))
 986                return -ENODEV;
 987
 988        handle = ACPI_HANDLE(dev);
 989        if (!handle)
 990                return -EINVAL;
 991
 992        /* Walk through the tree for all PMU UNCORE devices */
 993        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
 994                                     tx2_uncore_pmu_add,
 995                                     NULL, dev, NULL);
 996        if (ACPI_FAILURE(status)) {
 997                dev_err(dev, "failed to probe PMU devices\n");
 998                return_ACPI_STATUS(status);
 999        }
1000
1001        dev_info(dev, "node%d: pmu uncore registered\n", dev_to_node(dev));
1002        return 0;
1003}
1004
1005static int tx2_uncore_remove(struct platform_device *pdev)
1006{
1007        struct tx2_uncore_pmu *tx2_pmu, *temp;
1008        struct device *dev = &pdev->dev;
1009
1010        if (!list_empty(&tx2_pmus)) {
1011                list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
1012                        if (tx2_pmu->node == dev_to_node(dev)) {
1013                                cpuhp_state_remove_instance_nocalls(
1014                                        CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
1015                                        &tx2_pmu->hpnode);
1016                                perf_pmu_unregister(&tx2_pmu->pmu);
1017                                list_del(&tx2_pmu->entry);
1018                        }
1019                }
1020        }
1021        return 0;
1022}
1023
1024static struct platform_driver tx2_uncore_driver = {
1025        .driver = {
1026                .name           = "tx2-uncore-pmu",
1027                .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
1028        },
1029        .probe = tx2_uncore_probe,
1030        .remove = tx2_uncore_remove,
1031};
1032
1033static int __init tx2_uncore_driver_init(void)
1034{
1035        int ret;
1036
1037        ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
1038                                      "perf/tx2/uncore:online",
1039                                      tx2_uncore_pmu_online_cpu,
1040                                      tx2_uncore_pmu_offline_cpu);
1041        if (ret) {
                pr_err("TX2 PMU: setup hotplug failed (%d)\n", ret);
1043                return ret;
1044        }
1045        ret = platform_driver_register(&tx2_uncore_driver);
1046        if (ret)
1047                cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
1048
1049        return ret;
1050}
1051module_init(tx2_uncore_driver_init);
1052
1053static void __exit tx2_uncore_driver_exit(void)
1054{
1055        platform_driver_unregister(&tx2_uncore_driver);
1056        cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
1057}
1058module_exit(tx2_uncore_driver_exit);
1059
1060MODULE_DESCRIPTION("ThunderX2 UNCORE PMU driver");
1061MODULE_LICENSE("GPL v2");
1062MODULE_AUTHOR("Ganapatrao Kulkarni <gkulkarni@cavium.com>");
1063