linux/drivers/perf/xgene_pmu.c
/*
 * APM X-Gene SoC PMU (Performance Monitor Unit)
 *
 * Copyright (c) 2016, Applied Micro Circuits Corporation
 * Author: Hoan Tran <hotran@apm.com>
 *         Tai Nguyen <ttnguyen@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#define CSW_CSWCR                       0x0000
#define  CSW_CSWCR_DUALMCB_MASK         BIT(0)
#define MCBADDRMR                       0x0000
#define  MCBADDRMR_DUALMCU_MODE_MASK    BIT(2)

#define PCPPMU_INTSTATUS_REG    0x000
#define PCPPMU_INTMASK_REG      0x004
#define  PCPPMU_INTMASK         0x0000000F
#define  PCPPMU_INTENMASK       0xFFFFFFFF
#define  PCPPMU_INTCLRMASK      0xFFFFFFF0
#define  PCPPMU_INT_MCU         BIT(0)
#define  PCPPMU_INT_MCB         BIT(1)
#define  PCPPMU_INT_L3C         BIT(2)
#define  PCPPMU_INT_IOB         BIT(3)

#define PMU_MAX_COUNTERS        4
#define PMU_CNT_MAX_PERIOD      0x100000000ULL
#define PMU_OVERFLOW_MASK       0xF
#define PMU_PMCR_E              BIT(0)
#define PMU_PMCR_P              BIT(1)

#define PMU_PMEVCNTR0           0x000
#define PMU_PMEVCNTR1           0x004
#define PMU_PMEVCNTR2           0x008
#define PMU_PMEVCNTR3           0x00C
#define PMU_PMEVTYPER0          0x400
#define PMU_PMEVTYPER1          0x404
#define PMU_PMEVTYPER2          0x408
#define PMU_PMEVTYPER3          0x40C
#define PMU_PMAMR0              0xA00
#define PMU_PMAMR1              0xA04
#define PMU_PMCNTENSET          0xC00
#define PMU_PMCNTENCLR          0xC20
#define PMU_PMINTENSET          0xC40
#define PMU_PMINTENCLR          0xC60
#define PMU_PMOVSR              0xC80
#define PMU_PMCR                0xE04

#define to_pmu_dev(p)     container_of(p, struct xgene_pmu_dev, pmu)
#define GET_CNTR(ev)      (ev->hw.idx)
#define GET_EVENTID(ev)   (ev->hw.config & 0xFFULL)
#define GET_AGENTID(ev)   (ev->hw.config_base & 0xFFFFFFFFUL)
#define GET_AGENT1ID(ev)  ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)

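/*
 * Illustrative decoding of the accessors above (values are made up for
 * the example, not taken from the datasheet): an event opened as
 *
 *   l3c0/l3c_eventid=0x03,l3c_agentid=0x1/
 *
 * arrives with attr.config = 0x03 and attr.config1 = 0x1; event_init()
 * below copies these into hw.config and hw.config_base, so
 * GET_EVENTID() returns 0x03 and GET_AGENTID() returns 0x1 when the
 * counter is programmed.
 */
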
struct hw_pmu_info {
        u32 type;
        u32 enable_mask;
        void __iomem *csr;
};

struct xgene_pmu_dev {
        struct hw_pmu_info *inf;
        struct xgene_pmu *parent;
        struct pmu pmu;
        u8 max_counters;
        DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
        u64 max_period;
        const struct attribute_group **attr_groups;
        struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
};

struct xgene_pmu {
        struct device *dev;
        int version;
        void __iomem *pcppmu_csr;
        u32 mcb_active_mask;
        u32 mc_active_mask;
        cpumask_t cpu;
        raw_spinlock_t lock;
        struct list_head l3cpmus;
        struct list_head iobpmus;
        struct list_head mcbpmus;
        struct list_head mcpmus;
};

struct xgene_pmu_dev_ctx {
        char *name;
        struct list_head next;
        struct xgene_pmu_dev *pmu_dev;
        struct hw_pmu_info inf;
};

struct xgene_pmu_data {
        int id;
        u32 data;
};

enum xgene_pmu_version {
        PCP_PMU_V1 = 1,
        PCP_PMU_V2,
};

enum xgene_pmu_dev_type {
        PMU_TYPE_L3C = 0,
        PMU_TYPE_IOB,
        PMU_TYPE_MCB,
        PMU_TYPE_MC,
};

/*
 * sysfs format attributes
 */
static ssize_t xgene_pmu_format_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct dev_ext_attribute *eattr;

        eattr = container_of(attr, struct dev_ext_attribute, attr);
        return sprintf(buf, "%s\n", (char *) eattr->var);
}

#define XGENE_PMU_FORMAT_ATTR(_name, _config)           \
        (&((struct dev_ext_attribute[]) {               \
                { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
                  .var = (void *) _config, }            \
        })[0].attr.attr)

static struct attribute *l3c_pmu_format_attrs[] = {
        XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
        XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
        NULL,
};

static struct attribute *iob_pmu_format_attrs[] = {
        XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
        XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
        NULL,
};

static struct attribute *mcb_pmu_format_attrs[] = {
        XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
        XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
        NULL,
};

static struct attribute *mc_pmu_format_attrs[] = {
        XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
        NULL,
};

static const struct attribute_group l3c_pmu_format_attr_group = {
        .name = "format",
        .attrs = l3c_pmu_format_attrs,
};

static const struct attribute_group iob_pmu_format_attr_group = {
        .name = "format",
        .attrs = iob_pmu_format_attrs,
};

static const struct attribute_group mcb_pmu_format_attr_group = {
        .name = "format",
        .attrs = mcb_pmu_format_attrs,
};

static const struct attribute_group mc_pmu_format_attr_group = {
        .name = "format",
        .attrs = mc_pmu_format_attrs,
};

/*
 * sysfs event attributes
 */
static ssize_t xgene_pmu_event_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct dev_ext_attribute *eattr;

        eattr = container_of(attr, struct dev_ext_attribute, attr);
        return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
}

#define XGENE_PMU_EVENT_ATTR(_name, _config)            \
        (&((struct dev_ext_attribute[]) {               \
                { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \
                  .var = (void *) _config, }            \
        })[0].attr.attr)

static struct attribute *l3c_pmu_events_attrs[] = {
        XGENE_PMU_EVENT_ATTR(cycle-count,                       0x00),
        XGENE_PMU_EVENT_ATTR(cycle-count-div-64,                0x01),
        XGENE_PMU_EVENT_ATTR(read-hit,                          0x02),
        XGENE_PMU_EVENT_ATTR(read-miss,                         0x03),
        XGENE_PMU_EVENT_ATTR(write-need-replacement,            0x06),
        XGENE_PMU_EVENT_ATTR(write-not-need-replacement,        0x07),
        XGENE_PMU_EVENT_ATTR(tq-full,                           0x08),
        XGENE_PMU_EVENT_ATTR(ackq-full,                         0x09),
        XGENE_PMU_EVENT_ATTR(wdb-full,                          0x0a),
        XGENE_PMU_EVENT_ATTR(bank-fifo-full,                    0x0b),
        XGENE_PMU_EVENT_ATTR(odb-full,                          0x0c),
        XGENE_PMU_EVENT_ATTR(wbq-full,                          0x0d),
        XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue,          0x0e),
        XGENE_PMU_EVENT_ATTR(bank-fifo-issue,                   0x0f),
        NULL,
};

static struct attribute *iob_pmu_events_attrs[] = {
        XGENE_PMU_EVENT_ATTR(cycle-count,                       0x00),
        XGENE_PMU_EVENT_ATTR(cycle-count-div-64,                0x01),
        XGENE_PMU_EVENT_ATTR(axi0-read,                         0x02),
        XGENE_PMU_EVENT_ATTR(axi0-read-partial,                 0x03),
        XGENE_PMU_EVENT_ATTR(axi1-read,                         0x04),
        XGENE_PMU_EVENT_ATTR(axi1-read-partial,                 0x05),
        XGENE_PMU_EVENT_ATTR(csw-read-block,                    0x06),
        XGENE_PMU_EVENT_ATTR(csw-read-partial,                  0x07),
        XGENE_PMU_EVENT_ATTR(axi0-write,                        0x10),
        XGENE_PMU_EVENT_ATTR(axi0-write-partial,                0x11),
        XGENE_PMU_EVENT_ATTR(axi1-write,                        0x13),
        XGENE_PMU_EVENT_ATTR(axi1-write-partial,                0x14),
        XGENE_PMU_EVENT_ATTR(csw-inbound-dirty,                 0x16),
        NULL,
};

static struct attribute *mcb_pmu_events_attrs[] = {
        XGENE_PMU_EVENT_ATTR(cycle-count,                       0x00),
        XGENE_PMU_EVENT_ATTR(cycle-count-div-64,                0x01),
        XGENE_PMU_EVENT_ATTR(csw-read,                          0x02),
        XGENE_PMU_EVENT_ATTR(csw-write-request,                 0x03),
        XGENE_PMU_EVENT_ATTR(mcb-csw-stall,                     0x04),
        XGENE_PMU_EVENT_ATTR(cancel-read-gack,                  0x05),
        NULL,
};

static struct attribute *mc_pmu_events_attrs[] = {
        XGENE_PMU_EVENT_ATTR(cycle-count,                       0x00),
        XGENE_PMU_EVENT_ATTR(cycle-count-div-64,                0x01),
        XGENE_PMU_EVENT_ATTR(act-cmd-sent,                      0x02),
        XGENE_PMU_EVENT_ATTR(pre-cmd-sent,                      0x03),
        XGENE_PMU_EVENT_ATTR(rd-cmd-sent,                       0x04),
        XGENE_PMU_EVENT_ATTR(rda-cmd-sent,                      0x05),
        XGENE_PMU_EVENT_ATTR(wr-cmd-sent,                       0x06),
        XGENE_PMU_EVENT_ATTR(wra-cmd-sent,                      0x07),
        XGENE_PMU_EVENT_ATTR(pde-cmd-sent,                      0x08),
        XGENE_PMU_EVENT_ATTR(sre-cmd-sent,                      0x09),
        XGENE_PMU_EVENT_ATTR(prea-cmd-sent,                     0x0a),
        XGENE_PMU_EVENT_ATTR(ref-cmd-sent,                      0x0b),
        XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent,                   0x0c),
        XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent,                   0x0d),
        XGENE_PMU_EVENT_ATTR(in-rd-collision,                   0x0e),
        XGENE_PMU_EVENT_ATTR(in-wr-collision,                   0x0f),
        XGENE_PMU_EVENT_ATTR(collision-queue-not-empty,         0x10),
        XGENE_PMU_EVENT_ATTR(collision-queue-full,              0x11),
        XGENE_PMU_EVENT_ATTR(mcu-request,                       0x12),
        XGENE_PMU_EVENT_ATTR(mcu-rd-request,                    0x13),
        XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request,                 0x14),
        XGENE_PMU_EVENT_ATTR(mcu-wr-request,                    0x15),
        XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all,                0x16),
        XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel,             0x17),
        XGENE_PMU_EVENT_ATTR(mcu-rd-response,                   0x18),
        XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all,    0x19),
        XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
        XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all,                0x1b),
        XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel,             0x1c),
        NULL,
};

static const struct attribute_group l3c_pmu_events_attr_group = {
        .name = "events",
        .attrs = l3c_pmu_events_attrs,
};

static const struct attribute_group iob_pmu_events_attr_group = {
        .name = "events",
        .attrs = iob_pmu_events_attrs,
};

static const struct attribute_group mcb_pmu_events_attr_group = {
        .name = "events",
        .attrs = mcb_pmu_events_attrs,
};

static const struct attribute_group mc_pmu_events_attr_group = {
        .name = "events",
        .attrs = mc_pmu_events_attrs,
};

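/*
 * With the groups above exported, an event can be requested either by
 * name or by raw config value. A typical invocation (assuming an L3C
 * PMU was registered as "l3c0"; the instance name depends on the
 * platform) would be:
 *
 *   perf stat -a -e l3c0/cycle-count/,l3c0/l3c_eventid=0x04/ sleep 1
 */
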
/*
 * sysfs cpumask attributes
 */
static ssize_t xgene_pmu_cpumask_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
}

static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL);

static struct attribute *xgene_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group pmu_cpumask_attr_group = {
        .attrs = xgene_pmu_cpumask_attrs,
};

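/*
 * Userspace can query which CPU services these uncore events through
 * the standard perf sysfs layout, e.g. (path shown for a hypothetical
 * "l3c0" instance):
 *
 *   cat /sys/bus/event_source/devices/l3c0/cpumask
 */
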
/*
 * Per PMU device attribute groups
 */
static const struct attribute_group *l3c_pmu_attr_groups[] = {
        &l3c_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &l3c_pmu_events_attr_group,
        NULL
};

static const struct attribute_group *iob_pmu_attr_groups[] = {
        &iob_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &iob_pmu_events_attr_group,
        NULL
};

static const struct attribute_group *mcb_pmu_attr_groups[] = {
        &mcb_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &mcb_pmu_events_attr_group,
        NULL
};

static const struct attribute_group *mc_pmu_attr_groups[] = {
        &mc_pmu_format_attr_group,
        &pmu_cpumask_attr_group,
        &mc_pmu_events_attr_group,
        NULL
};

static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
{
        int cntr;

        cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
                                pmu_dev->max_counters);
        if (cntr == pmu_dev->max_counters)
                return -ENOSPC;
        set_bit(cntr, pmu_dev->cntr_assign_mask);

        return cntr;
}

static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
{
        clear_bit(cntr, pmu_dev->cntr_assign_mask);
}

static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
{
        writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
{
        writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
}

static inline u32 xgene_pmu_read_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
        return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline void
xgene_pmu_write_counter(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
{
        writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
}

static inline void
xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
{
        writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
}

static inline void
xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
        writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
}

static inline void
xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
{
        writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
}

static inline void
xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
        val |= 1 << idx;
        writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
}

static inline void
xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
        val |= 1 << idx;
        writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
}

static inline void
xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
        val |= 1 << idx;
        writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
}

static inline void
xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
        val |= 1 << idx;
        writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
}

static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMCR);
        val |= PMU_PMCR_P;
        writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMCR);
        val |= PMU_PMCR_E;
        writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
{
        u32 val;

        val = readl(pmu_dev->inf->csr + PMU_PMCR);
        val &= ~PMU_PMCR_E;
        writel(val, pmu_dev->inf->csr + PMU_PMCR);
}

static void xgene_perf_pmu_enable(struct pmu *pmu)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
        int enabled = bitmap_weight(pmu_dev->cntr_assign_mask,
                        pmu_dev->max_counters);

        if (!enabled)
                return;

        xgene_pmu_start_counters(pmu_dev);
}

static void xgene_perf_pmu_disable(struct pmu *pmu)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);

        xgene_pmu_stop_counters(pmu_dev);
}

static int xgene_perf_event_init(struct perf_event *event)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        struct perf_event *sibling;

        /* Test the event attr type for PMU enumeration */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /*
         * SoC PMU counters are shared across all cores. Therefore,
         * per-process (task) counting and event sampling are not
         * supported.
         */
        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        /* SoC counters do not have usr/os/guest/host bits */
        if (event->attr.exclude_user || event->attr.exclude_kernel ||
            event->attr.exclude_host || event->attr.exclude_guest)
                return -EINVAL;

        if (event->cpu < 0)
                return -EINVAL;
        /*
         * Many perf core operations (e.g. events rotation) operate on a
         * single CPU context. This is obvious for CPU PMUs, where one
         * expects the same sets of events being observed on all CPUs,
         * but can lead to issues for off-core PMUs, where each
         * event could be theoretically assigned to a different CPU. To
         * mitigate this, we enforce CPU assignment to one, selected
         * processor (the one described in the "cpumask" attribute).
         */
        event->cpu = cpumask_first(&pmu_dev->parent->cpu);

        hw->config = event->attr.config;
        /*
         * Each bit of the config1 field represents an agent from which the
         * request of the event comes. The event is counted only if it is
         * caused by a request from an agent whose bit is cleared; e.g.
         * config1 = 0x2 excludes requests from agent 1 while counting all
         * other agents. By default (config1 = 0), the event is counted for
         * all agents.
         */
        hw->config_base = event->attr.config1;

        /*
         * We must NOT create groups containing mixed PMUs, although software
         * events are acceptable
         */
        if (event->group_leader->pmu != event->pmu &&
                        !is_software_event(event->group_leader))
                return -EINVAL;

        list_for_each_entry(sibling, &event->group_leader->sibling_list,
                        group_entry)
                if (sibling->pmu != event->pmu &&
                                !is_software_event(sibling))
                        return -EINVAL;

        return 0;
}

static void xgene_perf_enable_event(struct perf_event *event)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);

        xgene_pmu_write_evttype(pmu_dev, GET_CNTR(event), GET_EVENTID(event));
        xgene_pmu_write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
        if (pmu_dev->inf->type == PMU_TYPE_IOB)
                xgene_pmu_write_agent1msk(pmu_dev, ~((u32)GET_AGENT1ID(event)));

        xgene_pmu_enable_counter(pmu_dev, GET_CNTR(event));
        xgene_pmu_enable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_disable_event(struct perf_event *event)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);

        xgene_pmu_disable_counter(pmu_dev, GET_CNTR(event));
        xgene_pmu_disable_counter_int(pmu_dev, GET_CNTR(event));
}

static void xgene_perf_event_set_period(struct perf_event *event)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        /*
         * The X-Gene PMU counters have a period of 2^32. To account for the
         * possibility of extreme interrupt latency we program for a period of
         * half that. Hopefully we can handle the interrupt before another 2^31
         * events occur and the counter overtakes its previous value.
         */
        u64 val = 1ULL << 31;

        local64_set(&hw->prev_count, val);
        xgene_pmu_write_counter(pmu_dev, hw->idx, (u32) val);
}

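/*
 * Worked example of the scheme above: the counter is seeded with 2^31
 * (0x80000000), so it overflows and raises the interrupt after 2^31
 * further events. Even if the interrupt is delayed, the counter only
 * catches up with the seed value after another 2^31 events, so
 * xgene_perf_event_update() below still computes the correct delta.
 */
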
static void xgene_perf_event_update(struct perf_event *event)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hw->prev_count);
        new_raw_count = xgene_pmu_read_counter(pmu_dev, GET_CNTR(event));

        if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;

        local64_add(delta, &event->count);
}

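/*
 * Example of the wraparound handling above, with illustrative values:
 * if prev_count = 0xfffffff0 and the 32-bit counter has wrapped to
 * 0x00000010, then (0x10 - 0xfffffff0) & 0xffffffff = 0x20, i.e. 32
 * events elapsed, despite new_raw_count being numerically smaller.
 */
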
static void xgene_perf_read(struct perf_event *event)
{
        xgene_perf_event_update(event);
}

static void xgene_perf_start(struct perf_event *event, int flags)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;

        if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
                return;

        WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
        hw->state = 0;

        xgene_perf_event_set_period(event);

        if (flags & PERF_EF_RELOAD) {
                u64 prev_raw_count = local64_read(&hw->prev_count);

                xgene_pmu_write_counter(pmu_dev, GET_CNTR(event),
                                        (u32) prev_raw_count);
        }

        xgene_perf_enable_event(event);
        perf_event_update_userpage(event);
}

static void xgene_perf_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hw = &event->hw;

        if (hw->state & PERF_HES_UPTODATE)
                return;

        xgene_perf_disable_event(event);
        WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
        hw->state |= PERF_HES_STOPPED;

        if (hw->state & PERF_HES_UPTODATE)
                return;

        xgene_perf_read(event);
        hw->state |= PERF_HES_UPTODATE;
}

static int xgene_perf_add(struct perf_event *event, int flags)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;

        hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        /* Allocate an event counter */
        hw->idx = get_next_avail_cntr(pmu_dev);
        if (hw->idx < 0)
                return -EAGAIN;

        /* Remember the event for the overflow interrupt handler */
        pmu_dev->pmu_counter_event[hw->idx] = event;

        if (flags & PERF_EF_START)
                xgene_perf_start(event, PERF_EF_RELOAD);

        return 0;
}

static void xgene_perf_del(struct perf_event *event, int flags)
{
        struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
        struct hw_perf_event *hw = &event->hw;

        xgene_perf_stop(event, PERF_EF_UPDATE);

        /* Clear the assigned counter */
        clear_avail_cntr(pmu_dev, GET_CNTR(event));

        perf_event_update_userpage(event);
        pmu_dev->pmu_counter_event[hw->idx] = NULL;
}

static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
{
        struct xgene_pmu *xgene_pmu;

        pmu_dev->max_period = PMU_CNT_MAX_PERIOD - 1;
        /* The first version of the PMU supports only a single event counter */
        xgene_pmu = pmu_dev->parent;
        if (xgene_pmu->version == PCP_PMU_V1)
                pmu_dev->max_counters = 1;
        else
                pmu_dev->max_counters = PMU_MAX_COUNTERS;

        /* Perf driver registration */
        pmu_dev->pmu = (struct pmu) {
                .attr_groups    = pmu_dev->attr_groups,
                .task_ctx_nr    = perf_invalid_context,
                .pmu_enable     = xgene_perf_pmu_enable,
                .pmu_disable    = xgene_perf_pmu_disable,
                .event_init     = xgene_perf_event_init,
                .add            = xgene_perf_add,
                .del            = xgene_perf_del,
                .start          = xgene_perf_start,
                .stop           = xgene_perf_stop,
                .read           = xgene_perf_read,
        };

        /* Hardware counter init */
        xgene_pmu_stop_counters(pmu_dev);
        xgene_pmu_reset_counters(pmu_dev);

        return perf_pmu_register(&pmu_dev->pmu, name, -1);
}

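/*
 * Passing -1 as the type argument to perf_pmu_register() asks the perf
 * core to allocate a dynamic PMU type id; the registered instance then
 * appears under /sys/bus/event_source/devices/<name> with the
 * attribute groups installed above.
 */
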
static int
xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
{
        struct device *dev = xgene_pmu->dev;
        struct xgene_pmu_dev *pmu;
        int rc;

        pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
        if (!pmu)
                return -ENOMEM;
        pmu->parent = xgene_pmu;
        pmu->inf = &ctx->inf;
        ctx->pmu_dev = pmu;

        switch (pmu->inf->type) {
        case PMU_TYPE_L3C:
                pmu->attr_groups = l3c_pmu_attr_groups;
                break;
        case PMU_TYPE_IOB:
                pmu->attr_groups = iob_pmu_attr_groups;
                break;
        case PMU_TYPE_MCB:
                if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
                        goto dev_err;
                pmu->attr_groups = mcb_pmu_attr_groups;
                break;
        case PMU_TYPE_MC:
                if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
                        goto dev_err;
                pmu->attr_groups = mc_pmu_attr_groups;
                break;
        default:
                return -EINVAL;
        }

        rc = xgene_init_perf(pmu, ctx->name);
        if (rc) {
                dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
                goto dev_err;
        }

        dev_info(dev, "%s PMU registered\n", ctx->name);

        return rc;

dev_err:
        devm_kfree(dev, pmu);
        return -ENODEV;
}

static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
{
        struct xgene_pmu *xgene_pmu = pmu_dev->parent;
        u32 pmovsr;
        int idx;

        pmovsr = readl(pmu_dev->inf->csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;
        if (!pmovsr)
                return;

        /* Clear interrupt flag */
        if (xgene_pmu->version == PCP_PMU_V1)
                writel(0x0, pmu_dev->inf->csr + PMU_PMOVSR);
        else
                writel(pmovsr, pmu_dev->inf->csr + PMU_PMOVSR);

        for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
                struct perf_event *event = pmu_dev->pmu_counter_event[idx];
                int overflowed = pmovsr & BIT(idx);

                /* Ignore if we don't have an event. */
                if (!event || !overflowed)
                        continue;
                xgene_perf_event_update(event);
                xgene_perf_event_set_period(event);
        }
}

static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
{
        struct xgene_pmu_dev_ctx *ctx;
        struct xgene_pmu *xgene_pmu = dev_id;
        unsigned long flags;
        u32 val;

        raw_spin_lock_irqsave(&xgene_pmu->lock, flags);

        /* Get Interrupt PMU source */
        val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
        if (val & PCPPMU_INT_MCU) {
                list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
                        _xgene_pmu_isr(irq, ctx->pmu_dev);
                }
        }
        if (val & PCPPMU_INT_MCB) {
                list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
                        _xgene_pmu_isr(irq, ctx->pmu_dev);
                }
        }
        if (val & PCPPMU_INT_L3C) {
                list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
                        _xgene_pmu_isr(irq, ctx->pmu_dev);
                }
        }
        if (val & PCPPMU_INT_IOB) {
                list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
                        _xgene_pmu_isr(irq, ctx->pmu_dev);
                }
        }

        raw_spin_unlock_irqrestore(&xgene_pmu->lock, flags);

        return IRQ_HANDLED;
}

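/*
 * The two probe variants below (ACPI and FDT) derive the same pair of
 * bitmasks from the chip's configuration registers. Decoding, as
 * implied by the code:
 *
 *   DUALMCB  DUALMCU   mcb_active_mask   mc_active_mask
 *   0        0         0x1               0x1
 *   0        1         0x1               0x3
 *   1        0         0x3               0x5
 *   1        1         0x3               0xF
 */
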
static int acpi_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
                                         struct platform_device *pdev)
{
        void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
        struct resource *res;
        unsigned int reg;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        csw_csr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(csw_csr)) {
                dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
                return PTR_ERR(csw_csr);
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
        mcba_csr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mcba_csr)) {
                dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
                return PTR_ERR(mcba_csr);
        }

        res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
        mcbb_csr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(mcbb_csr)) {
                dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
                return PTR_ERR(mcbb_csr);
        }

        reg = readl(csw_csr + CSW_CSWCR);
        if (reg & CSW_CSWCR_DUALMCB_MASK) {
                /* Dual MCB active */
                xgene_pmu->mcb_active_mask = 0x3;
                /* Probe all active MC(s) */
                reg = readl(mcbb_csr + MCBADDRMR);
                xgene_pmu->mc_active_mask =
                        (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
        } else {
                /* Single MCB active */
                xgene_pmu->mcb_active_mask = 0x1;
                /* Probe all active MC(s) */
                reg = readl(mcba_csr + MCBADDRMR);
                xgene_pmu->mc_active_mask =
                        (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
        }

        return 0;
}

static int fdt_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
                                        struct platform_device *pdev)
{
        struct regmap *csw_map, *mcba_map, *mcbb_map;
        struct device_node *np = pdev->dev.of_node;
        unsigned int reg;

        csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
        if (IS_ERR(csw_map)) {
                dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
                return PTR_ERR(csw_map);
        }

        mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
        if (IS_ERR(mcba_map)) {
                dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
                return PTR_ERR(mcba_map);
        }

        mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
        if (IS_ERR(mcbb_map)) {
                dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
                return PTR_ERR(mcbb_map);
        }

        if (regmap_read(csw_map, CSW_CSWCR, &reg))
                return -EINVAL;

        if (reg & CSW_CSWCR_DUALMCB_MASK) {
                /* Dual MCB active */
                xgene_pmu->mcb_active_mask = 0x3;
                /* Probe all active MC(s) */
                if (regmap_read(mcbb_map, MCBADDRMR, &reg))
                        return 0;
                xgene_pmu->mc_active_mask =
                        (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
        } else {
                /* Single MCB active */
                xgene_pmu->mcb_active_mask = 0x1;
                /* Probe all active MC(s) */
                if (regmap_read(mcba_map, MCBADDRMR, &reg))
                        return 0;
                xgene_pmu->mc_active_mask =
                        (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
        }

        return 0;
}

static int xgene_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
                                          struct platform_device *pdev)
{
        if (has_acpi_companion(&pdev->dev))
                return acpi_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
        return fdt_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
}

static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
{
        switch (type) {
        case PMU_TYPE_L3C:
                return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
        case PMU_TYPE_IOB:
                return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
        case PMU_TYPE_MCB:
                return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
        case PMU_TYPE_MC:
                return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
        default:
                return devm_kasprintf(dev, GFP_KERNEL, "unknown");
        }
}

#if defined(CONFIG_ACPI)
static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
{
        struct resource *res = data;

        if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
                acpi_dev_resource_memory(ares, res);

        /* Always tell the ACPI core to skip this resource */
        return 1;
}

static struct
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
                                       struct acpi_device *adev, u32 type)
{
        struct device *dev = xgene_pmu->dev;
        struct list_head resource_list;
        struct xgene_pmu_dev_ctx *ctx;
        const union acpi_object *obj;
        struct hw_pmu_info *inf;
        void __iomem *dev_csr;
        struct resource res;
        int enable_bit;
        int rc;

        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        INIT_LIST_HEAD(&resource_list);
        rc = acpi_dev_get_resources(adev, &resource_list,
                                    acpi_pmu_dev_add_resource, &res);
        acpi_dev_free_resource_list(&resource_list);
        if (rc < 0) {
                dev_err(dev, "PMU type %d: No resource address found\n", type);
                goto err;
        }

        dev_csr = devm_ioremap_resource(dev, &res);
        if (IS_ERR(dev_csr)) {
                dev_err(dev, "PMU type %d: Failed to map resource\n", type);
                goto err;
        }

        /* A PMU device node without enable-bit-index is always enabled */
        rc = acpi_dev_get_property(adev, "enable-bit-index",
                                   ACPI_TYPE_INTEGER, &obj);
        if (rc < 0)
                enable_bit = 0;
        else
                enable_bit = (int) obj->integer.value;

        ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
        if (!ctx->name) {
                dev_err(dev, "PMU type %d: Failed to get device name\n", type);
                goto err;
        }
        inf = &ctx->inf;
        inf->type = type;
        inf->csr = dev_csr;
        inf->enable_mask = 1 << enable_bit;

        return ctx;
err:
        devm_kfree(dev, ctx);
        return NULL;
}

static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
                                    void *data, void **return_value)
{
        struct xgene_pmu *xgene_pmu = data;
        struct xgene_pmu_dev_ctx *ctx;
        struct acpi_device *adev;

        if (acpi_bus_get_device(handle, &adev))
                return AE_OK;
        if (acpi_bus_get_status(adev) || !adev->status.present)
                return AE_OK;

        if (!strcmp(acpi_device_hid(adev), "APMC0D5D"))
                ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_L3C);
        else if (!strcmp(acpi_device_hid(adev), "APMC0D5E"))
                ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_IOB);
        else if (!strcmp(acpi_device_hid(adev), "APMC0D5F"))
                ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MCB);
        else if (!strcmp(acpi_device_hid(adev), "APMC0D60"))
                ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MC);
        else
                ctx = NULL;

        if (!ctx)
                return AE_OK;

        if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
                /* Can't add the PMU device, skip it */
                devm_kfree(xgene_pmu->dev, ctx);
                return AE_OK;
        }

        switch (ctx->inf.type) {
        case PMU_TYPE_L3C:
                list_add(&ctx->next, &xgene_pmu->l3cpmus);
                break;
        case PMU_TYPE_IOB:
                list_add(&ctx->next, &xgene_pmu->iobpmus);
                break;
        case PMU_TYPE_MCB:
                list_add(&ctx->next, &xgene_pmu->mcbpmus);
                break;
        case PMU_TYPE_MC:
                list_add(&ctx->next, &xgene_pmu->mcpmus);
                break;
        }
        return AE_OK;
}

static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
                                  struct platform_device *pdev)
{
        struct device *dev = xgene_pmu->dev;
        acpi_handle handle;
        acpi_status status;

        handle = ACPI_HANDLE(dev);
        if (!handle)
                return -EINVAL;

        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
                                     acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
        if (ACPI_FAILURE(status)) {
                dev_err(dev, "failed to probe PMU devices\n");
                return -ENODEV;
        }

        return 0;
}
#else
static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
                                  struct platform_device *pdev)
{
        return 0;
}
#endif

static struct
xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
                                      struct device_node *np, u32 type)
{
        struct device *dev = xgene_pmu->dev;
        struct xgene_pmu_dev_ctx *ctx;
        struct hw_pmu_info *inf;
        void __iomem *dev_csr;
        struct resource res;
        int enable_bit;
        int rc;

        ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;
        rc = of_address_to_resource(np, 0, &res);
        if (rc < 0) {
                dev_err(dev, "PMU type %d: No resource address found\n", type);
                goto err;
        }
        dev_csr = devm_ioremap_resource(dev, &res);
        if (IS_ERR(dev_csr)) {
                dev_err(dev, "PMU type %d: Failed to map resource\n", type);
                goto err;
        }

        /* A PMU device node without enable-bit-index is always enabled */
        if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
                enable_bit = 0;

        ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
        if (!ctx->name) {
                dev_err(dev, "PMU type %d: Failed to get device name\n", type);
                goto err;
        }
        inf = &ctx->inf;
        inf->type = type;
        inf->csr = dev_csr;
        inf->enable_mask = 1 << enable_bit;

        return ctx;
err:
        devm_kfree(dev, ctx);
        return NULL;
}

static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
                                 struct platform_device *pdev)
{
        struct xgene_pmu_dev_ctx *ctx;
        struct device_node *np;

        for_each_child_of_node(pdev->dev.of_node, np) {
                if (!of_device_is_available(np))
                        continue;

                if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
                        ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
                else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
                        ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
                else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
                        ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
                else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
                        ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
                else
                        ctx = NULL;

                if (!ctx)
                        continue;

                if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
                        /* Can't add the PMU device, skip it */
                        devm_kfree(xgene_pmu->dev, ctx);
                        continue;
                }

                switch (ctx->inf.type) {
                case PMU_TYPE_L3C:
                        list_add(&ctx->next, &xgene_pmu->l3cpmus);
                        break;
                case PMU_TYPE_IOB:
                        list_add(&ctx->next, &xgene_pmu->iobpmus);
                        break;
                case PMU_TYPE_MCB:
                        list_add(&ctx->next, &xgene_pmu->mcbpmus);
                        break;
                case PMU_TYPE_MC:
                        list_add(&ctx->next, &xgene_pmu->mcpmus);
                        break;
                }
        }

        return 0;
}

static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
                                   struct platform_device *pdev)
{
        if (has_acpi_companion(&pdev->dev))
                return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
        return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
}

static const struct xgene_pmu_data xgene_pmu_data = {
        .id   = PCP_PMU_V1,
};

static const struct xgene_pmu_data xgene_pmu_v2_data = {
        .id   = PCP_PMU_V2,
};

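/*
 * Sketch of a device tree fragment matched by the table below (node
 * names, addresses and phandles are placeholders, not taken from a
 * real board file):
 *
 *      pmu@78810000 {
 *              compatible = "apm,xgene-pmu-v2";
 *              #address-cells = <2>;
 *              #size-cells = <2>;
 *              ranges;
 *              regmap-csw = <&csw>;
 *              regmap-mcba = <&mcba>;
 *              regmap-mcbb = <&mcbb>;
 *              reg = <0x0 0x78810000 0x0 0x1000>;
 *              interrupts = <0x0 0x22 0x4>;
 *
 *              l3cpmu@7e610000 {
 *                      compatible = "apm,xgene-pmu-l3c";
 *                      reg = <0x0 0x7e610000 0x0 0x1000>;
 *              };
 *      };
 */
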
static const struct of_device_id xgene_pmu_of_match[] = {
        { .compatible   = "apm,xgene-pmu",      .data = &xgene_pmu_data },
        { .compatible   = "apm,xgene-pmu-v2",   .data = &xgene_pmu_v2_data },
        {},
};
MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_pmu_acpi_match[] = {
        {"APMC0D5B", PCP_PMU_V1},
        {"APMC0D5C", PCP_PMU_V2},
        {},
};
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
#endif

static int xgene_pmu_probe(struct platform_device *pdev)
{
        const struct xgene_pmu_data *dev_data;
        const struct of_device_id *of_id;
        struct xgene_pmu *xgene_pmu;
        struct resource *res;
        int irq, rc;
        int version;

        xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
        if (!xgene_pmu)
                return -ENOMEM;
        xgene_pmu->dev = &pdev->dev;
        platform_set_drvdata(pdev, xgene_pmu);

        version = -EINVAL;
        of_id = of_match_device(xgene_pmu_of_match, &pdev->dev);
        if (of_id) {
                dev_data = (const struct xgene_pmu_data *) of_id->data;
                version = dev_data->id;
        }

#ifdef CONFIG_ACPI
        if (ACPI_COMPANION(&pdev->dev)) {
                const struct acpi_device_id *acpi_id;

                acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev);
                if (acpi_id)
                        version = (int) acpi_id->driver_data;
        }
#endif
        if (version < 0)
                return -ENODEV;

        INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
        INIT_LIST_HEAD(&xgene_pmu->iobpmus);
        INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
        INIT_LIST_HEAD(&xgene_pmu->mcpmus);

        xgene_pmu->version = version;
        dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(xgene_pmu->pcppmu_csr)) {
                dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
                rc = PTR_ERR(xgene_pmu->pcppmu_csr);
                goto err;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "No IRQ resource\n");
                rc = -EINVAL;
                goto err;
        }
        rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
                                IRQF_NOBALANCING | IRQF_NO_THREAD,
                                dev_name(&pdev->dev), xgene_pmu);
        if (rc) {
                dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
                goto err;
        }

        raw_spin_lock_init(&xgene_pmu->lock);

        /* Check for active MCBs and MCUs */
        rc = xgene_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
        if (rc) {
                dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n");
                xgene_pmu->mcb_active_mask = 0x1;
                xgene_pmu->mc_active_mask = 0x1;
        }

        /* Pick one core to use for cpumask attributes */
        cpumask_set_cpu(smp_processor_id(), &xgene_pmu->cpu);

        /* Make sure that the overflow interrupt is handled by this CPU */
        rc = irq_set_affinity(irq, &xgene_pmu->cpu);
        if (rc) {
                dev_err(&pdev->dev, "Failed to set interrupt affinity!\n");
                goto err;
        }

        /* Walk through the tree for all PMU perf devices */
        rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
        if (rc) {
                dev_err(&pdev->dev, "No PMU perf devices found!\n");
                goto err;
        }

        /* Enable interrupt */
        xgene_pmu_unmask_int(xgene_pmu);

        return 0;

err:
        if (xgene_pmu->pcppmu_csr)
                devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
        devm_kfree(&pdev->dev, xgene_pmu);

        return rc;
}

static void
xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
{
        struct xgene_pmu_dev_ctx *ctx;
        struct device *dev = xgene_pmu->dev;
        struct xgene_pmu_dev *pmu_dev;

        list_for_each_entry(ctx, pmus, next) {
                pmu_dev = ctx->pmu_dev;
                if (pmu_dev->inf->csr)
                        devm_iounmap(dev, pmu_dev->inf->csr);
                devm_kfree(dev, ctx);
                devm_kfree(dev, pmu_dev);
        }
}

static int xgene_pmu_remove(struct platform_device *pdev)
{
        struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);

        xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus);
        xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
        xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
        xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);

        if (xgene_pmu->pcppmu_csr)
                devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
        devm_kfree(&pdev->dev, xgene_pmu);

        return 0;
}

static struct platform_driver xgene_pmu_driver = {
        .probe = xgene_pmu_probe,
        .remove = xgene_pmu_remove,
        .driver = {
                .name           = "xgene-pmu",
                .of_match_table = xgene_pmu_of_match,
                .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
        },
};

builtin_platform_driver(xgene_pmu_driver);