linux/arch/metag/kernel/perf/perf_event.c
/*
 * Meta performance counter support.
 *  Copyright (C) 2012 Imagination Technologies Ltd
 *
 * This code is based on the sh pmu code:
 *  Copyright (C) 2009 Paul Mundt
 *
 * and on the arm pmu code:
 *  Copyright (C) 2009 picoChip Designs, Ltd., James Iles
 *  Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqchip/metag.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include <asm/core_reg.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>

#include "perf_event.h"

static int _hw_perf_event_init(struct perf_event *);
static void _hw_perf_event_destroy(struct perf_event *);

/* Determines which core type we are */
static struct metag_pmu *metag_pmu __read_mostly;

/* Processor specific data */
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

/* PMU admin */
const char *perf_pmu_name(void)
{
        if (!metag_pmu)
                return NULL;

        return metag_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
        if (metag_pmu)
                return metag_pmu->max_events;

        return 0;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static inline int metag_pmu_initialised(void)
{
        return !!metag_pmu;
}

static void release_pmu_hardware(void)
{
        int irq;
        unsigned int version = (metag_pmu->version &
                        (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
                        METAC_ID_REV_S;

        /* Early cores don't have overflow interrupts */
        if (version < 0x0104)
                return;

        irq = internal_irq_map(17);
        if (irq >= 0)
                free_irq(irq, (void *)1);

        irq = internal_irq_map(16);
        if (irq >= 0)
                free_irq(irq, (void *)0);
}

static int reserve_pmu_hardware(void)
{
        int err = 0, irq[2];
        unsigned int version = (metag_pmu->version &
                        (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS)) >>
                        METAC_ID_REV_S;

        /* Early cores don't have overflow interrupts */
        if (version < 0x0104)
                goto out;

        /*
         * Bit 16 on HWSTATMETA is the interrupt for performance counter 0;
         * similarly, 17 is the interrupt for performance counter 1.
         * We can't (yet) interrupt on the cycle counter because it's a
         * register; it does, however, hold a 32-bit value rather than a
         * 24-bit one.
         */
        irq[0] = internal_irq_map(16);
        if (irq[0] < 0) {
                pr_err("unable to map internal IRQ %d\n", 16);
                goto out;
        }
        err = request_irq(irq[0], metag_pmu->handle_irq, IRQF_NOBALANCING,
                        "metagpmu0", (void *)0);
        if (err) {
                pr_err("unable to request IRQ%d for metag PMU counters\n",
                                irq[0]);
                goto out;
        }

        irq[1] = internal_irq_map(17);
        if (irq[1] < 0) {
                pr_err("unable to map internal IRQ %d\n", 17);
                goto out_irq1;
        }
        err = request_irq(irq[1], metag_pmu->handle_irq, IRQF_NOBALANCING,
                        "metagpmu1", (void *)1);
        if (err) {
                pr_err("unable to request IRQ%d for metag PMU counters\n",
                                irq[1]);
                goto out_irq1;
        }

        return 0;

out_irq1:
        free_irq(irq[0], (void *)0);
out:
        return err;
}

/* PMU operations */
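/*
 * Nothing to do for a global PMU enable/disable: counters are enabled and
 * disabled individually from metag_pmu_start()/metag_pmu_stop(), so these
 * callbacks are intentionally empty.
 */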
static void metag_pmu_enable(struct pmu *pmu)
{
}

static void metag_pmu_disable(struct pmu *pmu)
{
}

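/*
 * Set up a new event: take a reference on the PMU hardware (reserving the
 * overflow IRQs on first use) and hand hardware, cache and raw event types
 * over to _hw_perf_event_init() for mapping.
 */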
static int metag_pmu_event_init(struct perf_event *event)
{
        int err = 0;
        atomic_t *active_events = &metag_pmu->active_events;

        if (!metag_pmu_initialised()) {
                err = -ENODEV;
                goto out;
        }

        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        event->destroy = _hw_perf_event_destroy;

        if (!atomic_inc_not_zero(active_events)) {
                mutex_lock(&metag_pmu->reserve_mutex);
                if (atomic_read(active_events) == 0)
                        err = reserve_pmu_hardware();

                if (!err)
                        atomic_inc(active_events);

                mutex_unlock(&metag_pmu->reserve_mutex);
        }

        /* Hardware and cache counters */
        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
        case PERF_TYPE_RAW:
                err = _hw_perf_event_init(event);
                break;

        default:
                return -ENOENT;
        }

        if (err)
                event->destroy(event);

out:
        return err;
}

void metag_pmu_event_update(struct perf_event *event,
                struct hw_perf_event *hwc, int idx)
{
        u64 prev_raw_count, new_raw_count;
        s64 delta;

        /*
         * If this counter is chained, it may be that the previous counter
         * value has been changed beneath us.
         *
         * To get around this, we read and exchange the new raw count, then
         * add the delta (new - prev) to the generic counter atomically.
         *
         * Without interrupts, this is the simplest approach.
         */
again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = metag_pmu->read(idx);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                        new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Calculate the delta and add it to the counter.
         */
        delta = (new_raw_count - prev_raw_count) & MAX_PERIOD;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
}

int metag_pmu_event_set_period(struct perf_event *event,
                struct hw_perf_event *hwc, int idx)
{
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        /* The period may have been changed */
        if (unlikely(period != hwc->last_period))
                left += period - hwc->last_period;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (left > (s64)metag_pmu->max_period)
                left = metag_pmu->max_period;

        if (metag_pmu->write) {
                local64_set(&hwc->prev_count, -(s32)left);
                metag_pmu->write(idx, -left & MAX_PERIOD);
        }

        perf_event_update_userpage(event);

        return ret;
}

static void metag_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (WARN_ON_ONCE(idx == -1))
                return;

        /*
         * We always have to reprogram the period, so ignore PERF_EF_RELOAD.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;

        /*
         * Reset the period.
         * Some counters can't be stopped (i.e. they are core global), so when
         * such a counter was 'stopped' we merely disabled the IRQ. If we don't
         * reset the period, then we'll either a) get an overflow too soon, or
         * b) get it too late if the overflow happened while the counter was
         * disabled.
         * Obviously, this has little bearing on cores without the overflow
         * interrupt, as the performance counter resets to zero on write
         * anyway.
         */
        if (metag_pmu->max_period)
                metag_pmu_event_set_period(event, hwc, hwc->idx);
        cpuc->events[idx] = event;
        metag_pmu->enable(hwc, idx);
}

static void metag_pmu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        /*
         * We should always update the counter on stop; see the comment in
         * metag_pmu_start() for why.
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                metag_pmu_event_update(event, hwc, hwc->idx);
                metag_pmu->disable(hwc, hwc->idx);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

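/*
 * Claim a hardware counter for the event: the dedicated instruction counter
 * (METAG_INST_COUNTER) when the event encoding is 0x100, otherwise the first
 * free general-purpose performance counter.
 */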
static int metag_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = 0, ret = 0;

        perf_pmu_disable(event->pmu);

        /* check whether we're counting instructions */
        if (hwc->config == 0x100) {
                if (__test_and_set_bit(METAG_INST_COUNTER,
                                cpuc->used_mask)) {
                        ret = -EAGAIN;
                        goto out;
                }
                idx = METAG_INST_COUNTER;
        } else {
                /* Check whether we have a spare counter */
                idx = find_first_zero_bit(cpuc->used_mask,
                                atomic_read(&metag_pmu->active_events));
                if (idx >= METAG_INST_COUNTER) {
                        ret = -EAGAIN;
                        goto out;
                }

                __set_bit(idx, cpuc->used_mask);
        }
        hwc->idx = idx;

        /* Make sure the counter is disabled */
        metag_pmu->disable(hwc, idx);

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                metag_pmu_start(event, PERF_EF_RELOAD);

        perf_event_update_userpage(event);
out:
        perf_pmu_enable(event->pmu);
        return ret;
}

static void metag_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        WARN_ON(idx < 0);
        metag_pmu_stop(event, PERF_EF_UPDATE);
        cpuc->events[idx] = NULL;
        __clear_bit(idx, cpuc->used_mask);

        perf_event_update_userpage(event);
}

static void metag_pmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* Don't read disabled counters! */
        if (hwc->idx < 0)
                return;

        metag_pmu_event_update(event, hwc, hwc->idx);
}

static struct pmu pmu = {
        .pmu_enable     = metag_pmu_enable,
        .pmu_disable    = metag_pmu_disable,

        .event_init     = metag_pmu_event_init,

        .add            = metag_pmu_add,
        .del            = metag_pmu_del,
        .start          = metag_pmu_start,
        .stop           = metag_pmu_stop,
        .read           = metag_pmu_read,
};

/* Core counter specific functions */
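/*
 * Map of generic hardware events to hardware event encodings; -1 marks
 * events this PMU cannot count (rejected in _hw_perf_event_init()).
 */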
static const int metag_general_events[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = 0x03,
        [PERF_COUNT_HW_INSTRUCTIONS] = 0x100,
        [PERF_COUNT_HW_CACHE_REFERENCES] = -1,
        [PERF_COUNT_HW_CACHE_MISSES] = -1,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
        [PERF_COUNT_HW_BRANCH_MISSES] = -1,
        [PERF_COUNT_HW_BUS_CYCLES] = -1,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = -1,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = -1,
        [PERF_COUNT_HW_REF_CPU_CYCLES] = -1,
};

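/*
 * Map of generic cache events to hardware event encodings;
 * CACHE_OP_UNSUPPORTED marks combinations the hardware cannot count.
 */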
static const int metag_pmu_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = 0x08,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = 0x09,
                        [C(RESULT_MISS)] = 0x0a,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = 0xd0,
                        [C(RESULT_MISS)] = 0xd2,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = 0xd4,
                        [C(RESULT_MISS)] = 0xd5,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = 0xd1,
                        [C(RESULT_MISS)] = 0xd3,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
                },
        },
};

static void _hw_perf_event_destroy(struct perf_event *event)
{
        atomic_t *active_events = &metag_pmu->active_events;
        struct mutex *pmu_mutex = &metag_pmu->reserve_mutex;

        if (atomic_dec_and_mutex_lock(active_events, pmu_mutex)) {
                release_pmu_hardware();
                mutex_unlock(pmu_mutex);
        }
}

static int _hw_perf_cache_event(int config, int *evp)
{
        unsigned long type, op, result;
        int ev;

        if (!metag_pmu->cache_events)
                return -EINVAL;

        /* Unpack config */
        type = config & 0xff;
        op = (config >> 8) & 0xff;
        result = (config >> 16) & 0xff;

        if (type >= PERF_COUNT_HW_CACHE_MAX ||
                        op >= PERF_COUNT_HW_CACHE_OP_MAX ||
                        result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ev = (*metag_pmu->cache_events)[type][op][result];
        if (ev == 0)
                return -EOPNOTSUPP;
        if (ev == -1)
                return -EINVAL;
        *evp = ev;
        return 0;
}

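/*
 * Validate the event attributes and translate them into a hardware event
 * encoding, which is stored in hwc->config.
 */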
static int _hw_perf_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        int mapping = 0, err;

        switch (attr->type) {
        case PERF_TYPE_HARDWARE:
                if (attr->config >= PERF_COUNT_HW_MAX)
                        return -EINVAL;

                mapping = metag_pmu->event_map(attr->config);
                break;

        case PERF_TYPE_HW_CACHE:
                err = _hw_perf_cache_event(attr->config, &mapping);
                if (err)
                        return err;
                break;

        case PERF_TYPE_RAW:
                mapping = attr->config;
                break;
        }

        /* Return early if the event is unsupported */
        if (mapping == -1)
                return -EINVAL;

        /*
         * Early cores have "limited" counters - they have no overflow
         * interrupts - and so are unable to do sampling without extra work
         * and timer assistance.
         */
        if (metag_pmu->max_period == 0) {
                if (hwc->sample_period)
                        return -EINVAL;
        }

        /*
         * Don't assign an index until the event is placed into the hardware.
         * -1 signifies that we're still deciding where to put it. On SMP
         * systems each core has its own set of counters, so we can't do any
         * constraint checking yet.
         */
        hwc->idx = -1;

        /* Store the event encoding */
        hwc->config |= (unsigned long)mapping;

        /*
         * For non-sampling runs, limit the sample_period to half of the
         * counter width. This way, the new counter value should be less
         * likely to overtake the previous one (unless there are IRQ latency
         * issues...)
         */
        if (metag_pmu->max_period) {
                if (!hwc->sample_period) {
                        hwc->sample_period = metag_pmu->max_period >> 1;
                        hwc->last_period = hwc->sample_period;
                        local64_set(&hwc->period_left, hwc->sample_period);
                }
        }

        return 0;
}

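/*
 * Program hardware counter idx with the event encoding and start it counting
 * for the current hardware thread.
 */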
static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
{
        struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
        unsigned int config = event->config;
        unsigned int tmp = config & 0xf0;
        unsigned long flags;

        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        /*
         * Check if we're enabling the instruction counter (index of
         * MAX_HWEVENTS - 1)
         */
        if (METAG_INST_COUNTER == idx) {
                WARN_ONCE((config != 0x100),
                        "invalid configuration (%d) for counter (%d)\n",
                        config, idx);
                local64_set(&event->prev_count, __core_reg_get(TXTACTCYC));
                goto unlock;
        }

        /* Check for a core internal or performance channel event. */
        if (tmp) {
                void *perf_addr;

                /*
                 * Anything other than a cycle count will write the low
                 * nibble to the correct counter register.
                 */
                switch (tmp) {
                case 0xd0:
                        perf_addr = (void *)PERF_ICORE(idx);
                        break;

                case 0xf0:
                        perf_addr = (void *)PERF_CHAN(idx);
                        break;

                default:
                        perf_addr = NULL;
                        break;
                }

                if (perf_addr)
                        metag_out32((config & 0x0f), perf_addr);

                /*
                 * Now we use the high nibble as the performance event to
                 * count.
                 */
                config = tmp >> 4;
        }

        tmp = ((config & 0xf) << 28) |
                        ((1 << 24) << hard_processor_id());
        if (metag_pmu->max_period)
                /*
                 * Cores supporting overflow interrupts may have had the counter
                 * set to a specific value that needs preserving.
                 */
                tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
        else
                /*
                 * Older cores reset the counter on write, so prev_count needs
                 * resetting too so we can calculate a correct delta.
                 */
                local64_set(&event->prev_count, 0);

        metag_out32(tmp, PERF_COUNT(idx));
unlock:
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx)
{
        struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
        unsigned int tmp = 0;
        unsigned long flags;

        /*
         * The cycle counter can't be disabled per se, as it's a hardware
         * thread register which is always counting. We merely return if this
         * is the counter we're attempting to disable.
         */
        if (METAG_INST_COUNTER == idx)
                return;

        /*
         * The counter value _should_ have been read prior to disabling:
         * on an early core this write resets the value to 0, so any read
         * after that would be useless. On the newer cores, however, it's
         * better to read-modify-write the register for the purposes of the
         * overflow interrupt.
         * Here we remove the thread id AND the event nibble (there are at
         * least two events that count core-global events and ignore the
         * thread id mask). This only works because we don't mix thread
         * performance counts, and event 0x00 requires a thread id mask!
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        tmp = metag_in32(PERF_COUNT(idx));
        tmp &= 0x00ffffff;
        metag_out32(tmp, PERF_COUNT(idx));

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

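/*
 * Read the current raw counter value: the full 32-bit TXTACTCYC register for
 * the instruction counter, otherwise the low 24 bits of PERF_COUNT(idx).
 */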
static u64 metag_pmu_read_counter(int idx)
{
        u32 tmp = 0;

        if (METAG_INST_COUNTER == idx) {
                tmp = __core_reg_get(TXTACTCYC);
                goto out;
        }

        tmp = metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
out:
        return tmp;
}

static void metag_pmu_write_counter(int idx, u32 val)
{
        struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
        u32 tmp = 0;
        unsigned long flags;

        /*
         * This _shouldn't_ happen, but if it does, then we can just
         * ignore the write, as the register is read-only and clear-on-write.
         */
        if (METAG_INST_COUNTER == idx)
                return;

        /*
         * We'll keep the thread mask and event id, and just update the
         * counter itself. Also, we should bound the value to 24 bits.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);

        val &= 0x00ffffff;
        tmp = metag_in32(PERF_COUNT(idx)) & 0xff000000;
        val |= tmp;
        metag_out32(val, PERF_COUNT(idx));

        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int metag_pmu_event_map(int idx)
{
        return metag_general_events[idx];
}

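/*
 * Overflow IRQ handler: freeze the counter, fold the count into the event,
 * program the next sample period, and restart the counter unless perf asks
 * for the event to be stopped.
 */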
static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
{
        int idx = (int)dev;
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event = cpuhw->events[idx];
        struct hw_perf_event *hwc = &event->hw;
        struct pt_regs *regs = get_irq_regs();
        struct perf_sample_data sampledata;
        unsigned long flags;
        u32 counter = 0;

        /*
         * We need to stop the core temporarily from generating another
         * interrupt while we disable this counter. However, we don't want
         * to flag the counter as free.
         */
        __global_lock2(flags);
        counter = metag_in32(PERF_COUNT(idx));
        metag_out32((counter & 0x00ffffff), PERF_COUNT(idx));
        __global_unlock2(flags);

        /* Update the counts and reset the sample period */
        metag_pmu_event_update(event, hwc, idx);
        perf_sample_data_init(&sampledata, 0, hwc->last_period);
        metag_pmu_event_set_period(event, hwc, idx);

        /*
         * Enable the counter again once core overflow processing has
         * completed. Note that the counter value may have been modified
         * while it was inactive in order to set it up ready for the next
         * interrupt.
         */
        if (!perf_event_overflow(event, &sampledata, regs)) {
                __global_lock2(flags);
                counter = (counter & 0xff000000) |
                          (metag_in32(PERF_COUNT(idx)) & 0x00ffffff);
                metag_out32(counter, PERF_COUNT(idx));
                __global_unlock2(flags);
        }

        return IRQ_HANDLED;
}

static struct metag_pmu _metag_pmu = {
        .handle_irq     = metag_pmu_counter_overflow,
        .enable         = metag_pmu_enable_counter,
        .disable        = metag_pmu_disable_counter,
        .read           = metag_pmu_read_counter,
        .write          = metag_pmu_write_counter,
        .event_map      = metag_pmu_event_map,
        .cache_events   = &metag_pmu_cache_events,
        .max_period     = MAX_PERIOD,
        .max_events     = MAX_HWEVENTS,
};

/* PMU CPU hotplug notifier */
static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)hcpu;
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
                return NOTIFY_DONE;

        memset(cpuc, 0, sizeof(struct cpu_hw_events));
        raw_spin_lock_init(&cpuc->pmu_lock);

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata metag_pmu_notifier = {
        .notifier_call = metag_pmu_cpu_notify,
};

/* PMU Initialisation */
static int __init init_hw_perf_events(void)
{
        int ret = 0, cpu;
        u32 version = *(u32 *)METAC_ID;
        int major = (version & METAC_ID_MAJOR_BITS) >> METAC_ID_MAJOR_S;
        int min_rev = (version & (METAC_ID_MINOR_BITS | METAC_ID_REV_BITS))
                        >> METAC_ID_REV_S;

        /* Not a Meta 2 core, so not supported */
        if (0x02 > major) {
                pr_info("no hardware counter support available\n");
                goto out;
        } else if (0x02 == major) {
                metag_pmu = &_metag_pmu;

                if (min_rev < 0x0104) {
                        /*
                         * A core without overflow interrupts, and clear-on-
                         * write counters.
                         */
                        metag_pmu->handle_irq = NULL;
                        metag_pmu->write = NULL;
                        metag_pmu->max_period = 0;
                }

                metag_pmu->name = "meta2";
                metag_pmu->version = version;
                metag_pmu->pmu = pmu;
        }

        pr_info("enabled with %s PMU driver, %d counters available\n",
                        metag_pmu->name, metag_pmu->max_events);

        /* Initialise the active events and reservation mutex */
        atomic_set(&metag_pmu->active_events, 0);
        mutex_init(&metag_pmu->reserve_mutex);

        /* Clear the counters */
        metag_out32(0, PERF_COUNT(0));
        metag_out32(0, PERF_COUNT(1));

        for_each_possible_cpu(cpu) {
                struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

                memset(cpuc, 0, sizeof(struct cpu_hw_events));
                raw_spin_lock_init(&cpuc->pmu_lock);
        }

        register_cpu_notifier(&metag_pmu_notifier);
        ret = perf_pmu_register(&pmu, (char *)metag_pmu->name, PERF_TYPE_RAW);
out:
        return ret;
}
early_initcall(init_hw_perf_events);