linux/arch/s390/kernel/perf_cpum_sf.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Performance event support for the System z CPU-measurement Sampling Facility
   4 *
   5 * Copyright IBM Corp. 2013, 2018
   6 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
   7 */
   8#define KMSG_COMPONENT  "cpum_sf"
   9#define pr_fmt(fmt)     KMSG_COMPONENT ": " fmt
  10
  11#include <linux/kernel.h>
  12#include <linux/kernel_stat.h>
  13#include <linux/perf_event.h>
  14#include <linux/percpu.h>
  15#include <linux/pid.h>
  16#include <linux/notifier.h>
  17#include <linux/export.h>
  18#include <linux/slab.h>
  19#include <linux/mm.h>
  20#include <linux/moduleparam.h>
  21#include <asm/cpu_mf.h>
  22#include <asm/irq.h>
  23#include <asm/debug.h>
  24#include <asm/timex.h>
  25
  26/* Minimum number of sample-data-block-tables:
  27 * At least one table is required for the sampling buffer structure.
  28 * A single table contains up to 511 pointers to sample-data-blocks.
  29 */
  30#define CPUM_SF_MIN_SDBT        1
  31
  32/* Number of sample-data-blocks per sample-data-block-table (SDBT):
  33 * A table contains SDB pointers (8 bytes) and one table-link entry
  34 * that points to the origin of the next SDBT.
  35 */
  36#define CPUM_SF_SDB_PER_TABLE   ((PAGE_SIZE - 8) / 8)
  37
  38/* Maximum page offset for an SDBT table-link entry:
  39 * If this page offset is reached, a table-link entry to the next SDBT
  40 * must be added.
  41 */
  42#define CPUM_SF_SDBT_TL_OFFSET  (CPUM_SF_SDB_PER_TABLE * 8)
  43static inline int require_table_link(const void *sdbt)
  44{
  45        return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
  46}
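
/*
 * Editor's sketch (not part of the original source): is_link_entry() and
 * get_next_sdbt() used below come from <asm/cpu_mf.h>.  Conceptually, a
 * table-link entry is an SDBT slot whose low-order bit is set (see the
 * "+ 1" when linking tables in realloc_sampling_buffer()), with the
 * remaining bits holding the origin of the next SDBT, roughly:
 *
 *	static inline int sketch_is_link_entry(unsigned long *s)
 *	{
 *		return *s & 0x1UL;	/* low bit tags a table-link entry */
 *	}
 *
 *	static inline unsigned long *sketch_get_next_sdbt(unsigned long *s)
 *	{
 *		return (unsigned long *)(*s & ~0x1UL);
 *	}
 */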
  47
  48/* Minimum and maximum sampling buffer sizes:
  49 *
  50 * This number represents the maximum size of the sampling buffer taking
  51 * the number of sample-data-block-tables into account.  Note that these
  52 * numbers apply to the basic-sampling function only.
  53 * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if
  54 * the diagnostic-sampling function is active.
  55 *
  56 * Sampling buffer size         Buffer characteristics
  57 * ---------------------------------------------------
  58 *       64KB               ==    16 pages (4KB per page)
  59 *                                 1 page  for SDB-tables
  60 *                                15 pages for SDBs
  61 *
  62 *  32MB                    ==  8192 pages (4KB per page)
  63 *                                16 pages for SDB-tables
  64 *                              8176 pages for SDBs
  65 */
  66static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15;
  67static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176;
  68static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1;
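
/*
 * Editor's note, spelling out the arithmetic behind the table above: one
 * SDBT page holds CPUM_SF_SDB_PER_TABLE = (4096 - 8) / 8 = 511 SDB
 * pointers.  15 SDBs therefore need a single SDBT (1 + 15 = 16 pages,
 * i.e. 64KB), and 8176 SDBs need 8176 / 511 = 16 SDBTs (16 + 8176 =
 * 8192 pages, i.e. 32MB).
 */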
  69
  70struct sf_buffer {
  71        unsigned long    *sdbt;     /* Sample-data-block-table origin */
  72        /* buffer characteristics (required for buffer increments) */
  73        unsigned long  num_sdb;     /* Number of sample-data-blocks */
  74        unsigned long num_sdbt;     /* Number of sample-data-block-tables */
  75        unsigned long    *tail;     /* last sample-data-block-table */
  76};
  77
  78struct aux_buffer {
  79        struct sf_buffer sfb;
  80        unsigned long head;        /* index of SDB of buffer head */
  81        unsigned long alert_mark;  /* index of SDB of alert request position */
  82        unsigned long empty_mark;  /* mark of SDB not marked full */
  83        unsigned long *sdb_index;  /* SDB address for fast lookup */
  84        unsigned long *sdbt_index; /* SDBT address for fast lookup */
  85};
  86
  87struct cpu_hw_sf {
  88        /* CPU-measurement sampling information block */
  89        struct hws_qsi_info_block qsi;
  90        /* CPU-measurement sampling control block */
  91        struct hws_lsctl_request_block lsctl;
  92        struct sf_buffer sfb;       /* Sampling buffer */
  93        unsigned int flags;         /* Status flags */
  94        struct perf_event *event;   /* Scheduled perf event */
  95        struct perf_output_handle handle; /* AUX buffer output handle */
  96};
  97static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf);
  98
  99/* Debug feature */
 100static debug_info_t *sfdbg;
 101
 102/*
 103 * sf_disable() - Switch off sampling facility
 104 */
 105static int sf_disable(void)
 106{
 107        struct hws_lsctl_request_block sreq;
 108
 109        memset(&sreq, 0, sizeof(sreq));
 110        return lsctl(&sreq);
 111}
 112
 113/*
 114 * sf_buffer_available() - Check for an allocated sampling buffer
 115 */
 116static int sf_buffer_available(struct cpu_hw_sf *cpuhw)
 117{
 118        return !!cpuhw->sfb.sdbt;
 119}
 120
 121/*
 122 * deallocate sampling facility buffer
 123 */
 124static void free_sampling_buffer(struct sf_buffer *sfb)
 125{
 126        unsigned long *sdbt, *curr;
 127
 128        if (!sfb->sdbt)
 129                return;
 130
 131        sdbt = sfb->sdbt;
 132        curr = sdbt;
 133
 134        /* Free the SDBT after all SDBs are processed... */
 135        while (1) {
 136                if (!*curr || !sdbt)
 137                        break;
 138
 139                /* Process table-link entries */
 140                if (is_link_entry(curr)) {
 141                        curr = get_next_sdbt(curr);
 142                        if (sdbt)
 143                                free_page((unsigned long) sdbt);
 144
 145                        /* If the origin is reached, sampling buffer is freed */
 146                        if (curr == sfb->sdbt)
 147                                break;
 148                        else
 149                                sdbt = curr;
 150                } else {
 151                        /* Process SDB pointer */
 152                        if (*curr) {
 153                                free_page(*curr);
 154                                curr++;
 155                        }
 156                }
 157        }
 158
 159        debug_sprintf_event(sfdbg, 5,
 160                            "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt);
 161        memset(sfb, 0, sizeof(*sfb));
 162}
 163
 164static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
 165{
 166        unsigned long sdb, *trailer;
 167
 168        /* Allocate and initialize sample-data-block */
 169        sdb = get_zeroed_page(gfp_flags);
 170        if (!sdb)
 171                return -ENOMEM;
 172        trailer = trailer_entry_ptr(sdb);
 173        *trailer = SDB_TE_ALERT_REQ_MASK;
 174
 175        /* Link SDB into the sample-data-block-table */
 176        *sdbt = sdb;
 177
 178        return 0;
 179}
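
/*
 * Editor's sketch (illustrative; the real helper lives in <asm/cpu_mf.h>):
 * trailer_entry_ptr() returns the trailer entry that occupies the last
 * bytes of the 4KB SDB page, roughly:
 *
 *	trailer = (void *)sdb + PAGE_SIZE - sizeof(struct hws_trailer_entry);
 */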
 180
 181/*
 182 * realloc_sampling_buffer() - extend sampler memory
 183 *
 184 * Allocates new sample-data-blocks and adds them to the specified sampling
 185 * buffer memory.
 186 *
 187 * Important: This modifies the sampling buffer and must be called when the
 188 *            sampling facility is disabled.
 189 *
 190 * Returns zero on success, non-zero otherwise.
 191 */
 192static int realloc_sampling_buffer(struct sf_buffer *sfb,
 193                                   unsigned long num_sdb, gfp_t gfp_flags)
 194{
 195        int i, rc;
 196        unsigned long *new, *tail;
 197
 198        if (!sfb->sdbt || !sfb->tail)
 199                return -EINVAL;
 200
 201        if (!is_link_entry(sfb->tail))
 202                return -EINVAL;
 203
  204        /* Append to the existing sampling buffer, overwriting the table-link
  205         * entry.
  206         * The tail variable always points to the "tail" (last and table-link)
  207         * entry in an SDB-table.
  208         */
 209        tail = sfb->tail;
 210
 211        /* Do a sanity check whether the table-link entry points to
 212         * the sampling buffer origin.
 213         */
 214        if (sfb->sdbt != get_next_sdbt(tail)) {
 215                debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: "
  216                                    "sampling buffer is not linked: origin=%p "
  217                                    "tail=%p\n",
 218                                    (void *) sfb->sdbt, (void *) tail);
 219                return -EINVAL;
 220        }
 221
 222        /* Allocate remaining SDBs */
 223        rc = 0;
 224        for (i = 0; i < num_sdb; i++) {
 225                /* Allocate a new SDB-table if it is full. */
 226                if (require_table_link(tail)) {
 227                        new = (unsigned long *) get_zeroed_page(gfp_flags);
 228                        if (!new) {
 229                                rc = -ENOMEM;
 230                                break;
 231                        }
 232                        sfb->num_sdbt++;
 233                        /* Link current page to tail of chain */
 234                        *tail = (unsigned long)(void *) new + 1;
 235                        tail = new;
 236                }
 237
 238                /* Allocate a new sample-data-block.
 239                 * If there is not enough memory, stop the realloc process
 240                 * and simply use what was allocated.  If this is a temporary
 241                 * issue, a new realloc call (if required) might succeed.
 242                 */
 243                rc = alloc_sample_data_block(tail, gfp_flags);
 244                if (rc)
 245                        break;
 246                sfb->num_sdb++;
 247                tail++;
 248        }
 249
 250        /* Link sampling buffer to its origin */
 251        *tail = (unsigned long) sfb->sdbt + 1;
 252        sfb->tail = tail;
 253
 254        debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer"
 255                            " settings: sdbt=%lu sdb=%lu\n",
 256                            sfb->num_sdbt, sfb->num_sdb);
 257        return rc;
 258}
 259
 260/*
 261 * allocate_sampling_buffer() - allocate sampler memory
 262 *
 263 * Allocates and initializes a sampling buffer structure using the
 264 * specified number of sample-data-blocks (SDB).  For each allocation,
  265 * a 4K page is used.  The number of sample-data-block-tables (SDBT)
  266 * is calculated from the number of SDBs.
  267 * Also set the ALERT_REQ mask in each SDB's trailer.
 268 *
 269 * Returns zero on success, non-zero otherwise.
 270 */
 271static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
 272{
 273        int rc;
 274
 275        if (sfb->sdbt)
 276                return -EINVAL;
 277
 278        /* Allocate the sample-data-block-table origin */
 279        sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
 280        if (!sfb->sdbt)
 281                return -ENOMEM;
 282        sfb->num_sdb = 0;
 283        sfb->num_sdbt = 1;
 284
 285        /* Link the table origin to point to itself to prepare for
 286         * realloc_sampling_buffer() invocation.
 287         */
 288        sfb->tail = sfb->sdbt;
 289        *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1;
 290
 291        /* Allocate requested number of sample-data-blocks */
 292        rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
 293        if (rc) {
 294                free_sampling_buffer(sfb);
 295                debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: "
 296                        "realloc_sampling_buffer failed with rc=%i\n", rc);
 297        } else
 298                debug_sprintf_event(sfdbg, 4,
 299                        "alloc_sampling_buffer: tear=%p dear=%p\n",
 300                        sfb->sdbt, (void *) *sfb->sdbt);
 301        return rc;
 302}
 303
 304static void sfb_set_limits(unsigned long min, unsigned long max)
 305{
 306        struct hws_qsi_info_block si;
 307
 308        CPUM_SF_MIN_SDB = min;
 309        CPUM_SF_MAX_SDB = max;
 310
 311        memset(&si, 0, sizeof(si));
 312        if (!qsi(&si))
 313                CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
 314}
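
/*
 * Editor's note with illustrative numbers: si.bsdes and si.dsdes are the
 * basic and diagnostic sample-data-entry sizes reported by QSI.  If, say,
 * bsdes = 32 and dsdes = 131, then CPUM_SF_SDB_DIAG_FACTOR becomes
 * DIV_ROUND_UP(131, 32) = 5, scaling the SDB maximum by that factor when
 * the diagnostic-sampling function is active (see sfb_max_limit() below).
 */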
 315
 316static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
 317{
 318        return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR
 319                                    : CPUM_SF_MAX_SDB;
 320}
 321
 322static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
 323                                        struct hw_perf_event *hwc)
 324{
 325        if (!sfb->sdbt)
 326                return SFB_ALLOC_REG(hwc);
 327        if (SFB_ALLOC_REG(hwc) > sfb->num_sdb)
 328                return SFB_ALLOC_REG(hwc) - sfb->num_sdb;
 329        return 0;
 330}
 331
 332static int sfb_has_pending_allocs(struct sf_buffer *sfb,
 333                                   struct hw_perf_event *hwc)
 334{
 335        return sfb_pending_allocs(sfb, hwc) > 0;
 336}
 337
 338static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
 339{
 340        /* Limit the number of SDBs to not exceed the maximum */
 341        num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc));
 342        if (num)
 343                SFB_ALLOC_REG(hwc) += num;
 344}
 345
 346static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
 347{
 348        SFB_ALLOC_REG(hwc) = 0;
 349        sfb_account_allocs(num, hwc);
 350}
 351
 352static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
 353{
 354        if (cpuhw->sfb.sdbt)
 355                free_sampling_buffer(&cpuhw->sfb);
 356}
 357
 358static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
 359{
 360        unsigned long n_sdb, freq, factor;
 361        size_t sample_size;
 362
 363        /* Calculate sampling buffers using 4K pages
 364         *
 365         *    1. Determine the sample data size which depends on the used
 366         *       sampling functions, for example, basic-sampling or
 367         *       basic-sampling with diagnostic-sampling.
 368         *
 369         *    2. Use the sampling frequency as input.  The sampling buffer is
 370         *       designed for almost one second.  This can be adjusted through
 371         *       the "factor" variable.
 372         *       In any case, alloc_sampling_buffer() sets the Alert Request
 373         *       Control indicator to trigger a measurement-alert to harvest
 374         *       sample-data-blocks (sdb).
 375         *
 376         *    3. Compute the number of sample-data-blocks and ensure a minimum
 377         *       of CPUM_SF_MIN_SDB.  Also ensure the upper limit does not
 378         *       exceed a "calculated" maximum.  The symbolic maximum is
 379         *       designed for basic-sampling only and needs to be increased if
 380         *       diagnostic-sampling is active.
 381         *       See also the remarks for these symbolic constants.
 382         *
 383         *    4. Compute the number of sample-data-block-tables (SDBT) and
 384         *       ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
 385         *       to 511 SDBs).
 386         */
 387        sample_size = sizeof(struct hws_basic_entry);
 388        freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
 389        factor = 1;
 390        n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size));
 391        if (n_sdb < CPUM_SF_MIN_SDB)
 392                n_sdb = CPUM_SF_MIN_SDB;
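
        /*
         * Editor's note, a worked example with illustrative numbers:
         * assuming a basic entry size of 32 bytes, one SDB holds
         * (4096 - 64) / 32 = 126 samples, so a sampling frequency of
         * 100000 samples per second needs DIV_ROUND_UP(100000, 126) =
         * 794 SDBs to buffer roughly one second of samples.
         */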
 393
 394        /* If there is already a sampling buffer allocated, it is very likely
 395         * that the sampling facility is enabled too.  If the event to be
 396         * initialized requires a greater sampling buffer, the allocation must
 397         * be postponed.  Changing the sampling buffer requires the sampling
 398         * facility to be in the disabled state.  So, account the number of
 399         * required SDBs and let cpumsf_pmu_enable() resize the buffer just
 400         * before the event is started.
 401         */
 402        sfb_init_allocs(n_sdb, hwc);
 403        if (sf_buffer_available(cpuhw))
 404                return 0;
 405
 406        debug_sprintf_event(sfdbg, 3,
 407                            "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu"
 408                            " sample_size=%lu cpuhw=%p\n",
 409                            SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
 410                            sample_size, cpuhw);
 411
 412        return alloc_sampling_buffer(&cpuhw->sfb,
 413                                     sfb_pending_allocs(&cpuhw->sfb, hwc));
 414}
 415
 416static unsigned long min_percent(unsigned int percent, unsigned long base,
 417                                 unsigned long min)
 418{
 419        return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100));
 420}
 421
 422static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base)
 423{
 424        /* Use a percentage-based approach to extend the sampling facility
 425         * buffer.  Accept up to 5% sample data loss.
 426         * Vary the extents between 1% to 5% of the current number of
 427         * sample-data-blocks.
 428         */
 429        if (ratio <= 5)
 430                return 0;
 431        if (ratio <= 25)
 432                return min_percent(1, base, 1);
 433        if (ratio <= 50)
 434                return min_percent(1, base, 1);
 435        if (ratio <= 75)
 436                return min_percent(2, base, 2);
 437        if (ratio <= 100)
 438                return min_percent(3, base, 3);
 439        if (ratio <= 250)
 440                return min_percent(4, base, 4);
 441
 442        return min_percent(5, base, 8);
 443}
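
/*
 * Editor's note: for example, a sample loss ratio of 60% falls into the
 * "ratio <= 75" bucket above and requests min_percent(2, base, 2), i.e.
 * an extent derived from 2% of the current number of sample-data-blocks.
 */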
 444
 445static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
 446                                  struct hw_perf_event *hwc)
 447{
 448        unsigned long ratio, num;
 449
 450        if (!OVERFLOW_REG(hwc))
 451                return;
 452
  453        /* The sample_overflow contains the average number of sample data
  454         * entries that have been lost because sample-data-blocks were full.
  455         *
  456         * Calculate the total number of sample data entries that have been
  457         * discarded.  Then calculate the ratio of lost samples to total samples
 458         * per second in percent.
 459         */
 460        ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb,
 461                             sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)));
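
        /*
         * Editor's note, illustrative numbers: with an average of 4 lost
         * samples per SDB (OVERFLOW_REG), 100 SDBs, and a frequency of
         * 10000 samples per second, the ratio is 100 * 4 * 100 / 10000 =
         * 4%, which stays below the 5% threshold in compute_sfb_extent()
         * and triggers no buffer extension.
         */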
 462
 463        /* Compute number of sample-data-blocks */
 464        num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb);
 465        if (num)
 466                sfb_account_allocs(num, hwc);
 467
 468        debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu"
 469                            " num=%lu\n", OVERFLOW_REG(hwc), ratio, num);
 470        OVERFLOW_REG(hwc) = 0;
 471}
 472
 473/* extend_sampling_buffer() - Extend sampling buffer
 474 * @sfb:        Sampling buffer structure (for local CPU)
 475 * @hwc:        Perf event hardware structure
 476 *
 477 * Use this function to extend the sampling buffer based on the overflow counter
 478 * and postponed allocation extents stored in the specified Perf event hardware.
 479 *
 480 * Important: This function disables the sampling facility in order to safely
 481 *            change the sampling buffer structure.  Do not call this function
 482 *            when the PMU is active.
 483 */
 484static void extend_sampling_buffer(struct sf_buffer *sfb,
 485                                   struct hw_perf_event *hwc)
 486{
 487        unsigned long num, num_old;
 488        int rc;
 489
 490        num = sfb_pending_allocs(sfb, hwc);
 491        if (!num)
 492                return;
 493        num_old = sfb->num_sdb;
 494
 495        /* Disable the sampling facility to reset any states and also
 496         * clear pending measurement alerts.
 497         */
 498        sf_disable();
 499
 500        /* Extend the sampling buffer.
 501         * This memory allocation typically happens in an atomic context when
 502         * called by perf.  Because this is a reallocation, it is fine if the
 503         * new SDB-request cannot be satisfied immediately.
 504         */
 505        rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
 506        if (rc)
 507                debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc "
 508                                    "failed with rc=%i\n", rc);
 509
 510        if (sfb_has_pending_allocs(sfb, hwc))
 511                debug_sprintf_event(sfdbg, 5, "sfb: extend: "
 512                                    "req=%lu alloc=%lu remaining=%lu\n",
 513                                    num, sfb->num_sdb - num_old,
 514                                    sfb_pending_allocs(sfb, hwc));
 515}
 516
 517
 518/* Number of perf events counting hardware events */
 519static atomic_t num_events;
 520/* Used to avoid races in calling reserve/release_cpumf_hardware */
 521static DEFINE_MUTEX(pmc_reserve_mutex);
 522
 523#define PMC_INIT      0
 524#define PMC_RELEASE   1
 525#define PMC_FAILURE   2
 526static void setup_pmc_cpu(void *flags)
 527{
 528        int err;
 529        struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);
 530
 531        err = 0;
 532        switch (*((int *) flags)) {
 533        case PMC_INIT:
 534                memset(cpusf, 0, sizeof(*cpusf));
 535                err = qsi(&cpusf->qsi);
 536                if (err)
 537                        break;
 538                cpusf->flags |= PMU_F_RESERVED;
 539                err = sf_disable();
 540                if (err)
 541                        pr_err("Switching off the sampling facility failed "
 542                               "with rc=%i\n", err);
 543                debug_sprintf_event(sfdbg, 5,
 544                                    "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf);
 545                break;
 546        case PMC_RELEASE:
 547                cpusf->flags &= ~PMU_F_RESERVED;
 548                err = sf_disable();
 549                if (err) {
 550                        pr_err("Switching off the sampling facility failed "
 551                               "with rc=%i\n", err);
 552                } else
 553                        deallocate_buffers(cpusf);
 554                debug_sprintf_event(sfdbg, 5,
 555                                    "setup_pmc_cpu: released: cpuhw=%p\n", cpusf);
 556                break;
 557        }
 558        if (err)
 559                *((int *) flags) |= PMC_FAILURE;
 560}
 561
 562static void release_pmc_hardware(void)
 563{
 564        int flags = PMC_RELEASE;
 565
 566        irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 567        on_each_cpu(setup_pmc_cpu, &flags, 1);
 568}
 569
 570static int reserve_pmc_hardware(void)
 571{
 572        int flags = PMC_INIT;
 573
 574        on_each_cpu(setup_pmc_cpu, &flags, 1);
 575        if (flags & PMC_FAILURE) {
 576                release_pmc_hardware();
 577                return -ENODEV;
 578        }
 579        irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 580
 581        return 0;
 582}
 583
 584static void hw_perf_event_destroy(struct perf_event *event)
 585{
 586        /* Release PMC if this is the last perf event */
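        /*
         * Editor's note: atomic_add_unless(&num_events, -1, 1) decrements
         * the counter and returns non-zero unless it reads 1, so only the
         * last event falls through to the slow path, which takes the mutex
         * and re-checks the count before releasing the hardware.
         */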
 587        if (!atomic_add_unless(&num_events, -1, 1)) {
 588                mutex_lock(&pmc_reserve_mutex);
 589                if (atomic_dec_return(&num_events) == 0)
 590                        release_pmc_hardware();
 591                mutex_unlock(&pmc_reserve_mutex);
 592        }
 593}
 594
 595static void hw_init_period(struct hw_perf_event *hwc, u64 period)
 596{
 597        hwc->sample_period = period;
 598        hwc->last_period = hwc->sample_period;
 599        local64_set(&hwc->period_left, hwc->sample_period);
 600}
 601
 602static void hw_reset_registers(struct hw_perf_event *hwc,
 603                               unsigned long *sdbt_origin)
 604{
 605        /* (Re)set to first sample-data-block-table */
 606        TEAR_REG(hwc) = (unsigned long) sdbt_origin;
 607}
 608
 609static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
 610                                   unsigned long rate)
 611{
 612        return clamp_t(unsigned long, rate,
 613                       si->min_sampl_rate, si->max_sampl_rate);
 614}
 615
 616static u32 cpumsf_pid_type(struct perf_event *event,
 617                           u32 pid, enum pid_type type)
 618{
 619        struct task_struct *tsk;
 620
 621        /* Idle process */
 622        if (!pid)
 623                goto out;
 624
 625        tsk = find_task_by_pid_ns(pid, &init_pid_ns);
 626        pid = -1;
 627        if (tsk) {
 628                /*
 629                 * Only top level events contain the pid namespace in which
 630                 * they are created.
 631                 */
 632                if (event->parent)
 633                        event = event->parent;
 634                pid = __task_pid_nr_ns(tsk, type, event->ns);
 635                /*
 636                 * See also 1d953111b648
 637                 * "perf/core: Don't report zero PIDs for exiting tasks".
 638                 */
 639                if (!pid && !pid_alive(tsk))
 640                        pid = -1;
 641        }
 642out:
 643        return pid;
 644}
 645
 646static void cpumsf_output_event_pid(struct perf_event *event,
 647                                    struct perf_sample_data *data,
 648                                    struct pt_regs *regs)
 649{
 650        u32 pid;
 651        struct perf_event_header header;
 652        struct perf_output_handle handle;
 653
 654        /*
 655         * Obtain the PID from the basic-sampling data entry and
 656         * correct the data->tid_entry.pid value.
 657         */
 658        pid = data->tid_entry.pid;
 659
 660        /* Protect callchain buffers, tasks */
 661        rcu_read_lock();
 662
 663        perf_prepare_sample(&header, data, event, regs);
 664        if (perf_output_begin(&handle, data, event, header.size))
 665                goto out;
 666
 667        /* Update the process ID (see also kernel/events/core.c) */
 668        data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID);
 669        data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID);
 670
 671        perf_output_sample(&handle, &header, data, event);
 672        perf_output_end(&handle);
 673out:
 674        rcu_read_unlock();
 675}
 676
 677static int __hw_perf_event_init(struct perf_event *event)
 678{
 679        struct cpu_hw_sf *cpuhw;
 680        struct hws_qsi_info_block si;
 681        struct perf_event_attr *attr = &event->attr;
 682        struct hw_perf_event *hwc = &event->hw;
 683        unsigned long rate;
 684        int cpu, err;
 685
 686        /* Reserve CPU-measurement sampling facility */
 687        err = 0;
 688        if (!atomic_inc_not_zero(&num_events)) {
 689                mutex_lock(&pmc_reserve_mutex);
 690                if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
 691                        err = -EBUSY;
 692                else
 693                        atomic_inc(&num_events);
 694                mutex_unlock(&pmc_reserve_mutex);
 695        }
 696        event->destroy = hw_perf_event_destroy;
 697
 698        if (err)
 699                goto out;
 700
 701        /* Access per-CPU sampling information (query sampling info) */
 702        /*
 703         * The event->cpu value can be -1 to count on every CPU, for example,
 704         * when attaching to a task.  If this is specified, use the query
 705         * sampling info from the current CPU, otherwise use event->cpu to
 706         * retrieve the per-CPU information.
 707         * Later, cpuhw indicates whether to allocate sampling buffers for a
  708         * particular CPU (cpuhw != NULL) or for each online CPU (cpuhw == NULL).
 709         */
 710        memset(&si, 0, sizeof(si));
 711        cpuhw = NULL;
 712        if (event->cpu == -1)
 713                qsi(&si);
 714        else {
 715                /* Event is pinned to a particular CPU, retrieve the per-CPU
 716                 * sampling structure for accessing the CPU-specific QSI.
 717                 */
 718                cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
 719                si = cpuhw->qsi;
 720        }
 721
 722        /* Check sampling facility authorization and, if not authorized,
 723         * fall back to other PMUs.  It is safe to check any CPU because
 724         * the authorization is identical for all configured CPUs.
 725         */
 726        if (!si.as) {
 727                err = -ENOENT;
 728                goto out;
 729        }
 730
 731        if (si.ribm & CPU_MF_SF_RIBM_NOTAV) {
 732                pr_warn("CPU Measurement Facility sampling is temporarily not available\n");
 733                err = -EBUSY;
 734                goto out;
 735        }
 736
 737        /* Always enable basic sampling */
 738        SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;
 739
 740        /* Check if diagnostic sampling is requested.  Deny if the required
 741         * sampling authorization is missing.
 742         */
 743        if (attr->config == PERF_EVENT_CPUM_SF_DIAG) {
 744                if (!si.ad) {
 745                        err = -EPERM;
 746                        goto out;
 747                }
 748                SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
 749        }
 750
 751        /* Check and set other sampling flags */
 752        if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
 753                SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
 754
 755        /* The sampling information (si) contains information about the
 756         * min/max sampling intervals and the CPU speed.  So calculate the
 757         * correct sampling interval and avoid the whole period adjust
 758         * feedback loop.
 759         */
 760        rate = 0;
 761        if (attr->freq) {
 762                if (!attr->sample_freq) {
 763                        err = -EINVAL;
 764                        goto out;
 765                }
 766                rate = freq_to_sample_rate(&si, attr->sample_freq);
 767                rate = hw_limit_rate(&si, rate);
 768                attr->freq = 0;
 769                attr->sample_period = rate;
 770        } else {
  771                /* The min/max sampling rates specify the valid range
 772                 * of sample periods.  If the specified sample period is
 773                 * out of range, limit the period to the range boundary.
 774                 */
 775                rate = hw_limit_rate(&si, hwc->sample_period);
 776
 777                /* The perf core maintains a maximum sample rate that is
 778                 * configurable through the sysctl interface.  Ensure the
 779                 * sampling rate does not exceed this value.  This also helps
 780                 * to avoid throttling when pushing samples with
 781                 * perf_event_overflow().
 782                 */
 783                if (sample_rate_to_freq(&si, rate) >
 784                      sysctl_perf_event_sample_rate) {
 785                        err = -EINVAL;
 786                        debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n");
 787                        goto out;
 788                }
 789        }
 790        SAMPL_RATE(hwc) = rate;
 791        hw_init_period(hwc, SAMPL_RATE(hwc));
 792
 793        /* Initialize sample data overflow accounting */
 794        hwc->extra_reg.reg = REG_OVERFLOW;
 795        OVERFLOW_REG(hwc) = 0;
 796
  797        /* Use the AUX buffer. No need to allocate it ourselves. */
 798        if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
 799                return 0;
 800
 801        /* Allocate the per-CPU sampling buffer using the CPU information
 802         * from the event.  If the event is not pinned to a particular
 803         * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling
 804         * buffers for each online CPU.
 805         */
 806        if (cpuhw)
 807                /* Event is pinned to a particular CPU */
 808                err = allocate_buffers(cpuhw, hwc);
 809        else {
 810                /* Event is not pinned, allocate sampling buffer on
 811                 * each online CPU
 812                 */
 813                for_each_online_cpu(cpu) {
 814                        cpuhw = &per_cpu(cpu_hw_sf, cpu);
 815                        err = allocate_buffers(cpuhw, hwc);
 816                        if (err)
 817                                break;
 818                }
 819        }
 820
 821        /* If PID/TID sampling is active, replace the default overflow
 822         * handler to extract and resolve the PIDs from the basic-sampling
 823         * data entries.
 824         */
 825        if (event->attr.sample_type & PERF_SAMPLE_TID)
 826                if (is_default_overflow_handler(event))
 827                        event->overflow_handler = cpumsf_output_event_pid;
 828out:
 829        return err;
 830}
 831
 832static int cpumsf_pmu_event_init(struct perf_event *event)
 833{
 834        int err;
 835
 836        /* No support for taken branch sampling */
 837        if (has_branch_stack(event))
 838                return -EOPNOTSUPP;
 839
 840        switch (event->attr.type) {
 841        case PERF_TYPE_RAW:
 842                if ((event->attr.config != PERF_EVENT_CPUM_SF) &&
 843                    (event->attr.config != PERF_EVENT_CPUM_SF_DIAG))
 844                        return -ENOENT;
 845                break;
 846        case PERF_TYPE_HARDWARE:
 847                /* Support sampling of CPU cycles in addition to the
 848                 * counter facility.  However, the counter facility
  849                 * is more precise and, hence, this PMU is restricted to
  850                 * sampling events only.
 851                 */
 852                if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES)
 853                        return -ENOENT;
 854                if (!is_sampling_event(event))
 855                        return -ENOENT;
 856                break;
 857        default:
 858                return -ENOENT;
 859        }
 860
 861        /* Check online status of the CPU to which the event is pinned */
 862        if (event->cpu >= 0 && !cpu_online(event->cpu))
  863                return -ENODEV;
 864
 865        /* Force reset of idle/hv excludes regardless of what the
 866         * user requested.
 867         */
 868        if (event->attr.exclude_hv)
 869                event->attr.exclude_hv = 0;
 870        if (event->attr.exclude_idle)
 871                event->attr.exclude_idle = 0;
 872
 873        err = __hw_perf_event_init(event);
 874        if (unlikely(err))
 875                if (event->destroy)
 876                        event->destroy(event);
 877        return err;
 878}
 879
 880static void cpumsf_pmu_enable(struct pmu *pmu)
 881{
 882        struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 883        struct hw_perf_event *hwc;
 884        int err;
 885
 886        if (cpuhw->flags & PMU_F_ENABLED)
 887                return;
 888
 889        if (cpuhw->flags & PMU_F_ERR_MASK)
 890                return;
 891
  892        /* Check whether to extend the sampling buffer.
 893         *
 894         * Two conditions trigger an increase of the sampling buffer for a
 895         * perf event:
 896         *    1. Postponed buffer allocations from the event initialization.
 897         *    2. Sampling overflows that contribute to pending allocations.
 898         *
 899         * Note that the extend_sampling_buffer() function disables the sampling
 900         * facility, but it can be fully re-enabled using sampling controls that
 901         * have been saved in cpumsf_pmu_disable().
 902         */
 903        if (cpuhw->event) {
 904                hwc = &cpuhw->event->hw;
 905                if (!(SAMPL_DIAG_MODE(hwc))) {
 906                        /*
 907                         * Account number of overflow-designated
 908                         * buffer extents
 909                         */
 910                        sfb_account_overflows(cpuhw, hwc);
 911                        if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
 912                                extend_sampling_buffer(&cpuhw->sfb, hwc);
 913                }
 914        }
 915
 916        /* (Re)enable the PMU and sampling facility */
 917        cpuhw->flags |= PMU_F_ENABLED;
 918        barrier();
 919
 920        err = lsctl(&cpuhw->lsctl);
 921        if (err) {
 922                cpuhw->flags &= ~PMU_F_ENABLED;
 923                pr_err("Loading sampling controls failed: op=%i err=%i\n",
 924                        1, err);
 925                return;
 926        }
 927
 928        /* Load current program parameter */
 929        lpp(&S390_lowcore.lpp);
 930
 931        debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
 932                            "tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs,
 933                            cpuhw->lsctl.ed, cpuhw->lsctl.cd,
 934                            (void *) cpuhw->lsctl.tear, (void *) cpuhw->lsctl.dear);
 935}
 936
 937static void cpumsf_pmu_disable(struct pmu *pmu)
 938{
 939        struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
 940        struct hws_lsctl_request_block inactive;
 941        struct hws_qsi_info_block si;
 942        int err;
 943
 944        if (!(cpuhw->flags & PMU_F_ENABLED))
 945                return;
 946
 947        if (cpuhw->flags & PMU_F_ERR_MASK)
 948                return;
 949
 950        /* Switch off sampling activation control */
 951        inactive = cpuhw->lsctl;
 952        inactive.cs = 0;
 953        inactive.cd = 0;
 954
 955        err = lsctl(&inactive);
 956        if (err) {
 957                pr_err("Loading sampling controls failed: op=%i err=%i\n",
 958                        2, err);
 959                return;
 960        }
 961
 962        /* Save state of TEAR and DEAR register contents */
 963        if (!qsi(&si)) {
 964                /* TEAR/DEAR values are valid only if the sampling facility is
 965                 * enabled.  Note that cpumsf_pmu_disable() might be called even
 966                 * for a disabled sampling facility because cpumsf_pmu_enable()
 967                 * controls the enable/disable state.
 968                 */
 969                if (si.es) {
 970                        cpuhw->lsctl.tear = si.tear;
 971                        cpuhw->lsctl.dear = si.dear;
 972                }
 973        } else
 974                debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: "
 975                                    "qsi() failed with err=%i\n", err);
 976
 977        cpuhw->flags &= ~PMU_F_ENABLED;
 978}
 979
 980/* perf_exclude_event() - Filter event
 981 * @event:      The perf event
 982 * @regs:       pt_regs structure
 983 * @sde_regs:   Sample-data-entry (sde) regs structure
 984 *
 985 * Filter perf events according to their exclude specification.
 986 *
 987 * Return non-zero if the event shall be excluded.
 988 */
 989static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs,
 990                              struct perf_sf_sde_regs *sde_regs)
 991{
 992        if (event->attr.exclude_user && user_mode(regs))
 993                return 1;
 994        if (event->attr.exclude_kernel && !user_mode(regs))
 995                return 1;
 996        if (event->attr.exclude_guest && sde_regs->in_guest)
 997                return 1;
 998        if (event->attr.exclude_host && !sde_regs->in_guest)
 999                return 1;
1000        return 0;
1001}
1002
1003/* perf_push_sample() - Push samples to perf
1004 * @event:      The perf event
1005 * @sample:     Hardware sample data
1006 *
 1007 * Use the hardware sample data to create a perf event sample.  The sample
 1008 * is then pushed to the perf event subsystem and the function checks for
1009 * possible event overflows.  If an event overflow occurs, the PMU is
1010 * stopped.
1011 *
1012 * Return non-zero if an event overflow occurred.
1013 */
1014static int perf_push_sample(struct perf_event *event,
1015                            struct hws_basic_entry *basic)
1016{
1017        int overflow;
1018        struct pt_regs regs;
1019        struct perf_sf_sde_regs *sde_regs;
1020        struct perf_sample_data data;
1021
1022        /* Setup perf sample */
1023        perf_sample_data_init(&data, 0, event->hw.last_period);
1024
 1025        /* Setup pt_regs to look like a CPU-measurement external interrupt
 1026         * using the Program Request Alert code.  The otherwise unused
 1027         * regs.int_parm_long field contains additional sample-data-entry related
1028         * indicators.
1029         */
1030        memset(&regs, 0, sizeof(regs));
1031        regs.int_code = 0x1407;
1032        regs.int_parm = CPU_MF_INT_SF_PRA;
1033        sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
1034
1035        psw_bits(regs.psw).ia   = basic->ia;
1036        psw_bits(regs.psw).dat  = basic->T;
1037        psw_bits(regs.psw).wait = basic->W;
1038        psw_bits(regs.psw).pstate = basic->P;
1039        psw_bits(regs.psw).as   = basic->AS;
1040
1041        /*
1042         * Use the hardware provided configuration level to decide if the
1043         * sample belongs to a guest or host. If that is not available,
1044         * fall back to the following heuristics:
1045         * A non-zero guest program parameter always indicates a guest
1046         * sample. Some early samples or samples from guests without
1047         * lpp usage would be misaccounted to the host. We use the asn
1048         * value as an addon heuristic to detect most of these guest samples.
1049         * If the value differs from 0xffff (the host value), we assume to
1050         * be a KVM guest.
1051         */
1052        switch (basic->CL) {
1053        case 1: /* logical partition */
1054                sde_regs->in_guest = 0;
1055                break;
1056        case 2: /* virtual machine */
1057                sde_regs->in_guest = 1;
1058                break;
1059        default: /* old machine, use heuristics */
1060                if (basic->gpp || basic->prim_asn != 0xffff)
1061                        sde_regs->in_guest = 1;
1062                break;
1063        }
1064
1065        /*
1066         * Store the PID value from the sample-data-entry to be
1067         * processed and resolved by cpumsf_output_event_pid().
1068         */
1069        data.tid_entry.pid = basic->hpp & LPP_PID_MASK;
1070
1071        overflow = 0;
1072        if (perf_exclude_event(event, &regs, sde_regs))
1073                goto out;
1074        if (perf_event_overflow(event, &data, &regs)) {
1075                overflow = 1;
1076                event->pmu->stop(event, 0);
1077        }
1078        perf_event_update_userpage(event);
1079out:
1080        return overflow;
1081}
1082
1083static void perf_event_count_update(struct perf_event *event, u64 count)
1084{
1085        local64_add(count, &event->count);
1086}
1087
1088static void debug_sample_entry(struct hws_basic_entry *sample,
1089                               struct hws_trailer_entry *te)
1090{
1091        debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown "
1092                            "sampling data entry: te->f=%i basic.def=%04x (%p)\n",
1093                            te->f, sample->def, sample);
1094}
1095
1096/* hw_collect_samples() - Walk through a sample-data-block and collect samples
1097 * @event:      The perf event
1098 * @sdbt:       Sample-data-block table
1099 * @overflow:   Event overflow counter
1100 *
1101 * Walks through a sample-data-block and collects sampling data entries that are
1102 * then pushed to the perf event subsystem.  Depending on the sampling function,
1103 * there can be either basic-sampling or combined-sampling data entries.  A
1104 * combined-sampling data entry consists of a basic- and a diagnostic-sampling
1105 * data entry.  The sampling function is determined by the flags in the perf
1106 * event hardware structure.  The function always works with a combined-sampling
 1107 * data entry but ignores the diagnostic portion if it is not available.
1108 *
1109 * Note that the implementation focuses on basic-sampling data entries and, if
1110 * such an entry is not valid, the entire combined-sampling data entry is
1111 * ignored.
1112 *
 1113 * The overflow variable counts the number of samples that have been discarded
1114 * due to a perf event overflow.
1115 */
1116static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
1117                               unsigned long long *overflow)
1118{
1119        struct hws_trailer_entry *te;
1120        struct hws_basic_entry *sample;
1121
1122        te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
1123        sample = (struct hws_basic_entry *) *sdbt;
1124        while ((unsigned long *) sample < (unsigned long *) te) {
1125                /* Check for an empty sample */
1126                if (!sample->def)
1127                        break;
1128
1129                /* Update perf event period */
1130                perf_event_count_update(event, SAMPL_RATE(&event->hw));
1131
1132                /* Check whether sample is valid */
1133                if (sample->def == 0x0001) {
1134                        /* If an event overflow occurred, the PMU is stopped to
1135                         * throttle event delivery.  Remaining sample data is
1136                         * discarded.
1137                         */
1138                        if (!*overflow) {
1139                                /* Check whether sample is consistent */
1140                                if (sample->I == 0 && sample->W == 0) {
1141                                        /* Deliver sample data to perf */
1142                                        *overflow = perf_push_sample(event,
1143                                                                     sample);
1144                                }
1145                        } else
1146                                /* Count discarded samples */
1147                                *overflow += 1;
1148                } else {
1149                        debug_sample_entry(sample, te);
 1150                        /* Sample slot is not yet written or holds another record.
1151                         *
1152                         * This condition can occur if the buffer was reused
1153                         * from a combined basic- and diagnostic-sampling.
1154                         * If only basic-sampling is then active, entries are
1155                         * written into the larger diagnostic entries.
1156                         * This is typically the case for sample-data-blocks
1157                         * that are not full.  Stop processing if the first
1158                         * invalid format was detected.
1159                         */
1160                        if (!te->f)
1161                                break;
1162                }
1163
1164                /* Reset sample slot and advance to next sample */
1165                sample->def = 0;
1166                sample++;
1167        }
1168}
1169
1170/* hw_perf_event_update() - Process sampling buffer
1171 * @event:      The perf event
1172 * @flush_all:  Flag to also flush partially filled sample-data-blocks
1173 *
 1174 * Processes the sampling buffer and creates perf event samples.
 1175 * The sampling buffer position is retrieved and saved in the TEAR_REG
 1176 * register of the specified perf event.
 1177 *
 1178 * Only full sample-data-blocks are processed.  Specify the flush_all flag
1179 * to also walk through partially filled sample-data-blocks.  It is ignored
1180 * if PERF_CPUM_SF_FULL_BLOCKS is set.  The PERF_CPUM_SF_FULL_BLOCKS flag
1181 * enforces the processing of full sample-data-blocks only (trailer entries
1182 * with the block-full-indicator bit set).
1183 */
1184static void hw_perf_event_update(struct perf_event *event, int flush_all)
1185{
1186        struct hw_perf_event *hwc = &event->hw;
1187        struct hws_trailer_entry *te;
1188        unsigned long *sdbt;
1189        unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
1190        int done;
1191
1192        /*
1193         * AUX buffer is used when in diagnostic sampling mode.
1194         * No perf events/samples are created.
1195         */
1196        if (SAMPL_DIAG_MODE(&event->hw))
1197                return;
1198
1199        if (flush_all && SDB_FULL_BLOCKS(hwc))
1200                flush_all = 0;
1201
1202        sdbt = (unsigned long *) TEAR_REG(hwc);
1203        done = event_overflow = sampl_overflow = num_sdb = 0;
1204        while (!done) {
1205                /* Get the trailer entry of the sample-data-block */
1206                te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
1207
1208                /* Leave loop if no more work to do (block full indicator) */
1209                if (!te->f) {
1210                        done = 1;
1211                        if (!flush_all)
1212                                break;
1213                }
1214
1215                /* Check the sample overflow count */
1216                if (te->overflow)
1217                        /* Account sample overflows and, if a particular limit
1218                         * is reached, extend the sampling buffer.
1219                         * For details, see sfb_account_overflows().
1220                         */
1221                        sampl_overflow += te->overflow;
1222
1223                /* Timestamps are valid for full sample-data-blocks only */
1224                debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p "
1225                                    "overflow=%llu timestamp=0x%llx\n",
1226                                    sdbt, te->overflow,
1227                                    (te->f) ? trailer_timestamp(te) : 0ULL);
1228
1229                /* Collect all samples from a single sample-data-block and
1230                 * flag if an (perf) event overflow happened.  If so, the PMU
1231                 * is stopped and remaining samples will be discarded.
1232                 */
1233                hw_collect_samples(event, sdbt, &event_overflow);
1234                num_sdb++;
1235
1236                /* Reset trailer (using compare-double-and-swap) */
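                /*
                 * Editor's note: the (te->flags, te->overflow) pair is
                 * swapped as one 16-byte unit because the hardware sampler
                 * may update both fields concurrently; the loop retries
                 * until the compare-double-and-swap observes unchanged
                 * values in memory.
                 */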
1237                do {
1238                        te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
1239                        te_flags |= SDB_TE_ALERT_REQ_MASK;
1240                } while (!cmpxchg_double(&te->flags, &te->overflow,
1241                                         te->flags, te->overflow,
1242                                         te_flags, 0ULL));
1243
1244                /* Advance to next sample-data-block */
1245                sdbt++;
1246                if (is_link_entry(sdbt))
1247                        sdbt = get_next_sdbt(sdbt);
1248
1249                /* Update event hardware registers */
1250                TEAR_REG(hwc) = (unsigned long) sdbt;
1251
1252                /* Stop processing sample-data if all samples of the current
1253                 * sample-data-block were flushed even if it was not full.
1254                 */
1255                if (flush_all && done)
1256                        break;
1257
1258                /* If an event overflow happened, discard samples by
1259                 * processing any remaining sample-data-blocks.
1260                 */
1261                if (event_overflow)
1262                        flush_all = 1;
1263        }
1264
1265        /* Account sample overflows in the event hardware structure */
1266        if (sampl_overflow)
1267                OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
1268                                                 sampl_overflow, 1 + num_sdb);
1269        if (sampl_overflow || event_overflow)
1270                debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
1271                                    "overflow stats: sample=%llu event=%llu\n",
1272                                    sampl_overflow, event_overflow);
1273}
1274
1275#define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb)
1276#define AUX_SDB_NUM(aux, start, end) (end >= start ? end - start + 1 : 0)
1277#define AUX_SDB_NUM_ALERT(aux) AUX_SDB_NUM(aux, aux->head, aux->alert_mark)
1278#define AUX_SDB_NUM_EMPTY(aux) AUX_SDB_NUM(aux, aux->head, aux->empty_mark)
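
/*
 * Editor's note with illustrative numbers: aux->head, aux->alert_mark and
 * aux->empty_mark are monotonically growing SDB indexes; AUX_SDB_INDEX()
 * wraps them into the ring, e.g. index 35 in a 32-SDB buffer maps to slot
 * 35 % 32 = 3.  AUX_SDB_NUM() counts an inclusive range of the unwrapped
 * indexes, e.g. head = 5 and alert_mark = 9 give AUX_SDB_NUM_ALERT = 5.
 */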
1279
1280/*
1281 * Get trailer entry by index of SDB.
1282 */
1283static struct hws_trailer_entry *aux_sdb_trailer(struct aux_buffer *aux,
1284                                                 unsigned long index)
1285{
1286        unsigned long sdb;
1287
1288        index = AUX_SDB_INDEX(aux, index);
1289        sdb = aux->sdb_index[index];
1290        return (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
1291}
1292
1293/*
 1294 * Finish sampling on the CPU. Called by cpumsf_pmu_del() with the PMU
 1295 * disabled. Collect the full SDBs in the AUX buffer which have not yet
 1296 * reached the alert indicator, and ignore the SDBs which are not
 1297 * full.
1298 *
1299 * 1. Scan SDBs to see how much data is there and consume them.
1300 * 2. Remove alert indicator in the buffer.
1301 */
1302static void aux_output_end(struct perf_output_handle *handle)
1303{
1304        unsigned long i, range_scan, idx;
1305        struct aux_buffer *aux;
1306        struct hws_trailer_entry *te;
1307
1308        aux = perf_get_aux(handle);
1309        if (!aux)
1310                return;
1311
1312        range_scan = AUX_SDB_NUM_ALERT(aux);
1313        for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
1314                te = aux_sdb_trailer(aux, idx);
1315                if (!(te->flags & SDB_TE_BUFFER_FULL_MASK))
1316                        break;
1317        }
1318        /* i is num of SDBs which are full */
1319        perf_aux_output_end(handle, i << PAGE_SHIFT);
1320
1321        /* Remove alert indicators in the buffer */
1322        te = aux_sdb_trailer(aux, aux->alert_mark);
1323        te->flags &= ~SDB_TE_ALERT_REQ_MASK;
1324
1325        debug_sprintf_event(sfdbg, 6, "aux_output_end: collect %lx SDBs\n", i);
1326}
1327
1328/*
1329 * Start sampling on the CPU. Called by cpumsf_pmu_add() when an event
 1330 * is first added to the CPU or rescheduled to the CPU again. It is called
 1331 * with the PMU disabled.
 1332 *
 1333 * 1. Reset the trailer of the SDBs to get ready for new data.
 1334 * 2. Tell the hardware where to put the data by resetting the SDB buffer
 1335 *    head (TEAR/DEAR).
1336 */
1337static int aux_output_begin(struct perf_output_handle *handle,
1338                            struct aux_buffer *aux,
1339                            struct cpu_hw_sf *cpuhw)
1340{
1341        unsigned long range;
1342        unsigned long i, range_scan, idx;
1343        unsigned long head, base, offset;
1344        struct hws_trailer_entry *te;
1345
1346        if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
1347                return -EINVAL;
1348
1349        aux->head = handle->head >> PAGE_SHIFT;
1350        range = (handle->size + 1) >> PAGE_SHIFT;
1351        if (range <= 1)
1352                return -ENOMEM;
1353
1354        /*
1355         * SDBs between aux->head and aux->empty_mark are already ready
1356         * for new data. range_scan is num of SDBs not within them.
1357         */
1358        if (range > AUX_SDB_NUM_EMPTY(aux)) {
1359                range_scan = range - AUX_SDB_NUM_EMPTY(aux);
1360                idx = aux->empty_mark + 1;
1361                for (i = 0; i < range_scan; i++, idx++) {
1362                        te = aux_sdb_trailer(aux, idx);
1363                        te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
1364                        te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
1365                        te->overflow = 0;
1366                }
1367                /* Save the position of empty SDBs */
1368                aux->empty_mark = aux->head + range - 1;
1369        }
1370
1371        /* Set alert indicator */
1372        aux->alert_mark = aux->head + range/2 - 1;
1373        te = aux_sdb_trailer(aux, aux->alert_mark);
1374        te->flags = te->flags | SDB_TE_ALERT_REQ_MASK;
1375
1376        /* Reset hardware buffer head */
1377        head = AUX_SDB_INDEX(aux, aux->head);
1378        base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE];
1379        offset = head % CPUM_SF_SDB_PER_TABLE;
1380        cpuhw->lsctl.tear = base + offset * sizeof(unsigned long);
1381        cpuhw->lsctl.dear = aux->sdb_index[head];
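
        /*
         * Editor's note, illustrative numbers: with 511 SDB pointers per
         * SDBT, a wrapped head index of 600 lies in table 600 / 511 = 1 at
         * slot 600 % 511 = 89, so TEAR points at that SDBT entry (base +
         * 89 * 8) and DEAR at the SDB page itself.
         */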
1382
1383        debug_sprintf_event(sfdbg, 6, "aux_output_begin: "
1384                            "head->alert_mark->empty_mark (num_alert, range)"
1385                            "[%lx -> %lx -> %lx] (%lx, %lx) "
1386                            "tear index %lx, tear %lx dear %lx\n",
1387                            aux->head, aux->alert_mark, aux->empty_mark,
1388                            AUX_SDB_NUM_ALERT(aux), range,
1389                            head / CPUM_SF_SDB_PER_TABLE,
1390                            cpuhw->lsctl.tear,
1391                            cpuhw->lsctl.dear);
1392
1393        return 0;
1394}
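
/*
 * Example, assuming 4KB pages: with handle->head == 0 and
 * handle->size == 0x3ffff (256KB - 1), aux->head is 0 and range is
 * 64 SDBs.  The alert indicator is set on SDB 31 (head + range / 2 - 1)
 * so that the measurement alert fires about half-way through the range,
 * empty_mark covers SDBs 0..63, and TEAR/DEAR are loaded with the SDBT
 * entry address and the SDB address of SDB 0, respectively.
 */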
1395
1396/*
1397 * Set alert indicator on SDB at index @alert_index while sampler is running.
1398 *
1399 * Return true on success.
1400 * Return false if the full indicator was already set by the hardware sampler.
1401 */
1402static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
1403                          unsigned long long *overflow)
1404{
1405        unsigned long long orig_overflow, orig_flags, new_flags;
1406        struct hws_trailer_entry *te;
1407
1408        te = aux_sdb_trailer(aux, alert_index);
1409        do {
1410                orig_flags = te->flags;
1411                orig_overflow = te->overflow;
1412                *overflow = orig_overflow;
1413                if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
1414                        /*
1415                         * The SDB has already been marked full by
1416                         * the hardware. Abort and let the caller
1417                         * try an SDB further behind.
1418                         */
1419                        return false;
1420                }
1421                new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK;
1422        } while (!cmpxchg_double(&te->flags, &te->overflow,
1423                                 orig_flags, orig_overflow,
1424                                 new_flags, 0ULL));
1425        return true;
1426}
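
/*
 * The cmpxchg_double() in aux_set_alert() updates te->flags and
 * te->overflow as a single atomic 16-byte unit: if the hardware
 * sampler changes either field between the reads and the
 * compare-and-swap, the swap fails and the loop re-reads both fields.
 * Clearing the overflow counter in the same operation ensures that
 * no overflow increment is lost or counted twice.
 */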
1427
1428/*
1429 * aux_reset_buffer() - Scan and setup SDBs for new samples
1430 * @aux:        The AUX buffer to reset
1431 * @range:      The range of SDBs to scan, starting from aux->head
1432 * @overflow:   Set to the accumulated overflow count
1433 *
1434 * Set the alert indicator on the SDB at index aux->alert_mark. If this SDB is
1435 * marked as empty, check whether it has already been set full by the hardware
1436 * sampler. If so, new data arrived before the alert indicator could be set,
1437 * and the caller should retry with an alert position further behind.
1438 *
1439 * Scan the SDBs in the AUX buffer from behind aux->empty_mark. They were used
1440 * previously and have already been consumed by user space. Reset these SDBs
1441 * (clear the full indicator and the alert indicator) for new data.
1442 * If aux->alert_mark falls into this area, just set it. The overflow count is
1443 * accumulated while scanning.
1444 *
1445 * SDBs between aux->head and aux->empty_mark were already reset last time
1446 * and are ready for new samples, so scanning this area can be skipped.
1447 *
1448 * Return true if the alert indicator is set successfully and false if not.
1449 */
1450static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
1451                             unsigned long long *overflow)
1452{
1453        unsigned long long orig_overflow, orig_flags, new_flags;
1454        unsigned long i, range_scan, idx;
1455        struct hws_trailer_entry *te;
1456
1457        if (range <= AUX_SDB_NUM_EMPTY(aux))
1458                /*
1459                 * No need to scan. All SDBs in range are marked as empty.
1460                 * Just set the alert indicator, checking for a race
1461                 * with the hardware sampler.
1462                 */
1463                return aux_set_alert(aux, aux->alert_mark, overflow);
1464
1465        if (aux->alert_mark <= aux->empty_mark)
1466                /*
1467                 * Set the alert indicator on an empty SDB, checking
1468                 * for a race with the hardware sampler.
1469                 */
1470                if (!aux_set_alert(aux, aux->alert_mark, overflow))
1471                        return false;
1472
1473        /*
1474         * Scan the SDBs to clear the full and alert indicators used previously.
1475         * Start scanning from one SDB behind empty_mark. If the new alert
1476         * indicator falls into this range, set it.
1477         */
1478        range_scan = range - AUX_SDB_NUM_EMPTY(aux);
1479        idx = aux->empty_mark + 1;
1480        for (i = 0; i < range_scan; i++, idx++) {
1481                te = aux_sdb_trailer(aux, idx);
1482                do {
1483                        orig_flags = te->flags;
1484                        orig_overflow = te->overflow;
1485                        new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK;
1486                        if (idx == aux->alert_mark)
1487                                new_flags |= SDB_TE_ALERT_REQ_MASK;
1488                        else
1489                                new_flags &= ~SDB_TE_ALERT_REQ_MASK;
1490                } while (!cmpxchg_double(&te->flags, &te->overflow,
1491                                         orig_flags, orig_overflow,
1492                                         new_flags, 0ULL));
1493                *overflow += orig_overflow;
1494        }
1495
1496        /* Update empty_mark to new position */
1497        aux->empty_mark = aux->head + range - 1;
1498
1499        return true;
1500}
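
/*
 * Example: with aux->head == 8, aux->empty_mark == 11 and range == 8,
 * four SDBs (8..11) are still marked empty, so only SDBs 12..15 are
 * scanned and reset.  An alert_mark of 11 falls into the empty area
 * and is handled by aux_set_alert() alone.
 */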
1501
1502/*
1503 * Measurement alert handler for diagnostic mode sampling.
1504 */
1505static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
1506{
1507        struct aux_buffer *aux;
1508        int done = 0;
1509        unsigned long range = 0, size;
1510        unsigned long long overflow = 0;
1511        struct perf_output_handle *handle = &cpuhw->handle;
1512        unsigned long num_sdb;
1513
1514        aux = perf_get_aux(handle);
1515        if (WARN_ON_ONCE(!aux))
1516                return;
1517
1518        /* Inform user space that new data has arrived */
1519        size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
1520        perf_aux_output_end(handle, size);
1521        num_sdb = aux->sfb.num_sdb;
1522
1523        while (!done) {
1524                /* Get an output handle */
1525                aux = perf_aux_output_begin(handle, cpuhw->event);
1526                if (handle->size == 0) {
1527                        pr_err("The AUX buffer with %lu pages for the "
1528                               "diagnostic-sampling mode is full\n",
1529                                num_sdb);
1530                        debug_sprintf_event(sfdbg, 1, "AUX buffer used up\n");
1531                        break;
1532                }
1533                if (WARN_ON_ONCE(!aux))
1534                        return;
1535
1536                /* Update head and alert_mark to new position */
1537                aux->head = handle->head >> PAGE_SHIFT;
1538                range = (handle->size + 1) >> PAGE_SHIFT;
1539                if (range == 1)
1540                        aux->alert_mark = aux->head;
1541                else
1542                        aux->alert_mark = aux->head + range / 2 - 1;
1543
1544                if (aux_reset_buffer(aux, range, &overflow)) {
1545                        if (!overflow) {
1546                                done = 1;
1547                                break;
1548                        }
1549                        size = range << PAGE_SHIFT;
1550                        perf_aux_output_end(&cpuhw->handle, size);
1551                        pr_err("Sample data caused the AUX buffer with %lu "
1552                               "pages to overflow\n", num_sdb);
1553                        debug_sprintf_event(sfdbg, 1, "head %lx range %lx "
1554                                            "overflow %llx\n",
1555                                            aux->head, range, overflow);
1556                } else {
1557                        size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
1558                        perf_aux_output_end(&cpuhw->handle, size);
1559                        debug_sprintf_event(sfdbg, 6, "head %lx alert %lx "
1560                                            "already full, try another\n",
1561                                            aux->head, aux->alert_mark);
1562                }
1563        }
1564
1565        if (done)
1566                debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: "
1567                                    "[%lx -> %lx -> %lx] (%lx, %lx)\n",
1568                                    aux->head, aux->alert_mark, aux->empty_mark,
1569                                    AUX_SDB_NUM_ALERT(aux), range);
1570}
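
/*
 * Each iteration of the loop above either succeeds in arming a new
 * alert (done, unless samples overflowed in the meantime), or finds
 * the chosen alert SDB already filled by the sampler; in both of the
 * latter cases the newly full SDBs are flushed to user space and the
 * loop retries with a fresh output handle.
 */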
1571
1572/*
1573 * Callback when freeing AUX buffers.
1574 */
1575static void aux_buffer_free(void *data)
1576{
1577        struct aux_buffer *aux = data;
1578        unsigned long i, num_sdbt;
1579
1580        if (!aux)
1581                return;
1582
1583        /* Free the SDBTs. The SDBs are freed by the caller */
1584        num_sdbt = aux->sfb.num_sdbt;
1585        for (i = 0; i < num_sdbt; i++)
1586                free_page(aux->sdbt_index[i]);
1587
1588        kfree(aux->sdbt_index);
1589        kfree(aux->sdb_index);
1590        kfree(aux);
1591
1592        debug_sprintf_event(sfdbg, 4, "aux_buffer_free: free "
1593                            "%lu SDBTs\n", num_sdbt);
1594}
1595
1596static void aux_sdb_init(unsigned long sdb)
1597{
1598        struct hws_trailer_entry *te;
1599
1600        te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
1601
1602        /* Save clock base */
1603        te->clock_base = 1;
1604        memcpy(&te->progusage2, &tod_clock_base[1], 8);
1605}
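
/*
 * The clock_base flag in the trailer indicates that progusage2 holds
 * the TOD clock base, which consumers of the sample data can use to
 * convert the TOD time stamps in the samples into wall-clock time.
 */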
1606
1607/*
1608 * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
1609 * @event:      Event the buffer is set up for, event->cpu == -1 means current
1610 * @pages:      Array of pointers to buffer pages passed from perf core
1611 * @nr_pages:   Total number of pages
1612 * @snapshot:   Flag for snapshot mode
1613 *
1614 * This is the callback invoked when an event is set up to use an AUX buffer.
1615 * The perf tool can trigger it via an additional mmap() call on the event.
1616 * Unlike the buffer for basic samples, the AUX buffer belongs to the event.
1617 * For a per-thread event, it is scheduled with the task among the online CPUs.
1618 *
1619 * Return the private AUX buffer structure on success or NULL on failure.
1620 */
1621static void *aux_buffer_setup(struct perf_event *event, void **pages,
1622                              int nr_pages, bool snapshot)
1623{
1624        struct sf_buffer *sfb;
1625        struct aux_buffer *aux;
1626        unsigned long *new, *tail;
1627        int i, n_sdbt;
1628
1629        if (!nr_pages || !pages)
1630                return NULL;
1631
1632        if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
1633                pr_err("AUX buffer size (%i pages) is larger than the "
1634                       "maximum sampling buffer limit\n",
1635                       nr_pages);
1636                return NULL;
1637        } else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
1638                pr_err("AUX buffer size (%i pages) is less than the "
1639                       "minimum sampling buffer limit\n",
1640                       nr_pages);
1641                return NULL;
1642        }
1643
1644        /* Allocate aux_buffer struct for the event */
1645        aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
1646        if (!aux)
1647                goto no_aux;
1648        sfb = &aux->sfb;
1649
1650        /* Allocate sdbt_index for fast reference */
1651        n_sdbt = (nr_pages + CPUM_SF_SDB_PER_TABLE - 1) / CPUM_SF_SDB_PER_TABLE;
1652        aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL);
1653        if (!aux->sdbt_index)
1654                goto no_sdbt_index;
1655
1656        /* Allocate sdb_index for fast reference */
1657        aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL);
1658        if (!aux->sdb_index)
1659                goto no_sdb_index;
1660
1661        /* Allocate the first SDBT */
1662        sfb->num_sdbt = 0;
1663        sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
1664        if (!sfb->sdbt)
1665                goto no_sdbt;
1666        aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt;
1667        tail = sfb->tail = sfb->sdbt;
1668
1669        /*
1670         * Link the provided AUX buffer pages into the SDBTs,
1671         * allocating additional SDBTs as needed.
1672         */
1673        for (i = 0; i < nr_pages; i++, tail++) {
1674                if (require_table_link(tail)) {
1675                        new = (unsigned long *) get_zeroed_page(GFP_KERNEL);
1676                        if (!new)
1677                                goto no_sdbt;
1678                        aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new;
1679                        /* Link the new SDBT via a table-link entry (bit 0 set) */
1680                        *tail = (unsigned long)(void *) new + 1;
1681                        tail = new;
1682                }
1683                /* Tail points to the current SDB entry in the SDBT */
1684                *tail = (unsigned long)pages[i];
1685                aux->sdb_index[i] = (unsigned long)pages[i];
1686                aux_sdb_init((unsigned long)pages[i]);
1687        }
1688        sfb->num_sdb = nr_pages;
1689
1690        /* Link the last entry in the SDBT to the first SDBT */
1691        *tail = (unsigned long) sfb->sdbt + 1;
1692        sfb->tail = tail;
1693
1694        /*
1695         * Initially all SDBs are zeroed, so mark them all as empty.
1696         * There is then no need to clear the full indicators when
1697         * this event is first added.
1698         */
1699        aux->empty_mark = sfb->num_sdb - 1;
1700
1701        debug_sprintf_event(sfdbg, 4, "aux_buffer_setup: setup %lu SDBTs"
1702                            " and %lu SDBs\n",
1703                            sfb->num_sdbt, sfb->num_sdb);
1704
1705        return aux;
1706
1707no_sdbt:
1708        /* SDBs (AUX buffer pages) are freed by caller */
1709        for (i = 0; i < sfb->num_sdbt; i++)
1710                free_page(aux->sdbt_index[i]);
1711        kfree(aux->sdb_index);
1712no_sdb_index:
1713        kfree(aux->sdbt_index);
1714no_sdbt_index:
1715        kfree(aux);
1716no_aux:
1717        return NULL;
1718}
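
/*
 * The resulting structure for the AUX buffer pages is a circular chain
 * of SDBTs, each holding up to 511 SDB pointers plus one table-link
 * entry (bit 0 set) that points to the next SDBT; the last table-link
 * entry points back to the first SDBT:
 *
 *   SDBT 0                         SDBT 1
 *   +--------------+               +--------------+
 *   | pages[0]     |               | pages[511]   |
 *   | ...          |               | ...          |
 *   | link+1 ------+-------------->| link+1 ------+--> SDBT 0
 *   +--------------+               +--------------+
 */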
1719
1720static void cpumsf_pmu_read(struct perf_event *event)
1721{
1722        /* Nothing to do ... updates are interrupt-driven */
1723}
1724
1725/* Activate sampling control.
1726 * The next call of pmu_enable() starts sampling.
1727 */
1728static void cpumsf_pmu_start(struct perf_event *event, int flags)
1729{
1730        struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
1731
1732        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1733                return;
1734
1735        if (flags & PERF_EF_RELOAD)
1736                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1737
1738        perf_pmu_disable(event->pmu);
1739        event->hw.state = 0;
1740        cpuhw->lsctl.cs = 1;
1741        if (SAMPL_DIAG_MODE(&event->hw))
1742                cpuhw->lsctl.cd = 1;
1743        perf_pmu_enable(event->pmu);
1744}
1745
1746/* Deactivate sampling control.
1747 * The next call of pmu_enable() stops sampling.
1748 */
1749static void cpumsf_pmu_stop(struct perf_event *event, int flags)
1750{
1751        struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
1752
1753        if (event->hw.state & PERF_HES_STOPPED)
1754                return;
1755
1756        perf_pmu_disable(event->pmu);
1757        cpuhw->lsctl.cs = 0;
1758        cpuhw->lsctl.cd = 0;
1759        event->hw.state |= PERF_HES_STOPPED;
1760
1761        if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
1762                hw_perf_event_update(event, 1);
1763                event->hw.state |= PERF_HES_UPTODATE;
1764        }
1765        perf_pmu_enable(event->pmu);
1766}
1767
1768static int cpumsf_pmu_add(struct perf_event *event, int flags)
1769{
1770        struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
1771        struct aux_buffer *aux;
1772        int err;
1773
1774        if (cpuhw->flags & PMU_F_IN_USE)
1775                return -EAGAIN;
1776
1777        if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
1778                return -EINVAL;
1779
1780        err = 0;
1781        perf_pmu_disable(event->pmu);
1782
1783        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1784
1785        /* Set up sampling controls.  Always program the sampling register
1786         * using the SDB-table start.  Reset the TEAR_REG event hardware register,
1787         * which is used by hw_perf_event_update() to store the sampling buffer
1788         * position after samples have been flushed.
1789         */
1790        cpuhw->lsctl.s = 0;
1791        cpuhw->lsctl.h = 1;
1792        cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
1793        if (!SAMPL_DIAG_MODE(&event->hw)) {
1794                cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
1795                cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
1796                hw_reset_registers(&event->hw, cpuhw->sfb.sdbt);
1797        }
1798
1799        /* Ensure sampling functions are in the disabled state.  If disabled,
1800         * switch on sampling enable control. */
1801        if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) {
1802                err = -EAGAIN;
1803                goto out;
1804        }
1805        if (SAMPL_DIAG_MODE(&event->hw)) {
1806                aux = perf_aux_output_begin(&cpuhw->handle, event);
1807                if (!aux) {
1808                        err = -EINVAL;
1809                        goto out;
1810                }
1811                err = aux_output_begin(&cpuhw->handle, aux, cpuhw);
1812                if (err)
1813                        goto out;
1814                cpuhw->lsctl.ed = 1;
1815        }
1816        cpuhw->lsctl.es = 1;
1817
1818        /* Set in_use flag and store event */
1819        cpuhw->event = event;
1820        cpuhw->flags |= PMU_F_IN_USE;
1821
1822        if (flags & PERF_EF_START)
1823                cpumsf_pmu_start(event, PERF_EF_RELOAD);
1824out:
1825        perf_event_update_userpage(event);
1826        perf_pmu_enable(event->pmu);
1827        return err;
1828}
1829
1830static void cpumsf_pmu_del(struct perf_event *event, int flags)
1831{
1832        struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
1833
1834        perf_pmu_disable(event->pmu);
1835        cpumsf_pmu_stop(event, PERF_EF_UPDATE);
1836
1837        cpuhw->lsctl.es = 0;
1838        cpuhw->lsctl.ed = 0;
1839        cpuhw->flags &= ~PMU_F_IN_USE;
1840        cpuhw->event = NULL;
1841
1842        if (SAMPL_DIAG_MODE(&event->hw))
1843                aux_output_end(&cpuhw->handle);
1844        perf_event_update_userpage(event);
1845        perf_pmu_enable(event->pmu);
1846}
1847
1848CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
1849CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
1850
1851static struct attribute *cpumsf_pmu_events_attr[] = {
1852        CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
1853        NULL,
1854        NULL,
1855};
1856
1857PMU_FORMAT_ATTR(event, "config:0-63");
1858
1859static struct attribute *cpumsf_pmu_format_attr[] = {
1860        &format_attr_event.attr,
1861        NULL,
1862};
1863
1864static struct attribute_group cpumsf_pmu_events_group = {
1865        .name = "events",
1866        .attrs = cpumsf_pmu_events_attr,
1867};
1868static struct attribute_group cpumsf_pmu_format_group = {
1869        .name = "format",
1870        .attrs = cpumsf_pmu_format_attr,
1871};
1872static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
1873        &cpumsf_pmu_events_group,
1874        &cpumsf_pmu_format_group,
1875        NULL,
1876};
1877
1878static struct pmu cpumf_sampling = {
1879        .pmu_enable   = cpumsf_pmu_enable,
1880        .pmu_disable  = cpumsf_pmu_disable,
1881
1882        .event_init   = cpumsf_pmu_event_init,
1883        .add          = cpumsf_pmu_add,
1884        .del          = cpumsf_pmu_del,
1885
1886        .start        = cpumsf_pmu_start,
1887        .stop         = cpumsf_pmu_stop,
1888        .read         = cpumsf_pmu_read,
1889
1890        .attr_groups  = cpumsf_pmu_attr_groups,
1891
1892        .setup_aux    = aux_buffer_setup,
1893        .free_aux     = aux_buffer_free,
1894};
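
/*
 * With the PMU registered as "cpum_sf", the sampling facility can be
 * exercised from user space via the event alias published in the
 * events attribute group above, e.g. with something like:
 *
 *   perf record -e cpum_sf/SF_CYCLES_BASIC/ -- <workload>
 */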
1895
1896static void cpumf_measurement_alert(struct ext_code ext_code,
1897                                    unsigned int alert, unsigned long unused)
1898{
1899        struct cpu_hw_sf *cpuhw;
1900
1901        if (!(alert & CPU_MF_INT_SF_MASK))
1902                return;
1903        inc_irq_stat(IRQEXT_CMS);
1904        cpuhw = this_cpu_ptr(&cpu_hw_sf);
1905
1906        /* Measurement alerts are shared and might happen when the PMU
1907         * is not reserved.  Ignore these alerts in this case. */
1908        if (!(cpuhw->flags & PMU_F_RESERVED))
1909                return;
1910
1911        /* The processing below must take care of multiple alert events that
1912         * might be indicated concurrently. */
1913
1914        /* Program alert request */
1915        if (alert & CPU_MF_INT_SF_PRA) {
1916                if (cpuhw->flags & PMU_F_IN_USE) {
1917                        if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
1918                                hw_collect_aux(cpuhw);
1919                        else
1920                                hw_perf_event_update(cpuhw->event, 0);
1921                } else
1922                        WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
1923        }
1924
1925        /* Report measurement alerts only for non-PRA codes */
1926        if (alert != CPU_MF_INT_SF_PRA)
1927                debug_sprintf_event(sfdbg, 6, "measurement alert: 0x%x\n", alert);
1928
1929        /* Sampling authorization change request */
1930        if (alert & CPU_MF_INT_SF_SACA)
1931                qsi(&cpuhw->qsi);
1932
1933        /* Loss of sample data due to high-priority machine activities */
1934        if (alert & CPU_MF_INT_SF_LSDA) {
1935                pr_err("Sample data was lost\n");
1936                cpuhw->flags |= PMU_F_ERR_LSDA;
1937                sf_disable();
1938        }
1939
1940        /* Invalid sampling buffer entry */
1941        if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) {
1942                pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n",
1943                       alert);
1944                cpuhw->flags |= PMU_F_ERR_IBE;
1945                sf_disable();
1946        }
1947}

1948static int cpusf_pmu_setup(unsigned int cpu, int flags)
1949{
1950        /* Ignore the notification if no events are scheduled on the PMU.
1951         * This might be racy...
1952         */
1953        if (!atomic_read(&num_events))
1954                return 0;
1955
1956        local_irq_disable();
1957        setup_pmc_cpu(&flags);
1958        local_irq_enable();
1959        return 0;
1960}
1961
1962static int s390_pmu_sf_online_cpu(unsigned int cpu)
1963{
1964        return cpusf_pmu_setup(cpu, PMC_INIT);
1965}
1966
1967static int s390_pmu_sf_offline_cpu(unsigned int cpu)
1968{
1969        return cpusf_pmu_setup(cpu, PMC_RELEASE);
1970}
1971
1972static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
1973{
1974        if (!cpum_sf_avail())
1975                return -ENODEV;
1976        return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
1977}
1978
1979static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
1980{
1981        int rc;
1982        unsigned long min, max;
1983
1984        if (!cpum_sf_avail())
1985                return -ENODEV;
1986        if (!val || !strlen(val))
1987                return -EINVAL;
1988
1989        /* Valid parameter values: "min,max" or "max" */
1990        min = CPUM_SF_MIN_SDB;
1991        max = CPUM_SF_MAX_SDB;
1992        if (strchr(val, ','))
1993                rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL;
1994        else
1995                rc = kstrtoul(val, 10, &max);
1996
1997        if (min < 2 || min >= max || max > get_num_physpages())
1998                rc = -EINVAL;
1999        if (rc)
2000                return rc;
2001
2002        sfb_set_limits(min, max);
2003        pr_info("The sampling buffer limits have changed to: "
2004                "min=%lu max=%lu (diag=x%lu)\n",
2005                CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
2006        return 0;
2007}
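
/*
 * Example: booting with "cpum_sfb_size=96,4096" on the kernel command
 * line (see the core_param() declaration at the end of this file) sets
 * the limits to a minimum of 96 and a maximum of 4096 SDBs; a single
 * value such as "cpum_sfb_size=4096" adjusts the maximum only.
 */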
2008
2009#define param_check_sfb_size(name, p) __param_check(name, p, void)
2010static const struct kernel_param_ops param_ops_sfb_size = {
2011        .set = param_set_sfb_size,
2012        .get = param_get_sfb_size,
2013};
2014
2015#define RS_INIT_FAILURE_QSI       0x0001
2016#define RS_INIT_FAILURE_BSDES     0x0002
2017#define RS_INIT_FAILURE_ALRT      0x0003
2018#define RS_INIT_FAILURE_PERF      0x0004
2019static void __init pr_cpumsf_err(unsigned int reason)
2020{
2021        pr_err("Sampling facility support for perf is not available: "
2022               "reason=%04x\n", reason);
2023}
2024
2025static int __init init_cpum_sampling_pmu(void)
2026{
2027        struct hws_qsi_info_block si;
2028        int err;
2029
2030        if (!cpum_sf_avail())
2031                return -ENODEV;
2032
2033        memset(&si, 0, sizeof(si));
2034        if (qsi(&si)) {
2035                pr_cpumsf_err(RS_INIT_FAILURE_QSI);
2036                return -ENODEV;
2037        }
2038
2039        if (!si.as && !si.ad)
2040                return -ENODEV;
2041
2042        if (si.bsdes != sizeof(struct hws_basic_entry)) {
2043                pr_cpumsf_err(RS_INIT_FAILURE_BSDES);
2044                return -EINVAL;
2045        }
2046
2047        if (si.ad) {
2048                sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
2049                cpumsf_pmu_events_attr[1] =
2050                        CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
2051        }
2052
2053        sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
2054        if (!sfdbg)
2055                pr_err("Registering for s390dbf failed\n");
2056        debug_register_view(sfdbg, &debug_sprintf_view);
2057
2058        err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
2059                                    cpumf_measurement_alert);
2060        if (err) {
2061                pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
2062                goto out;
2063        }
2064
2065        err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW);
2066        if (err) {
2067                pr_cpumsf_err(RS_INIT_FAILURE_PERF);
2068                unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
2069                                        cpumf_measurement_alert);
2070                goto out;
2071        }
2072
2073        cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online",
2074                          s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
2075out:
2076        return err;
2077}
2078arch_initcall(init_cpum_sampling_pmu);
2079core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640);
2080