linux/tools/perf/util/evsel.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include "asm/bug.h"
#include <lk/debugfs.h>
#include "event-parse.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include "perf_regs.h"

static struct {
        bool sample_id_all;
        bool exclude_guest;
} perf_missing_features;

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
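
/*
 * FD() resolves to the lvalue holding the perf_event_open() file
 * descriptor for one (cpu, thread) instance of the event, stored in the
 * evsel's cpu x thread xyarray; an unopened slot is marked with -1.
 */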

static int __perf_evsel__sample_size(u64 sample_type)
{
        u64 mask = sample_type & PERF_SAMPLE_MASK;
        int size = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if (mask & (1ULL << i))
                        size++;
        }

        size *= sizeof(u64);

        return size;
}
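
/*
 * Each bit set in the masked sample_type contributes one u64 to the fixed
 * part of a sample.  For example, PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME yields 3 * sizeof(u64) = 24 bytes.
 */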

void hists__init(struct hists *hists)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
                                  enum perf_event_sample_format bit)
{
        if (!(evsel->attr.sample_type & bit)) {
                evsel->attr.sample_type |= bit;
                evsel->sample_size += sizeof(u64);
        }
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
                                    enum perf_event_sample_format bit)
{
        if (evsel->attr.sample_type & bit) {
                evsel->attr.sample_type &= ~bit;
                evsel->sample_size -= sizeof(u64);
        }
}
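
/*
 * The set/reset pair above keeps evsel->sample_size in sync with
 * attr.sample_type: every fixed-size sample format bit accounts for one
 * u64 in each PERF_RECORD_SAMPLE, which is what the size sanity check in
 * perf_evsel__parse_sample() relies on.
 */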

void perf_evsel__set_sample_id(struct perf_evsel *evsel)
{
        perf_evsel__set_sample_bit(evsel, ID);
        evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
{
        evsel->idx         = idx;
        evsel->attr        = *attr;
        evsel->leader      = evsel;
        INIT_LIST_HEAD(&evsel->node);
        hists__init(&evsel->hists);
        evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
        struct perf_evsel *evsel = zalloc(sizeof(*evsel));

        if (evsel != NULL)
                perf_evsel__init(evsel, attr, idx);

        return evsel;
}

struct event_format *event_format__new(const char *sys, const char *name)
{
        int fd, n;
        char *filename;
        void *bf = NULL, *nbf;
        size_t size = 0, alloc_size = 0;
        struct event_format *format = NULL;

        if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
                goto out;

        fd = open(filename, O_RDONLY);
        if (fd < 0)
                goto out_free_filename;

        do {
                if (size == alloc_size) {
                        alloc_size += BUFSIZ;
                        nbf = realloc(bf, alloc_size);
                        if (nbf == NULL)
                                goto out_free_bf;
                        bf = nbf;
                }

                n = read(fd, bf + size, alloc_size - size);
                if (n < 0)
                        goto out_free_bf;
                size += n;
        } while (n > 0);

        pevent_parse_format(&format, bf, size, sys);

out_free_bf:
        free(bf);
        close(fd);
out_free_filename:
        free(filename);
out:
        return format;
}

struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
{
        struct perf_evsel *evsel = zalloc(sizeof(*evsel));

        if (evsel != NULL) {
                struct perf_event_attr attr = {
                        .type          = PERF_TYPE_TRACEPOINT,
                        .sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
                                          PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
                };

                if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
                        goto out_free;

                evsel->tp_format = event_format__new(sys, name);
                if (evsel->tp_format == NULL)
                        goto out_free;

                event_attr_init(&attr);
                attr.config = evsel->tp_format->id;
                attr.sample_period = 1;
                perf_evsel__init(evsel, &attr, idx);
        }

        return evsel;

out_free:
        free(evsel->name);
        free(evsel);
        return NULL;
}
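
/*
 * Usage sketch: perf_evsel__newtp("sched", "sched_switch", 0) reads the
 * tracepoint's format file under tracing_events_path, stores the parsed
 * event_format and sets attr.config to the tracepoint id, sampling every
 * occurrence (period 1) with RAW/TIME/CPU/PERIOD sample data.
 */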

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
        "cycles",
        "instructions",
        "cache-references",
        "cache-misses",
        "branches",
        "branch-misses",
        "bus-cycles",
        "stalled-cycles-frontend",
        "stalled-cycles-backend",
        "ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
        if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
                return perf_evsel__hw_names[config];

        return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
        int colon = 0, r = 0;
        struct perf_event_attr *attr = &evsel->attr;
        bool exclude_guest_default = false;

#define MOD_PRINT(context, mod) do {                                    \
                if (!attr->exclude_##context) {                         \
                        if (!colon) colon = ++r;                        \
                        r += scnprintf(bf + r, size - r, "%c", mod);    \
                } } while(0)

        if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
                MOD_PRINT(kernel, 'k');
                MOD_PRINT(user, 'u');
                MOD_PRINT(hv, 'h');
                exclude_guest_default = true;
        }

        if (attr->precise_ip) {
                if (!colon)
                        colon = ++r;
                r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
                exclude_guest_default = true;
        }

        if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
                MOD_PRINT(host, 'H');
                MOD_PRINT(guest, 'G');
        }
#undef MOD_PRINT
        if (colon)
                bf[colon - 1] = ':';
        return r;
}
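
/*
 * Example: with exclude_user and exclude_hv set, precise_ip = 2 and the
 * guest/host bits left clear, the buffer gains ":kpp".  MOD_PRINT() emits
 * a character for each privilege level that is *not* excluded, and the
 * byte reserved by "colon = ++r" is patched to ':' once any modifier has
 * been printed.
 */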

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
        "cpu-clock",
        "task-clock",
        "page-faults",
        "context-switches",
        "cpu-migrations",
        "minor-faults",
        "major-faults",
        "alignment-faults",
        "emulation-faults",
};

static const char *__perf_evsel__sw_name(u64 config)
{
        if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
                return perf_evsel__sw_names[config];
        return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
        int r;

        r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

        if (type & HW_BREAKPOINT_R)
                r += scnprintf(bf + r, size - r, "r");

        if (type & HW_BREAKPOINT_W)
                r += scnprintf(bf + r, size - r, "w");

        if (type & HW_BREAKPOINT_X)
                r += scnprintf(bf + r, size - r, "x");

        return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        struct perf_event_attr *attr = &evsel->attr;
        int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
        return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
                                [PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache", "l1-d",         "l1d",          "L1-data",              },
 { "L1-icache", "l1-i",         "l1i",          "L1-instruction",       },
 { "LLC",       "L2",                                                   },
 { "dTLB",      "d-tlb",        "Data-TLB",                             },
 { "iTLB",      "i-tlb",        "Instruction-TLB",                      },
 { "branch",    "branches",     "bpu",          "btb",          "bpc",  },
 { "node",                                                              },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
                                   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",      "loads",        "read",                                 },
 { "store",     "stores",       "write",                                },
 { "prefetch",  "prefetches",   "speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
                                       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",      "Reference",    "ops",          "access",               },
 { "misses",    "miss",                                                 },
};

#define C(x)            PERF_COUNT_HW_CACHE_##x
#define CACHE_READ      (1 << C(OP_READ))
#define CACHE_WRITE     (1 << C(OP_WRITE))
#define CACHE_PREFETCH  (1 << C(OP_PREFETCH))
#define COP(x)          (1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]       = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]       = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]        = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]      = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]      = (CACHE_READ),
 [C(BPU)]       = (CACHE_READ),
 [C(NODE)]      = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
        if (perf_evsel__hw_cache_stat[type] & COP(op))
                return true;    /* valid */
        else
                return false;   /* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
                                            char *bf, size_t size)
{
        if (result) {
                return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
                                 perf_evsel__hw_cache_op[op][0],
                                 perf_evsel__hw_cache_result[result][0]);
        }

        return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
                         perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
        u8 op, result, type = (config >>  0) & 0xff;
        const char *err = "unknown-ext-hardware-cache-type";

        if (type >= PERF_COUNT_HW_CACHE_MAX)
                goto out_err;

        op = (config >>  8) & 0xff;
        err = "unknown-ext-hardware-cache-op";
        if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
                goto out_err;

        result = (config >> 16) & 0xff;
        err = "unknown-ext-hardware-cache-result";
        if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                goto out_err;

        err = "invalid-cache";
        if (!perf_evsel__is_cache_op_valid(type, op))
                goto out_err;

        return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
        return scnprintf(bf, size, "%s", err);
}
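
/*
 * attr.config for PERF_TYPE_HW_CACHE packs three byte-wide fields:
 * (type) | (op << 8) | (result << 16).  For instance L1D | OP_READ << 8 |
 * RESULT_MISS << 16 renders as "L1-dcache-load-misses".
 */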

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
        int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
        return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
        char bf[128];

        if (evsel->name)
                return evsel->name;

        switch (evsel->attr.type) {
        case PERF_TYPE_RAW:
                perf_evsel__raw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_HARDWARE:
                perf_evsel__hw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_HW_CACHE:
                perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_SOFTWARE:
                perf_evsel__sw_name(evsel, bf, sizeof(bf));
                break;

        case PERF_TYPE_TRACEPOINT:
                scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
                break;

        case PERF_TYPE_BREAKPOINT:
                perf_evsel__bp_name(evsel, bf, sizeof(bf));
                break;

        default:
                scnprintf(bf, sizeof(bf), "unknown attr type: %d",
                          evsel->attr.type);
                break;
        }

        evsel->name = strdup(bf);

        return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
        return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
        int ret;
        struct perf_evsel *pos;
        const char *group_name = perf_evsel__group_name(evsel);

        ret = scnprintf(buf, size, "%s", group_name);

        ret += scnprintf(buf + ret, size - ret, " { %s",
                         perf_evsel__name(evsel));

        for_each_group_member(pos, evsel)
                ret += scnprintf(buf + ret, size - ret, ", %s",
                                 perf_evsel__name(pos));

        ret += scnprintf(buf + ret, size - ret, " }");

        return ret;
}
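
/*
 * Example output: a nameless group led by cycles with instructions as its
 * only member is described as "anon group { cycles, instructions }".
 */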

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *     - all independent events and group leaders have
 *       enable_on_exec set
 *     - we don't specifically enable or disable any event during
 *       the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *     - we specifically enable or disable all events during
 *       the record command
 *
 *     When attaching events to an already running traced program we
 *     enable/disable events explicitly, as there is no initial
 *     exec call to trigger enable_on_exec.
 */
void perf_evsel__config(struct perf_evsel *evsel,
                        struct perf_record_opts *opts)
{
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */

        attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
        attr->inherit       = !opts->no_inherit;

        perf_evsel__set_sample_bit(evsel, IP);
        perf_evsel__set_sample_bit(evsel, TID);

        /*
         * We default some events to a period of 1. But keep it a weak
         * assumption overridable by the user.
         */
        if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
                                     opts->user_interval != ULLONG_MAX)) {
                if (opts->freq) {
                        perf_evsel__set_sample_bit(evsel, PERIOD);
                        attr->freq              = 1;
                        attr->sample_freq       = opts->freq;
                } else {
                        attr->sample_period = opts->default_interval;
                }
        }

        if (opts->no_samples)
                attr->sample_freq = 0;

        if (opts->inherit_stat)
                attr->inherit_stat = 1;

        if (opts->sample_address) {
                perf_evsel__set_sample_bit(evsel, ADDR);
                attr->mmap_data = track;
        }

        if (opts->call_graph) {
                perf_evsel__set_sample_bit(evsel, CALLCHAIN);

                if (opts->call_graph == CALLCHAIN_DWARF) {
                        perf_evsel__set_sample_bit(evsel, REGS_USER);
                        perf_evsel__set_sample_bit(evsel, STACK_USER);
                        attr->sample_regs_user = PERF_REGS_MASK;
                        attr->sample_stack_user = opts->stack_dump_size;
                        attr->exclude_callchain_user = 1;
                }
        }

        if (perf_target__has_cpu(&opts->target))
                perf_evsel__set_sample_bit(evsel, CPU);

        if (opts->period)
                perf_evsel__set_sample_bit(evsel, PERIOD);

        if (!perf_missing_features.sample_id_all &&
            (opts->sample_time || !opts->no_inherit ||
             perf_target__has_cpu(&opts->target)))
                perf_evsel__set_sample_bit(evsel, TIME);

        if (opts->raw_samples) {
                perf_evsel__set_sample_bit(evsel, TIME);
                perf_evsel__set_sample_bit(evsel, RAW);
                perf_evsel__set_sample_bit(evsel, CPU);
        }

        if (opts->sample_address)
                attr->sample_type       |= PERF_SAMPLE_DATA_SRC;

        if (opts->no_delay) {
                attr->watermark = 0;
                attr->wakeup_events = 1;
        }
        if (opts->branch_stack) {
                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
                attr->branch_sample_type = opts->branch_stack;
        }

        if (opts->sample_weight)
                attr->sample_type       |= PERF_SAMPLE_WEIGHT;

        attr->mmap = track;
        attr->comm = track;

        /*
         * XXX see the function comment above
         *
         * Disabling only independent events or group leaders,
         * keeping group members enabled.
         */
        if (perf_evsel__is_group_leader(evsel))
                attr->disabled = 1;

        /*
         * Setting enable_on_exec for independent events and
         * group leaders for traced programs executed by perf.
         */
        if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
                attr->enable_on_exec = 1;
}

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;
        evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

        if (evsel->fd) {
                for (cpu = 0; cpu < ncpus; cpu++) {
                        for (thread = 0; thread < nthreads; thread++) {
                                FD(evsel, cpu, thread) = -1;
                        }
                }
        }

        return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
                           const char *filter)
{
        int cpu, thread;

        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
                        int fd = FD(evsel, cpu, thread),
                            err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);

                        if (err)
                                return err;
                }
        }

        return 0;
}
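
/*
 * PERF_EVENT_IOC_SET_FILTER installs a filter expression, e.g.
 * "common_pid != 0", on every open fd of the event; only event types that
 * implement filtering (tracepoints here) accept the ioctl.
 */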

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
        if (evsel->sample_id == NULL)
                return -ENOMEM;

        evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
        if (evsel->id == NULL) {
                xyarray__delete(evsel->sample_id);
                evsel->sample_id = NULL;
                return -ENOMEM;
        }

        return 0;
}

void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
        memset(evsel->counts, 0, (sizeof(*evsel->counts) +
                                 (ncpus * sizeof(struct perf_counts_values))));
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
        evsel->counts = zalloc((sizeof(*evsel->counts) +
                                (ncpus * sizeof(struct perf_counts_values))));
        return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->fd);
        evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
        xyarray__delete(evsel->sample_id);
        evsel->sample_id = NULL;
        free(evsel->id);
        evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        int cpu, thread;

        for (cpu = 0; cpu < ncpus; cpu++)
                for (thread = 0; thread < nthreads; ++thread) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
}

void perf_evsel__free_counts(struct perf_evsel *evsel)
{
        free(evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
        assert(list_empty(&evsel->node));
        perf_evsel__free_fd(evsel);
        perf_evsel__free_id(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
        perf_evsel__exit(evsel);
        close_cgroup(evsel->cgrp);
        free(evsel->group_name);
        if (evsel->tp_format)
                pevent_free_format(evsel->tp_format);
        free(evsel->name);
        free(evsel);
}

static inline void compute_deltas(struct perf_evsel *evsel,
                                  int cpu,
                                  struct perf_counts_values *count)
{
        struct perf_counts_values tmp;

        if (!evsel->prev_raw_counts)
                return;

        if (cpu == -1) {
                tmp = evsel->prev_raw_counts->aggr;
                evsel->prev_raw_counts->aggr = *count;
        } else {
                tmp = evsel->prev_raw_counts->cpu[cpu];
                evsel->prev_raw_counts->cpu[cpu] = *count;
        }

        count->val = count->val - tmp.val;
        count->ena = count->ena - tmp.ena;
        count->run = count->run - tmp.run;
}
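
/*
 * When prev_raw_counts is allocated (as in perf stat's interval printing),
 * compute_deltas() stores the freshly read raw value and rewrites *count
 * as the difference from the previous read; cpu == -1 addresses the
 * aggregated counts instead of a per-cpu slot.
 */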

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale)
{
        struct perf_counts_values count;
        size_t nv = scale ? 3 : 1;

        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;

        if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
                return -ENOMEM;

        if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
                return -errno;

        compute_deltas(evsel, cpu, &count);

        if (scale) {
                if (count.run == 0)
                        count.val = 0;
                else if (count.run < count.ena)
                        count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
        } else
                count.ena = count.run = 0;

        evsel->counts->cpu[cpu] = count;
        return 0;
}
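
/*
 * Scaling assumes the event was opened with a read_format including
 * PERF_FORMAT_TOTAL_TIME_ENABLED and _RUNNING (hence nv == 3 u64s): when
 * the counter was multiplexed (run < ena) the raw value is extrapolated
 * to val * ena / run, rounded to the nearest integer.
 */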

int __perf_evsel__read(struct perf_evsel *evsel,
                       int ncpus, int nthreads, bool scale)
{
        size_t nv = scale ? 3 : 1;
        int cpu, thread;
        struct perf_counts_values *aggr = &evsel->counts->aggr, count;

        aggr->val = aggr->ena = aggr->run = 0;

        for (cpu = 0; cpu < ncpus; cpu++) {
                for (thread = 0; thread < nthreads; thread++) {
                        if (FD(evsel, cpu, thread) < 0)
                                continue;

                        if (readn(FD(evsel, cpu, thread),
                                  &count, nv * sizeof(u64)) < 0)
                                return -errno;

                        aggr->val += count.val;
                        if (scale) {
                                aggr->ena += count.ena;
                                aggr->run += count.run;
                        }
                }
        }

        compute_deltas(evsel, -1, aggr);

        evsel->counts->scaled = 0;
        if (scale) {
                if (aggr->run == 0) {
                        evsel->counts->scaled = -1;
                        aggr->val = 0;
                        return 0;
                }

                if (aggr->run < aggr->ena) {
                        evsel->counts->scaled = 1;
                        aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
                }
        } else
                aggr->ena = aggr->run = 0;

        return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
        struct perf_evsel *leader = evsel->leader;
        int fd;

        if (perf_evsel__is_group_leader(evsel))
                return -1;

        /*
         * Leader must be already processed/open,
         * if not it's a bug.
         */
        BUG_ON(!leader->fd);

        fd = FD(leader, cpu, thread);
        BUG_ON(fd == -1);

        return fd;
}
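
/*
 * sys_perf_event_open() takes the returned value as its group_fd argument:
 * -1 starts a new group with this event as leader, while a leader's fd
 * ties the new event into that group so the PMU schedules them as a unit.
 */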

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                              struct thread_map *threads)
{
        int cpu, thread;
        unsigned long flags = 0;
        int pid = -1, err;

        if (evsel->fd == NULL &&
            perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
                return -ENOMEM;

        if (evsel->cgrp) {
                flags = PERF_FLAG_PID_CGROUP;
                pid = evsel->cgrp->fd;
        }

fallback_missing_features:
        if (perf_missing_features.exclude_guest)
                evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
        if (perf_missing_features.sample_id_all)
                evsel->attr.sample_id_all = 0;

        for (cpu = 0; cpu < cpus->nr; cpu++) {

                for (thread = 0; thread < threads->nr; thread++) {
                        int group_fd;

                        if (!evsel->cgrp)
                                pid = threads->map[thread];

                        group_fd = get_group_fd(evsel, cpu, thread);

                        FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
                                                                     pid,
                                                                     cpus->map[cpu],
                                                                     group_fd, flags);
                        if (FD(evsel, cpu, thread) < 0) {
                                err = -errno;
                                goto try_fallback;
                        }
                }
        }

        return 0;

try_fallback:
        if (err != -EINVAL || cpu > 0 || thread > 0)
                goto out_close;

        if (!perf_missing_features.exclude_guest &&
            (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
                perf_missing_features.exclude_guest = true;
                goto fallback_missing_features;
        } else if (!perf_missing_features.sample_id_all) {
                perf_missing_features.sample_id_all = true;
                goto retry_sample_id;
        }

out_close:
        do {
                while (--thread >= 0) {
                        close(FD(evsel, cpu, thread));
                        FD(evsel, cpu, thread) = -1;
                }
                thread = threads->nr;
        } while (--cpu >= 0);
        return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
        if (evsel->fd == NULL)
                return;

        perf_evsel__close_fd(evsel, ncpus, nthreads);
        perf_evsel__free_fd(evsel);
        evsel->fd = NULL;
}

static struct {
        struct cpu_map map;
        int cpus[1];
} empty_cpu_map = {
        .map.nr = 1,
        .cpus   = { -1, },
};

static struct {
        struct thread_map map;
        int threads[1];
} empty_thread_map = {
        .map.nr  = 1,
        .threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads)
{
        if (cpus == NULL) {
                /* Work around old compiler warnings about strict aliasing */
                cpus = &empty_cpu_map.map;
        }

        if (threads == NULL)
                threads = &empty_thread_map.map;

        return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus)
{
        return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads)
{
        return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
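
/*
 * The dummy maps hold a single -1 entry, matching sys_perf_event_open()
 * semantics: per-cpu opens pass pid == -1 (count every task on that cpu),
 * per-thread opens pass cpu == -1 (follow the task across cpus).
 */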

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
                                       const union perf_event *event,
                                       struct perf_sample *sample)
{
        u64 type = evsel->attr.sample_type;
        const u64 *array = event->sample.array;
        bool swapped = evsel->needs_swap;
        union u64_swap u;

        array += ((event->header.size -
                   sizeof(event->header)) / sizeof(u64)) - 1;

        if (type & PERF_SAMPLE_CPU) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                sample->cpu = u.val32[0];
                array--;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                sample->stream_id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_ID) {
                sample->id = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TIME) {
                sample->time = *array;
                array--;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                sample->pid = u.val32[0];
                sample->tid = u.val32[1];
        }

        return 0;
}
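
/*
 * With attr.sample_id_all set, non-sample records carry TID/TIME/ID/
 * STREAM_ID/CPU as a trailer at the very end of the event, which is why
 * the parser positions array on the last u64 and walks backwards.
 */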

static bool sample_overlap(const union perf_event *event,
                           const void *offset, u64 size)
{
        const void *base = event;

        if (offset + size > base + event->header.size)
                return true;

        return false;
}

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                             struct perf_sample *data)
{
        u64 type = evsel->attr.sample_type;
        u64 regs_user = evsel->attr.sample_regs_user;
        bool swapped = evsel->needs_swap;
        const u64 *array;

        /*
         * used for cross-endian analysis. See git commit 65014ab3
         * for why this goofiness is needed.
         */
        union u64_swap u;

        memset(data, 0, sizeof(*data));
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
        data->period = 1;
        data->weight = 0;

        if (event->header.type != PERF_RECORD_SAMPLE) {
                if (!evsel->attr.sample_id_all)
                        return 0;
                return perf_evsel__parse_id_sample(evsel, event, data);
        }

        array = event->sample.array;

        if (evsel->sample_size + sizeof(event->header) > event->header.size)
                return -EFAULT;

        if (type & PERF_SAMPLE_IP) {
                data->ip = event->ip.ip;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                data->pid = u.val32[0];
                data->tid = u.val32[1];
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                data->time = *array;
                array++;
        }

        data->addr = 0;
        if (type & PERF_SAMPLE_ADDR) {
                data->addr = *array;
                array++;
        }

        data->id = -1ULL;
        if (type & PERF_SAMPLE_ID) {
                data->id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                data->stream_id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {

                u.val64 = *array;
                if (swapped) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                }

                data->cpu = u.val32[0];
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                data->period = *array;
                array++;
        }

        if (type & PERF_SAMPLE_READ) {
                fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
                return -1;
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                if (sample_overlap(event, array, sizeof(data->callchain->nr)))
                        return -EFAULT;

                data->callchain = (struct ip_callchain *)array;

                if (sample_overlap(event, array, data->callchain->nr))
                        return -EFAULT;

                array += 1 + data->callchain->nr;
        }

        if (type & PERF_SAMPLE_RAW) {
                const u64 *pdata;

                u.val64 = *array;
                if (WARN_ONCE(swapped,
                              "Endianness of raw data not corrected!\n")) {
                        /* undo swap of u64, then swap on individual u32s */
                        u.val64 = bswap_64(u.val64);
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                }

                if (sample_overlap(event, array, sizeof(u32)))
                        return -EFAULT;

                data->raw_size = u.val32[0];
                pdata = (void *) array + sizeof(u32);

                if (sample_overlap(event, pdata, data->raw_size))
                        return -EFAULT;

                data->raw_data = (void *) pdata;

                array = (void *)array + data->raw_size + sizeof(u32);
        }

        if (type & PERF_SAMPLE_BRANCH_STACK) {
                u64 sz;

                data->branch_stack = (struct branch_stack *)array;
                array++; /* nr */

                sz = data->branch_stack->nr * sizeof(struct branch_entry);
                sz /= sizeof(u64);
                array += sz;
        }

        if (type & PERF_SAMPLE_REGS_USER) {
                /* First u64 tells us if we have any regs in sample. */
                u64 avail = *array++;

                if (avail) {
                        data->user_regs.regs = (u64 *)array;
                        array += hweight_long(regs_user);
                }
        }

        if (type & PERF_SAMPLE_STACK_USER) {
                u64 size = *array++;

                data->user_stack.offset = ((char *)(array - 1)
                                          - (char *) event);

                if (!size) {
                        data->user_stack.size = 0;
                } else {
                        data->user_stack.data = (char *)array;
                        array += size / sizeof(*array);
                        data->user_stack.size = *array++;
                }
        }

        data->weight = 0;
        if (type & PERF_SAMPLE_WEIGHT) {
                data->weight = *array;
                array++;
        }

        data->data_src = PERF_MEM_DATA_SRC_NONE;
        if (type & PERF_SAMPLE_DATA_SRC) {
                data->data_src = *array;
                array++;
        }

        return 0;
}

int perf_event__synthesize_sample(union perf_event *event, u64 type,
                                  const struct perf_sample *sample,
                                  bool swapped)
{
        u64 *array;

        /*
         * used for cross-endian analysis. See git commit 65014ab3
         * for why this goofiness is needed.
         */
        union u64_swap u;

        array = event->sample.array;

        if (type & PERF_SAMPLE_IP) {
                event->ip.ip = sample->ip;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u.val32[0] = sample->pid;
                u.val32[1] = sample->tid;
                if (swapped) {
                        /*
                         * Inverse of what is done in perf_evsel__parse_sample
                         */
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val32[1] = bswap_32(u.val32[1]);
                        u.val64 = bswap_64(u.val64);
                }

                *array = u.val64;
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                *array = sample->time;
                array++;
        }

        if (type & PERF_SAMPLE_ADDR) {
                *array = sample->addr;
                array++;
        }

        if (type & PERF_SAMPLE_ID) {
                *array = sample->id;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                *array = sample->stream_id;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {
                u.val32[0] = sample->cpu;
                if (swapped) {
                        /*
                         * Inverse of what is done in perf_evsel__parse_sample
                         */
                        u.val32[0] = bswap_32(u.val32[0]);
                        u.val64 = bswap_64(u.val64);
                }
                *array = u.val64;
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                *array = sample->period;
                array++;
        }

        return 0;
}

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
        return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
                         const char *name)
{
        struct format_field *field = perf_evsel__field(evsel, name);
        int offset;

        if (!field)
                return NULL;

        offset = field->offset;

        if (field->flags & FIELD_IS_DYNAMIC) {
                offset = *(int *)(sample->raw_data + field->offset);
                offset &= 0xffff;
        }

        return sample->raw_data + offset;
}
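
/*
 * FIELD_IS_DYNAMIC (__data_loc) fields store a 32-bit descriptor at
 * field->offset, with the payload offset in the low 16 bits and its
 * length in the high 16 bits, hence the 0xffff mask before indexing
 * raw_data.
 */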

u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
                       const char *name)
{
        struct format_field *field = perf_evsel__field(evsel, name);
        void *ptr;
        u64 value;

        if (!field)
                return 0;

        ptr = sample->raw_data + field->offset;

        switch (field->size) {
        case 1:
                return *(u8 *)ptr;
        case 2:
                value = *(u16 *)ptr;
                break;
        case 4:
                value = *(u32 *)ptr;
                break;
        case 8:
                value = *(u64 *)ptr;
                break;
        default:
                return 0;
        }

        if (!evsel->needs_swap)
                return value;

        switch (field->size) {
        case 2:
                return bswap_16(value);
        case 4:
                return bswap_32(value);
        case 8:
                return bswap_64(value);
        default:
                return 0;
        }

        return 0;
}

static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
        va_list args;
        int ret = 0;

        if (!*first) {
                ret += fprintf(fp, ",");
        } else {
                ret += fprintf(fp, ":");
                *first = false;
        }

        va_start(args, fmt);
        ret += vfprintf(fp, fmt, args);
        va_end(args);
        return ret;
}

static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
        if (value == 0)
                return 0;

        return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}

#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)

struct bit_names {
        int bit;
        const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
                         struct bit_names *bits, bool *first)
{
        int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
        bool first_bit = true;

        do {
                if (value & bits[i].bit) {
                        printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
                        first_bit = false;
                }
        } while (bits[++i].name != NULL);

        return printed;
}

static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
        struct bit_names bits[] = {
                bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
                bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
                bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
                bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
                { .name = NULL, }
        };
#undef bit_name
        return bits__fprintf(fp, "sample_type", value, bits, first);
}

static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
        struct bit_names bits[] = {
                bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
                bit_name(ID), bit_name(GROUP),
                { .name = NULL, }
        };
#undef bit_name
        return bits__fprintf(fp, "read_format", value, bits, first);
}
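
/*
 * Example: a verbose dump of a typical cycles event rendered via these
 * helpers looks something like
 * "sample_type: IP|TID|TIME|PERIOD, read_format: ID".
 */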

int perf_evsel__fprintf(struct perf_evsel *evsel,
                        struct perf_attr_details *details, FILE *fp)
{
        bool first = true;
        int printed = 0;

        if (details->event_group) {
                struct perf_evsel *pos;

                if (!perf_evsel__is_group_leader(evsel))
                        return 0;

                if (evsel->nr_members > 1)
                        printed += fprintf(fp, "%s{", evsel->group_name ?: "");

                printed += fprintf(fp, "%s", perf_evsel__name(evsel));
                for_each_group_member(pos, evsel)
                        printed += fprintf(fp, ",%s", perf_evsel__name(pos));

                if (evsel->nr_members > 1)
                        printed += fprintf(fp, "}");
                goto out;
        }

        printed += fprintf(fp, "%s", perf_evsel__name(evsel));

        if (details->verbose || details->freq) {
                printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
                                         (u64)evsel->attr.sample_freq);
        }

        if (details->verbose) {
                if_print(type);
                if_print(config);
                if_print(config1);
                if_print(config2);
                if_print(size);
                printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
                if (evsel->attr.read_format)
                        printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
                if_print(disabled);
                if_print(inherit);
                if_print(pinned);
                if_print(exclusive);
                if_print(exclude_user);
                if_print(exclude_kernel);
                if_print(exclude_hv);
                if_print(exclude_idle);
                if_print(mmap);
                if_print(comm);
                if_print(freq);
                if_print(inherit_stat);
                if_print(enable_on_exec);
                if_print(task);
                if_print(watermark);
                if_print(precise_ip);
                if_print(mmap_data);
                if_print(sample_id_all);
                if_print(exclude_host);
                if_print(exclude_guest);
                if_print(__reserved_1);
                if_print(wakeup_events);
                if_print(bp_type);
                if_print(branch_sample_type);
        }
out:
        fputc('\n', fp);
        return ++printed;
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                          char *msg, size_t msgsize)
{
        if ((err == ENOENT || err == ENXIO) &&
            evsel->attr.type   == PERF_TYPE_HARDWARE &&
            evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
                /*
                 * If it's cycles then fall back to hrtimer based
                 * cpu-clock-tick sw counter, which is always available even if
                 * no PMU support.
                 *
                 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
                 * b0a873e).
                 */
                scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

                evsel->attr.type   = PERF_TYPE_SOFTWARE;
                evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

                free(evsel->name);
                evsel->name = NULL;
                return true;
        }

        return false;
}

int perf_evsel__open_strerror(struct perf_evsel *evsel,
                              struct perf_target *target,
                              int err, char *msg, size_t size)
{
        switch (err) {
        case EPERM:
        case EACCES:
                return scnprintf(msg, size,
                 "You may not have permission to collect %sstats.\n"
                 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
                 " -1 - Not paranoid at all\n"
                 "  0 - Disallow raw tracepoint access for unpriv\n"
                 "  1 - Disallow cpu events for unpriv\n"
                 "  2 - Disallow kernel profiling for unpriv",
                                 target->system_wide ? "system-wide " : "");
        case ENOENT:
                return scnprintf(msg, size, "The %s event is not supported.",
                                 perf_evsel__name(evsel));
        case EMFILE:
                return scnprintf(msg, size, "%s",
                         "Too many events are opened.\n"
                         "Try again after reducing the number of events.");
        case ENODEV:
                if (target->cpu_list)
                        return scnprintf(msg, size, "%s",
         "No such device - did you specify an out-of-range profile CPU?\n");
                break;
        case EOPNOTSUPP:
                if (evsel->attr.precise_ip)
                        return scnprintf(msg, size, "%s",
        "\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
                if (evsel->attr.type == PERF_TYPE_HARDWARE)
                        return scnprintf(msg, size, "%s",
        "No hardware sampling interrupt available.\n"
        "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
                break;
        default:
                break;
        }

        return scnprintf(msg, size,
        "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
        "/bin/dmesg may provide additional information.\n"
        "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
                         err, strerror(err), perf_evsel__name(evsel));
}