linux/tools/perf/arch/x86/util/intel-pt.c
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <cpuid.h>

#include "../../perf.h"
#include "../../util/session.h"
#include "../../util/event.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/cpumap.h"
#include <subcmd/parse-options.h>
#include "../../util/parse-events.h"
#include "../../util/pmu.h"
#include "../../util/debug.h"
#include "../../util/auxtrace.h"
#include "../../util/tsc.h"
#include "../../util/intel-pt.h"

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

#define INTEL_PT_DEFAULT_SAMPLE_SIZE    KiB(4)

#define INTEL_PT_MAX_SAMPLE_SIZE        KiB(60)

#define INTEL_PT_PSB_PERIOD_NEAR        256

struct intel_pt_snapshot_ref {
        void *ref_buf;
        size_t ref_offset;
        bool wrapped;
};

struct intel_pt_recording {
        struct auxtrace_record          itr;
        struct perf_pmu                 *intel_pt_pmu;
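        /*
         * have_sched_switch: 1 = use the sched:sched_switch tracepoint,
         * 2 = per-task context switch events, 3 = CPU-wide context switch
         * events
         */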
        int                             have_sched_switch;
        struct perf_evlist              *evlist;
        bool                            snapshot_mode;
        bool                            snapshot_init_done;
        size_t                          snapshot_size;
        size_t                          snapshot_ref_buf_size;
        int                             snapshot_ref_cnt;
        struct intel_pt_snapshot_ref    *snapshot_refs;
};

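/*
 * Parse a string of config terms (e.g. "tsc,mtc,mtc_period=3") against the
 * PMU format definitions and apply them on top of the existing *config value.
 */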
static int intel_pt_parse_terms_with_default(struct list_head *formats,
                                             const char *str,
                                             u64 *config)
{
        struct list_head *terms;
        struct perf_event_attr attr = { .size = 0, };
        int err;

        terms = malloc(sizeof(struct list_head));
        if (!terms)
                return -ENOMEM;

        INIT_LIST_HEAD(terms);

        err = parse_events_terms(terms, str);
        if (err)
                goto out_free;

        attr.config = *config;
        err = perf_pmu__config_terms(formats, &attr, terms, true, NULL);
        if (err)
                goto out_free;

        *config = attr.config;
out_free:
        parse_events_terms__delete(terms);
        return err;
}

static int intel_pt_parse_terms(struct list_head *formats, const char *str,
                                u64 *config)
{
        *config = 0;
        return intel_pt_parse_terms_with_default(formats, str, config);
}

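/*
 * Gather the bits of @bits that are selected by @mask into the least
 * significant bits of the result.
 */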
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
        const u64 top_bit = 1ULL << 63;
        u64 res = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if (mask & top_bit) {
                        res <<= 1;
                        if (bits & top_bit)
                                res |= 1;
                }
                mask <<= 1;
                bits <<= 1;
        }

        return res;
}

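/*
 * Find the Intel PT event in @evlist and return in *res the value of the
 * config term named @str taken from its attr.config.
 */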
static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
                                struct perf_evlist *evlist, u64 *res)
{
        struct perf_evsel *evsel;
        u64 mask;

        *res = 0;

        mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
        if (!mask)
                return -EINVAL;

        evlist__for_each(evlist, evsel) {
                if (evsel->attr.type == intel_pt_pmu->type) {
                        *res = intel_pt_masked_bits(mask, evsel->attr.config);
                        return 0;
                }
        }

        return -EINVAL;
}

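/*
 * Return the PSB packet period in bytes of trace data implied by the
 * psb_period config term (2^(value + 11)), or a small fixed period for early
 * hardware without multi-entry ToPA support.
 */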
static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
                                  struct perf_evlist *evlist)
{
        u64 val;
        int err, topa_multiple_entries;
        size_t psb_period;

        if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
                                "%d", &topa_multiple_entries) != 1)
                topa_multiple_entries = 0;

        /*
         * Use caps/topa_multiple_entries to indicate early hardware that had
         * extra frequent PSBs.
         */
        if (!topa_multiple_entries) {
                psb_period = 256;
                goto out;
        }

        err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
        if (err)
                val = 0;

        psb_period = 1 << (val + 11);
out:
        pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
        return psb_period;
}

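/*
 * Pick a bit position from the @bits mask, preferring @target, otherwise the
 * nearest set bit below it, or failing that the lowest set bit above it.
 * Returns -1 if no bit is set.
 */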
static int intel_pt_pick_bit(int bits, int target)
{
        int pos, pick = -1;

        for (pos = 0; bits; bits >>= 1, pos++) {
                if (bits & 1) {
                        if (pos <= target || pick < 0)
                                pick = pos;
                        if (pos >= target)
                                break;
                }
        }

        return pick;
}

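/*
 * Build the default config: always "tsc", plus mtc and psb_period terms when
 * the hardware capabilities in sysfs advertise them.
 */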
static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
{
        char buf[256];
        int mtc, mtc_periods = 0, mtc_period;
        int psb_cyc, psb_periods, psb_period;
        int pos = 0;
        u64 config;

        pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

        if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
                                &mtc) != 1)
                mtc = 1;

        if (mtc) {
                if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
                                        &mtc_periods) != 1)
                        mtc_periods = 0;
                if (mtc_periods) {
                        mtc_period = intel_pt_pick_bit(mtc_periods, 3);
                        pos += scnprintf(buf + pos, sizeof(buf) - pos,
                                         ",mtc,mtc_period=%d", mtc_period);
                }
        }

        if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
                                &psb_cyc) != 1)
                psb_cyc = 1;

        if (psb_cyc && mtc_periods) {
                if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
                                        &psb_periods) != 1)
                        psb_periods = 0;
                if (psb_periods) {
                        psb_period = intel_pt_pick_bit(psb_periods, 3);
                        pos += scnprintf(buf + pos, sizeof(buf) - pos,
                                         ",psb_period=%d", psb_period);
                }
        }

        pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);

        intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);

        return config;
}

static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
                                           struct record_opts *opts,
                                           const char *str)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        unsigned long long snapshot_size = 0;
        char *endptr;

        if (str) {
                snapshot_size = strtoull(str, &endptr, 0);
                if (*endptr || snapshot_size > SIZE_MAX)
                        return -1;
        }

        opts->auxtrace_snapshot_mode = true;
        opts->auxtrace_snapshot_size = snapshot_size;

        ptr->snapshot_size = snapshot_size;

        return 0;
}

struct perf_event_attr *
intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
{
        struct perf_event_attr *attr;

        attr = zalloc(sizeof(struct perf_event_attr));
        if (!attr)
                return NULL;

        attr->config = intel_pt_default_config(intel_pt_pmu);

        intel_pt_pmu->selectable = true;

        return attr;
}

static size_t
intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused,
                        struct perf_evlist *evlist __maybe_unused)
{
        return INTEL_PT_AUXTRACE_PRIV_SIZE;
}

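/*
 * Read the TSC to Core Crystal Clock (CTC) ratio from CPUID leaf 0x15:
 * EBX holds the numerator and EAX the denominator.
 */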
static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
        *n = ebx;
        *d = eax;
}

static int intel_pt_info_fill(struct auxtrace_record *itr,
                              struct perf_session *session,
                              struct auxtrace_info_event *auxtrace_info,
                              size_t priv_size)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
        struct perf_event_mmap_page *pc;
        struct perf_tsc_conversion tc = { .time_mult = 0, };
        bool cap_user_time_zero = false, per_cpu_mmaps;
        u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
        u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
        int err;

        if (priv_size != INTEL_PT_AUXTRACE_PRIV_SIZE)
                return -EINVAL;

        intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
        intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp",
                             &noretcomp_bit);
        intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit);
        mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
                                              "mtc_period");
        intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit);

        intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);

        if (!session->evlist->nr_mmaps)
                return -EINVAL;

        pc = session->evlist->mmap[0].base;
        if (pc) {
                err = perf_read_tsc_conversion(pc, &tc);
                if (err) {
                        if (err != -EOPNOTSUPP)
                                return err;
                } else {
                        cap_user_time_zero = tc.time_mult != 0;
                }
                if (!cap_user_time_zero)
                        ui__warning("Intel Processor Trace: TSC not available\n");
        }

        per_cpu_mmaps = !cpu_map__empty(session->evlist->cpus);

        auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
        auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
        auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
        auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
        auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
        auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
        auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
        auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
        auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
        auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
        auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
        auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
        auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
        auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
        auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
        auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;

        return 0;
}

static int intel_pt_track_switches(struct perf_evlist *evlist)
{
        const char *sched_switch = "sched:sched_switch";
        struct perf_evsel *evsel;
        int err;

        if (!perf_evlist__can_select_event(evlist, sched_switch))
                return -EPERM;

        err = parse_events(evlist, sched_switch, NULL);
        if (err) {
                pr_debug2("%s: failed to parse %s, error %d\n",
                          __func__, sched_switch, err);
                return err;
        }

        evsel = perf_evlist__last(evlist);

        perf_evsel__set_sample_bit(evsel, CPU);
        perf_evsel__set_sample_bit(evsel, TIME);

        evsel->system_wide = true;
        evsel->no_aux_samples = true;
        evsel->immediate = true;

        return 0;
}

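/*
 * Format the @valid bit mask as a human readable list of values and ranges,
 * e.g. "0,3,5-8", for use in error messages.
 */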
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
        unsigned int val, last = 0, state = 1;
        int p = 0;

        str[0] = '\0';

        for (val = 0; val <= 64; val++, valid >>= 1) {
                if (valid & 1) {
                        last = val;
                        switch (state) {
                        case 0:
                                p += scnprintf(str + p, len - p, ",");
                                /* Fall through */
                        case 1:
                                p += scnprintf(str + p, len - p, "%u", val);
                                state = 2;
                                break;
                        case 2:
                                state = 3;
                                break;
                        case 3:
                                state = 4;
                                break;
                        default:
                                break;
                        }
                } else {
                        switch (state) {
                        case 3:
                                p += scnprintf(str + p, len - p, ",%u", last);
                                state = 0;
                                break;
                        case 4:
                                p += scnprintf(str + p, len - p, "-%u", last);
                                state = 0;
                                break;
                        default:
                                break;
                        }
                        if (state != 1)
                                state = 0;
                }
        }
}

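/*
 * Check that the value of the config term @name is one of the values
 * advertised by the @caps file in sysfs (and that the feature is @supported),
 * printing the list of valid values if it is not.
 */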
static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
                                    const char *caps, const char *name,
                                    const char *supported, u64 config)
{
        char valid_str[256];
        unsigned int shift;
        unsigned long long valid;
        u64 bits;
        int ok;

        if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
                valid = 0;

        if (supported &&
            perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
                valid = 0;

        valid |= 1;

        bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);

        config &= bits;

        for (shift = 0; bits && !(bits & 1); shift++)
                bits >>= 1;

        config >>= shift;

        if (config > 63)
                goto out_err;

        /* 'valid' is 64 bits wide, so shift a 64-bit constant */
        if (valid & (1ULL << config))
                return 0;
out_err:
        intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
        pr_err("Invalid %s for %s. Valid values are: %s\n",
               name, INTEL_PT_PMU_NAME, valid_str);
        return -EINVAL;
}

static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
                                    struct perf_evsel *evsel)
{
        int err;

        if (!evsel)
                return 0;

        err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
                                       "cyc_thresh", "caps/psb_cyc",
                                       evsel->attr.config);
        if (err)
                return err;

        err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
                                       "mtc_period", "caps/mtc",
                                       evsel->attr.config);
        if (err)
                return err;

        return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
                                        "psb_period", "caps/psb_cyc",
                                        evsel->attr.config);
}

static int intel_pt_recording_options(struct auxtrace_record *itr,
                                      struct perf_evlist *evlist,
                                      struct record_opts *opts)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
        bool have_timing_info;
        struct perf_evsel *evsel, *intel_pt_evsel = NULL;
        const struct cpu_map *cpus = evlist->cpus;
        bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
        u64 tsc_bit;
        int err;

        ptr->evlist = evlist;
        ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

        evlist__for_each(evlist, evsel) {
                if (evsel->attr.type == intel_pt_pmu->type) {
                        if (intel_pt_evsel) {
                                pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
                                return -EINVAL;
                        }
                        evsel->attr.freq = 0;
                        evsel->attr.sample_period = 1;
                        intel_pt_evsel = evsel;
                        opts->full_auxtrace = true;
                }
        }

        if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
                pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
                return -EINVAL;
        }

        if (opts->use_clockid) {
                pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
                return -EINVAL;
        }

        if (!opts->full_auxtrace)
                return 0;

        err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
        if (err)
                return err;

        /* Set default sizes for snapshot mode */
        if (opts->auxtrace_snapshot_mode) {
                size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);

                if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
                        if (privileged) {
                                opts->auxtrace_mmap_pages = MiB(4) / page_size;
                        } else {
                                opts->auxtrace_mmap_pages = KiB(128) / page_size;
                                if (opts->mmap_pages == UINT_MAX)
                                        opts->mmap_pages = KiB(256) / page_size;
                        }
                } else if (!opts->auxtrace_mmap_pages && !privileged &&
                           opts->mmap_pages == UINT_MAX) {
                        opts->mmap_pages = KiB(256) / page_size;
                }
                if (!opts->auxtrace_snapshot_size)
                        opts->auxtrace_snapshot_size =
                                opts->auxtrace_mmap_pages * (size_t)page_size;
                if (!opts->auxtrace_mmap_pages) {
                        size_t sz = opts->auxtrace_snapshot_size;

                        sz = round_up(sz, page_size) / page_size;
                        opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
                }
                if (opts->auxtrace_snapshot_size >
                                opts->auxtrace_mmap_pages * (size_t)page_size) {
                        pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
                               opts->auxtrace_snapshot_size,
                               opts->auxtrace_mmap_pages * (size_t)page_size);
                        return -EINVAL;
                }
                if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
                        pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
                        return -EINVAL;
                }
                pr_debug2("Intel PT snapshot size: %zu\n",
                          opts->auxtrace_snapshot_size);
                if (psb_period &&
                    opts->auxtrace_snapshot_size <= psb_period +
                                                  INTEL_PT_PSB_PERIOD_NEAR)
                        ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
                                    opts->auxtrace_snapshot_size, psb_period);
        }

        /* Set default sizes for full trace mode */
        if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
                if (privileged) {
                        opts->auxtrace_mmap_pages = MiB(4) / page_size;
                } else {
                        opts->auxtrace_mmap_pages = KiB(128) / page_size;
                        if (opts->mmap_pages == UINT_MAX)
                                opts->mmap_pages = KiB(256) / page_size;
                }
        }

        /* Validate auxtrace_mmap_pages */
        if (opts->auxtrace_mmap_pages) {
                size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
                size_t min_sz;

                if (opts->auxtrace_snapshot_mode)
                        min_sz = KiB(4);
                else
                        min_sz = KiB(8);

                if (sz < min_sz || !is_power_of_2(sz)) {
                        pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
                               min_sz / 1024);
                        return -EINVAL;
                }
        }

        intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);

        if (opts->full_auxtrace && (intel_pt_evsel->attr.config & tsc_bit))
                have_timing_info = true;
        else
                have_timing_info = false;

        /*
         * Per-cpu recording needs sched_switch events to distinguish different
         * threads.
         */
        if (have_timing_info && !cpu_map__empty(cpus)) {
                if (perf_can_record_switch_events()) {
                        bool cpu_wide = !target__none(&opts->target) &&
                                        !target__has_task(&opts->target);

                        if (!cpu_wide && perf_can_record_cpu_wide()) {
                                struct perf_evsel *switch_evsel;

                                err = parse_events(evlist, "dummy:u", NULL);
                                if (err)
                                        return err;

                                switch_evsel = perf_evlist__last(evlist);

                                switch_evsel->attr.freq = 0;
                                switch_evsel->attr.sample_period = 1;
                                switch_evsel->attr.context_switch = 1;

                                switch_evsel->system_wide = true;
                                switch_evsel->no_aux_samples = true;
                                switch_evsel->immediate = true;

                                perf_evsel__set_sample_bit(switch_evsel, TID);
                                perf_evsel__set_sample_bit(switch_evsel, TIME);
                                perf_evsel__set_sample_bit(switch_evsel, CPU);

                                opts->record_switch_events = false;
                                ptr->have_sched_switch = 3;
                        } else {
                                opts->record_switch_events = true;
                                if (cpu_wide)
                                        ptr->have_sched_switch = 3;
                                else
                                        ptr->have_sched_switch = 2;
                        }
                } else {
                        err = intel_pt_track_switches(evlist);
                        if (err == -EPERM)
                                pr_debug2("Unable to select sched:sched_switch\n");
                        else if (err)
                                return err;
                        else
                                ptr->have_sched_switch = 1;
                }
        }

        if (intel_pt_evsel) {
                /*
                 * To obtain the auxtrace buffer file descriptor, the auxtrace
                 * event must come first.
                 */
                perf_evlist__to_front(evlist, intel_pt_evsel);
                /*
                 * In the case of per-cpu mmaps, we need the CPU on the
                 * AUX event.
                 */
                if (!cpu_map__empty(cpus))
                        perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
        }

        /* Add dummy event to keep tracking */
        if (opts->full_auxtrace) {
                struct perf_evsel *tracking_evsel;

                err = parse_events(evlist, "dummy:u", NULL);
                if (err)
                        return err;

                tracking_evsel = perf_evlist__last(evlist);

                perf_evlist__set_tracking_event(evlist, tracking_evsel);

                tracking_evsel->attr.freq = 0;
                tracking_evsel->attr.sample_period = 1;

                /* In per-cpu case, always need the time of mmap events etc */
                if (!cpu_map__empty(cpus)) {
                        perf_evsel__set_sample_bit(tracking_evsel, TIME);
                        /* And the CPU for switch events */
                        perf_evsel__set_sample_bit(tracking_evsel, CPU);
                }
        }

        /*
         * Warn the user when we do not have enough information to decode i.e.
         * per-cpu with no sched_switch (except workload-only).
         */
        if (!ptr->have_sched_switch && !cpu_map__empty(cpus) &&
            !target__none(&opts->target))
                ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");

        return 0;
}

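/*
 * Snapshots are taken with tracing stopped: snapshot_start disables the
 * Intel PT event before the data is copied and snapshot_finish re-enables it
 * afterwards.
 */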
static int intel_pt_snapshot_start(struct auxtrace_record *itr)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_evsel *evsel;

        evlist__for_each(ptr->evlist, evsel) {
                if (evsel->attr.type == ptr->intel_pt_pmu->type)
                        return perf_evsel__disable(evsel);
        }
        return -EINVAL;
}

static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_evsel *evsel;

        evlist__for_each(ptr->evlist, evsel) {
                if (evsel->attr.type == ptr->intel_pt_pmu->type)
                        return perf_evsel__enable(evsel);
        }
        return -EINVAL;
}

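/*
 * Grow the snapshot_refs array (doubling from a minimum of 16 entries) so
 * that mmap index @idx has an entry.
 */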
static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
{
        const size_t sz = sizeof(struct intel_pt_snapshot_ref);
        int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
        struct intel_pt_snapshot_ref *refs;

        if (!new_cnt)
                new_cnt = 16;

        while (new_cnt <= idx)
                new_cnt *= 2;

        refs = calloc(new_cnt, sz);
        if (!refs)
                return -ENOMEM;

        memcpy(refs, ptr->snapshot_refs, cnt * sz);

        /* Release the old array now that its entries have been copied */
        free(ptr->snapshot_refs);
        ptr->snapshot_refs = refs;
        ptr->snapshot_ref_cnt = new_cnt;

        return 0;
}

static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
{
        int i;

        for (i = 0; i < ptr->snapshot_ref_cnt; i++)
                zfree(&ptr->snapshot_refs[i].ref_buf);
        zfree(&ptr->snapshot_refs);
}

static void intel_pt_recording_free(struct auxtrace_record *itr)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);

        intel_pt_free_snapshot_refs(ptr);
        free(ptr);
}

static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
                                       size_t snapshot_buf_size)
{
        size_t ref_buf_size = ptr->snapshot_ref_buf_size;
        void *ref_buf;

        ref_buf = zalloc(ref_buf_size);
        if (!ref_buf)
                return -ENOMEM;

        ptr->snapshot_refs[idx].ref_buf = ref_buf;
        ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;

        return 0;
}

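/*
 * Size of the per-mmap reference buffer used to detect wrap-around: roughly
 * two PSB periods, capped at 256KiB, or zero (no reference buffer needed)
 * when the snapshot is small or the reference would cover most of it.
 */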
static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
                                             size_t snapshot_buf_size)
{
        const size_t max_size = 256 * 1024;
        size_t buf_size = 0, psb_period;

        if (ptr->snapshot_size <= 64 * 1024)
                return 0;

        psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
        if (psb_period)
                buf_size = psb_period * 2;

        if (!buf_size || buf_size > max_size)
                buf_size = max_size;

        if (buf_size >= snapshot_buf_size)
                return 0;

        if (buf_size >= ptr->snapshot_size / 2)
                return 0;

        return buf_size;
}

static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
                                  size_t snapshot_buf_size)
{
        if (ptr->snapshot_init_done)
                return 0;

        ptr->snapshot_init_done = true;

        ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
                                                        snapshot_buf_size);

        return 0;
}

/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous.  It is assumed that @compare_size <=
 * @buf2_size.  This function returns %false if the bytes are identical, %true
 * otherwise.
 */
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
                                     void *buf2, size_t offs2, size_t buf2_size)
{
        size_t end2 = offs2 + compare_size, part_size;

        if (end2 <= buf2_size)
                return memcmp(buf1, buf2 + offs2, compare_size);

        part_size = end2 - buf2_size;
        if (memcmp(buf1, buf2 + offs2, part_size))
                return true;

        compare_size -= part_size;

        return memcmp(buf1 + part_size, buf2, compare_size);
}

static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
                                 size_t ref_size, size_t buf_size,
                                 void *data, size_t head)
{
        size_t ref_end = ref_offset + ref_size;

        if (ref_end > buf_size) {
                if (head > ref_offset || head < ref_end - buf_size)
                        return true;
        } else if (head > ref_offset && head < ref_end) {
                return true;
        }

        return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
                                        buf_size);
}

static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
                              void *data, size_t head)
{
        if (head >= ref_size) {
                memcpy(ref_buf, data + head - ref_size, ref_size);
        } else {
                memcpy(ref_buf, data, head);
                ref_size -= head;
                memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
        }
}

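/*
 * Decide whether the AUX buffer for mmap @idx has wrapped by comparing the
 * saved reference bytes with the current data, then refresh the reference
 * for the next snapshot.
 */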
static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
                             struct auxtrace_mmap *mm, unsigned char *data,
                             u64 head)
{
        struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
        bool wrapped;

        wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
                                       ptr->snapshot_ref_buf_size, mm->len,
                                       data, head);

        intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
                          data, head);

        return wrapped;
}

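/*
 * Fallback wrap detection used when there is no reference buffer: the buffer
 * starts out zero-filled, so it must have wrapped if any of the last 512
 * 64-bit words are non-zero.
 */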
static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
        int i, a, b;

        b = buf_size >> 3;
        a = b - 512;
        if (a < 0)
                a = 0;

        for (i = a; i < b; i++) {
                if (data[i])
                        return true;
        }

        return false;
}

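/*
 * find_snapshot callback: called when a snapshot is taken to work out whether
 * the buffer has wrapped and to convert the snapshot-mode 'head'/'old'
 * offsets into the ever-increasing values the full-trace code path expects.
 */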
static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
                                  struct auxtrace_mmap *mm, unsigned char *data,
                                  u64 *head, u64 *old)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        bool wrapped;
        int err;

        pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
                  __func__, idx, (size_t)*old, (size_t)*head);

        err = intel_pt_snapshot_init(ptr, mm->len);
        if (err)
                goto out_err;

        if (idx >= ptr->snapshot_ref_cnt) {
                err = intel_pt_alloc_snapshot_refs(ptr, idx);
                if (err)
                        goto out_err;
        }

        if (ptr->snapshot_ref_buf_size) {
                if (!ptr->snapshot_refs[idx].ref_buf) {
                        err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
                        if (err)
                                goto out_err;
                }
                wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
        } else {
                wrapped = ptr->snapshot_refs[idx].wrapped;
                if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
                        ptr->snapshot_refs[idx].wrapped = true;
                        wrapped = true;
                }
        }

        /*
         * In full trace mode 'head' continually increases.  However in snapshot
         * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
         * are adjusted to match the full trace case which expects that 'old' is
         * always less than 'head'.
         */
        if (wrapped) {
                *old = *head;
                *head += mm->len;
        } else {
                if (mm->mask)
                        *old &= mm->mask;
                else
                        *old %= mm->len;
                if (*old > *head)
                        *head += mm->len;
        }

        pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
                  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

        return 0;

out_err:
        pr_err("%s: failed, error %d\n", __func__, err);
        return err;
}

static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
        return rdtsc();
}

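/*
 * read_finish callback: re-enable the Intel PT event for mmap @idx after its
 * AUX data has been read.
 */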
static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_evsel *evsel;

        evlist__for_each(ptr->evlist, evsel) {
                if (evsel->attr.type == ptr->intel_pt_pmu->type)
                        return perf_evlist__enable_event_idx(ptr->evlist, evsel,
                                                             idx);
        }
        return -EINVAL;
}

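/*
 * Set up the auxtrace_record callbacks for recording with the intel_pt PMU,
 * or return NULL if the PMU is not present.
 */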
struct auxtrace_record *intel_pt_recording_init(int *err)
{
        struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
        struct intel_pt_recording *ptr;

        if (!intel_pt_pmu)
                return NULL;

        ptr = zalloc(sizeof(struct intel_pt_recording));
        if (!ptr) {
                *err = -ENOMEM;
                return NULL;
        }

        ptr->intel_pt_pmu = intel_pt_pmu;
        ptr->itr.recording_options = intel_pt_recording_options;
        ptr->itr.info_priv_size = intel_pt_info_priv_size;
        ptr->itr.info_fill = intel_pt_info_fill;
        ptr->itr.free = intel_pt_recording_free;
        ptr->itr.snapshot_start = intel_pt_snapshot_start;
        ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
        ptr->itr.find_snapshot = intel_pt_find_snapshot;
        ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
        ptr->itr.reference = intel_pt_reference;
        ptr->itr.read_finish = intel_pt_read_finish;
        return &ptr->itr;
}