linux/tools/perf/arch/x86/util/intel-pt.c
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <cpuid.h>

#include "../../perf.h"
#include "../../util/session.h"
#include "../../util/event.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/cpumap.h"
#include "../../util/parse-options.h"
#include "../../util/parse-events.h"
#include "../../util/pmu.h"
#include "../../util/debug.h"
#include "../../util/auxtrace.h"
#include "../../util/tsc.h"
#include "../../util/intel-pt.h"

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

#define INTEL_PT_DEFAULT_SAMPLE_SIZE    KiB(4)

#define INTEL_PT_MAX_SAMPLE_SIZE        KiB(60)

#define INTEL_PT_PSB_PERIOD_NEAR        256

struct intel_pt_snapshot_ref {
        void *ref_buf;
        size_t ref_offset;
        bool wrapped;
};

struct intel_pt_recording {
        struct auxtrace_record          itr;
        struct perf_pmu                 *intel_pt_pmu;
        int                             have_sched_switch;
        struct perf_evlist              *evlist;
        bool                            snapshot_mode;
        bool                            snapshot_init_done;
        size_t                          snapshot_size;
        size_t                          snapshot_ref_buf_size;
        int                             snapshot_ref_cnt;
        struct intel_pt_snapshot_ref    *snapshot_refs;
};

static int intel_pt_parse_terms_with_default(struct list_head *formats,
                                             const char *str,
                                             u64 *config)
{
        struct list_head *terms;
        struct perf_event_attr attr = { .size = 0, };
        int err;

        terms = malloc(sizeof(struct list_head));
        if (!terms)
                return -ENOMEM;

        INIT_LIST_HEAD(terms);

        err = parse_events_terms(terms, str);
        if (err)
                goto out_free;

        attr.config = *config;
        err = perf_pmu__config_terms(formats, &attr, terms, true, NULL);
        if (err)
                goto out_free;

        *config = attr.config;
out_free:
        parse_events__free_terms(terms);
        return err;
}

static int intel_pt_parse_terms(struct list_head *formats, const char *str,
                                u64 *config)
{
        *config = 0;
        return intel_pt_parse_terms_with_default(formats, str, config);
}

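/*
 * Gather the bits of @bits selected by @mask into the low-order bits of the
 * result, preserving their relative order.  Used to extract the value of a
 * single PMU format field from a raw config value.
 */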
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
        const u64 top_bit = 1ULL << 63;
        u64 res = 0;
        int i;

        for (i = 0; i < 64; i++) {
                if (mask & top_bit) {
                        res <<= 1;
                        if (bits & top_bit)
                                res |= 1;
                }
                mask <<= 1;
                bits <<= 1;
        }

        return res;
}

static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
                                struct perf_evlist *evlist, u64 *res)
{
        struct perf_evsel *evsel;
        u64 mask;

        *res = 0;

        mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
        if (!mask)
                return -EINVAL;

        evlist__for_each(evlist, evsel) {
                if (evsel->attr.type == intel_pt_pmu->type) {
                        *res = intel_pt_masked_bits(mask, evsel->attr.config);
                        return 0;
                }
        }

        return -EINVAL;
}

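/*
 * Return the PSB (Packet Stream Boundary) period in bytes.  When configurable,
 * the psb_period config term encodes an approximate period of 2KiB << value of
 * trace data.
 */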
static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
                                  struct perf_evlist *evlist)
{
        u64 val;
        int err, topa_multiple_entries;
        size_t psb_period;

        if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
                                "%d", &topa_multiple_entries) != 1)
                topa_multiple_entries = 0;

        /*
         * Use caps/topa_multiple_entries to indicate early hardware that had
         * extra frequent PSBs.
         */
        if (!topa_multiple_entries) {
                psb_period = 256;
                goto out;
        }

        err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
        if (err)
                val = 0;

        psb_period = 1 << (val + 11);
out:
        pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
        return psb_period;
}

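/*
 * From a bitmask of supported values, pick the highest set bit at or below
 * @target, or failing that, the lowest set bit above it.  Returns -1 if no
 * bits are set.
 */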
static int intel_pt_pick_bit(int bits, int target)
{
        int pos, pick = -1;

        for (pos = 0; bits; bits >>= 1, pos++) {
                if (bits & 1) {
                        if (pos <= target || pick < 0)
                                pick = pos;
                        if (pos >= target)
                                break;
                }
        }

        return pick;
}

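/*
 * Build the default config string: always enable TSC packets and, where the
 * hardware advertises support, MTC and PSB packets with mid-range periods.
 */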
static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
{
        char buf[256];
        int mtc, mtc_periods = 0, mtc_period;
        int psb_cyc, psb_periods, psb_period;
        int pos = 0;
        u64 config;

        pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

        if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
                                &mtc) != 1)
                mtc = 1;

        if (mtc) {
                if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
                                        &mtc_periods) != 1)
                        mtc_periods = 0;
                if (mtc_periods) {
                        mtc_period = intel_pt_pick_bit(mtc_periods, 3);
                        pos += scnprintf(buf + pos, sizeof(buf) - pos,
                                         ",mtc,mtc_period=%d", mtc_period);
                }
        }

        if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
                                &psb_cyc) != 1)
                psb_cyc = 1;

        if (psb_cyc && mtc_periods) {
                if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
                                        &psb_periods) != 1)
                        psb_periods = 0;
                if (psb_periods) {
                        psb_period = intel_pt_pick_bit(psb_periods, 3);
                        pos += scnprintf(buf + pos, sizeof(buf) - pos,
                                         ",psb_period=%d", psb_period);
                }
        }

        pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);

        intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);

        return config;
}

static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
                                           struct record_opts *opts,
                                           const char *str)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        unsigned long long snapshot_size = 0;
        char *endptr;

        if (str) {
                snapshot_size = strtoull(str, &endptr, 0);
                if (*endptr || snapshot_size > SIZE_MAX)
                        return -1;
        }

        opts->auxtrace_snapshot_mode = true;
        opts->auxtrace_snapshot_size = snapshot_size;

        ptr->snapshot_size = snapshot_size;

        return 0;
}

struct perf_event_attr *
intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
{
        struct perf_event_attr *attr;

        attr = zalloc(sizeof(struct perf_event_attr));
        if (!attr)
                return NULL;

        attr->config = intel_pt_default_config(intel_pt_pmu);

        intel_pt_pmu->selectable = true;

        return attr;
}

static size_t intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused)
{
        return INTEL_PT_AUXTRACE_PRIV_SIZE;
}

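/*
 * CPUID leaf 0x15 gives the ratio of the TSC frequency to the core crystal
 * clock (CTC) frequency: numerator in EBX, denominator in EAX.
 */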
static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
        *n = ebx;
        *d = eax;
}

static int intel_pt_info_fill(struct auxtrace_record *itr,
                              struct perf_session *session,
                              struct auxtrace_info_event *auxtrace_info,
                              size_t priv_size)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
        struct perf_event_mmap_page *pc;
        struct perf_tsc_conversion tc = { .time_mult = 0, };
        bool cap_user_time_zero = false, per_cpu_mmaps;
        u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
        u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
        int err;

        if (priv_size != INTEL_PT_AUXTRACE_PRIV_SIZE)
                return -EINVAL;

        intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
        intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp",
                             &noretcomp_bit);
        intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit);
        mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
                                              "mtc_period");
        intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit);

        intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);

        if (!session->evlist->nr_mmaps)
                return -EINVAL;

        pc = session->evlist->mmap[0].base;
        if (pc) {
                err = perf_read_tsc_conversion(pc, &tc);
                if (err) {
                        if (err != -EOPNOTSUPP)
                                return err;
                } else {
                        cap_user_time_zero = tc.time_mult != 0;
                }
                if (!cap_user_time_zero)
                        ui__warning("Intel Processor Trace: TSC not available\n");
        }

        per_cpu_mmaps = !cpu_map__empty(session->evlist->cpus);

        auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
        auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
        auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
        auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
        auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
        auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
        auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
        auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
        auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
        auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
        auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
        auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
        auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
        auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
        auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
        auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;

        return 0;
}

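/*
 * Select the sched:sched_switch tracepoint as a system-wide event so that
 * context switches can be tracked when PERF_RECORD_SWITCH events are not
 * available.
 */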
static int intel_pt_track_switches(struct perf_evlist *evlist)
{
        const char *sched_switch = "sched:sched_switch";
        struct perf_evsel *evsel;
        int err;

        if (!perf_evlist__can_select_event(evlist, sched_switch))
                return -EPERM;

        err = parse_events(evlist, sched_switch, NULL);
        if (err) {
                pr_debug2("%s: failed to parse %s, error %d\n",
                          __func__, sched_switch, err);
                return err;
        }

        evsel = perf_evlist__last(evlist);

        perf_evsel__set_sample_bit(evsel, CPU);
        perf_evsel__set_sample_bit(evsel, TIME);

        evsel->system_wide = true;
        evsel->no_aux_samples = true;
        evsel->immediate = true;

        return 0;
}

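/*
 * Format a bitmask of valid values as a human-readable list of numbers and
 * ranges, e.g. 0x1f07 becomes "0-2,8-12".
 */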
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
        unsigned int val, last = 0, state = 1;
        int p = 0;

        str[0] = '\0';

        for (val = 0; val <= 64; val++, valid >>= 1) {
                if (valid & 1) {
                        last = val;
                        switch (state) {
                        case 0:
                                p += scnprintf(str + p, len - p, ",");
                                /* Fall through */
                        case 1:
                                p += scnprintf(str + p, len - p, "%u", val);
                                state = 2;
                                break;
                        case 2:
                                state = 3;
                                break;
                        case 3:
                                state = 4;
                                break;
                        default:
                                break;
                        }
                } else {
                        switch (state) {
                        case 3:
                                p += scnprintf(str + p, len - p, ",%u", last);
                                state = 0;
                                break;
                        case 4:
                                p += scnprintf(str + p, len - p, "-%u", last);
                                state = 0;
                                break;
                        default:
                                break;
                        }
                        if (state != 1)
                                state = 0;
                }
        }
}

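/*
 * Validate one config term (e.g. mtc_period) against the bitmask of values the
 * hardware advertises in the given caps file.  The value 0 is always accepted.
 */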
static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
                                    const char *caps, const char *name,
                                    const char *supported, u64 config)
{
        char valid_str[256];
        unsigned int shift;
        unsigned long long valid;
        u64 bits;
        int ok;

        if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
                valid = 0;

        if (supported &&
            perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
                valid = 0;

        valid |= 1;

        bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);

        config &= bits;

        for (shift = 0; bits && !(bits & 1); shift++)
                bits >>= 1;

        config >>= shift;

        if (config > 63)
                goto out_err;

        if (valid & ((u64)1 << config))
                return 0;
out_err:
        intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
        pr_err("Invalid %s for %s. Valid values are: %s\n",
               name, INTEL_PT_PMU_NAME, valid_str);
        return -EINVAL;
}

static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
                                    struct perf_evsel *evsel)
{
        int err;

        if (!evsel)
                return 0;

        err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
                                       "cyc_thresh", "caps/psb_cyc",
                                       evsel->attr.config);
        if (err)
                return err;

        err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
                                       "mtc_period", "caps/mtc",
                                       evsel->attr.config);
        if (err)
                return err;

        return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
                                        "psb_period", "caps/psb_cyc",
                                        evsel->attr.config);
}

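/*
 * Set up the record options for Intel PT: validate the event config, choose
 * default AUX area buffer and snapshot sizes, decide how context switches will
 * be tracked, and add a dummy tracking event for sideband records.
 */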
static int intel_pt_recording_options(struct auxtrace_record *itr,
                                      struct perf_evlist *evlist,
                                      struct record_opts *opts)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
        bool have_timing_info;
        struct perf_evsel *evsel, *intel_pt_evsel = NULL;
        const struct cpu_map *cpus = evlist->cpus;
        bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
        u64 tsc_bit;
        int err;

        ptr->evlist = evlist;
        ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

        evlist__for_each(evlist, evsel) {
                if (evsel->attr.type == intel_pt_pmu->type) {
                        if (intel_pt_evsel) {
                                pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
                                return -EINVAL;
                        }
                        evsel->attr.freq = 0;
                        evsel->attr.sample_period = 1;
                        intel_pt_evsel = evsel;
                        opts->full_auxtrace = true;
                }
        }

        if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
                pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
                return -EINVAL;
        }

        if (opts->use_clockid) {
                pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
                return -EINVAL;
        }

        if (!opts->full_auxtrace)
                return 0;

        err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
        if (err)
                return err;

        /* Set default sizes for snapshot mode */
        if (opts->auxtrace_snapshot_mode) {
                size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);

                if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
                        if (privileged) {
                                opts->auxtrace_mmap_pages = MiB(4) / page_size;
                        } else {
                                opts->auxtrace_mmap_pages = KiB(128) / page_size;
                                if (opts->mmap_pages == UINT_MAX)
                                        opts->mmap_pages = KiB(256) / page_size;
                        }
                } else if (!opts->auxtrace_mmap_pages && !privileged &&
                           opts->mmap_pages == UINT_MAX) {
                        opts->mmap_pages = KiB(256) / page_size;
                }
                if (!opts->auxtrace_snapshot_size)
                        opts->auxtrace_snapshot_size =
                                opts->auxtrace_mmap_pages * (size_t)page_size;
                if (!opts->auxtrace_mmap_pages) {
                        size_t sz = opts->auxtrace_snapshot_size;

                        sz = round_up(sz, page_size) / page_size;
                        opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
                }
                if (opts->auxtrace_snapshot_size >
                                opts->auxtrace_mmap_pages * (size_t)page_size) {
                        pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
                               opts->auxtrace_snapshot_size,
                               opts->auxtrace_mmap_pages * (size_t)page_size);
                        return -EINVAL;
                }
                if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
                        pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
                        return -EINVAL;
                }
                pr_debug2("Intel PT snapshot size: %zu\n",
                          opts->auxtrace_snapshot_size);
                if (psb_period &&
                    opts->auxtrace_snapshot_size <= psb_period +
                                                  INTEL_PT_PSB_PERIOD_NEAR)
                        ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
                                    opts->auxtrace_snapshot_size, psb_period);
        }

        /* Set default sizes for full trace mode */
        if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
                if (privileged) {
                        opts->auxtrace_mmap_pages = MiB(4) / page_size;
                } else {
                        opts->auxtrace_mmap_pages = KiB(128) / page_size;
                        if (opts->mmap_pages == UINT_MAX)
                                opts->mmap_pages = KiB(256) / page_size;
                }
        }

        /* Validate auxtrace_mmap_pages */
        if (opts->auxtrace_mmap_pages) {
                size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
                size_t min_sz;

                if (opts->auxtrace_snapshot_mode)
                        min_sz = KiB(4);
                else
                        min_sz = KiB(8);

                if (sz < min_sz || !is_power_of_2(sz)) {
                        pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
                               min_sz / 1024);
                        return -EINVAL;
                }
        }

        intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);

        if (opts->full_auxtrace && (intel_pt_evsel->attr.config & tsc_bit))
                have_timing_info = true;
        else
                have_timing_info = false;

        /*
         * Per-cpu recording needs sched_switch events to distinguish different
         * threads.
         */
        if (have_timing_info && !cpu_map__empty(cpus)) {
                if (perf_can_record_switch_events()) {
                        bool cpu_wide = !target__none(&opts->target) &&
                                        !target__has_task(&opts->target);

                        if (!cpu_wide && perf_can_record_cpu_wide()) {
                                struct perf_evsel *switch_evsel;

                                err = parse_events(evlist, "dummy:u", NULL);
                                if (err)
                                        return err;

                                switch_evsel = perf_evlist__last(evlist);

                                switch_evsel->attr.freq = 0;
                                switch_evsel->attr.sample_period = 1;
                                switch_evsel->attr.context_switch = 1;

                                switch_evsel->system_wide = true;
                                switch_evsel->no_aux_samples = true;
                                switch_evsel->immediate = true;

                                perf_evsel__set_sample_bit(switch_evsel, TID);
                                perf_evsel__set_sample_bit(switch_evsel, TIME);
                                perf_evsel__set_sample_bit(switch_evsel, CPU);

                                opts->record_switch_events = false;
                                ptr->have_sched_switch = 3;
                        } else {
                                opts->record_switch_events = true;
                                if (cpu_wide)
                                        ptr->have_sched_switch = 3;
                                else
                                        ptr->have_sched_switch = 2;
                        }
                } else {
                        err = intel_pt_track_switches(evlist);
                        if (err == -EPERM)
                                pr_debug2("Unable to select sched:sched_switch\n");
                        else if (err)
                                return err;
                        else
                                ptr->have_sched_switch = 1;
                }
        }

        if (intel_pt_evsel) {
                /*
                 * To obtain the auxtrace buffer file descriptor, the auxtrace
                 * event must come first.
                 */
                perf_evlist__to_front(evlist, intel_pt_evsel);
                /*
                 * In the case of per-cpu mmaps, we need the CPU on the
                 * AUX event.
                 */
                if (!cpu_map__empty(cpus))
                        perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
        }

        /* Add dummy event to keep tracking */
        if (opts->full_auxtrace) {
                struct perf_evsel *tracking_evsel;

                err = parse_events(evlist, "dummy:u", NULL);
                if (err)
                        return err;

                tracking_evsel = perf_evlist__last(evlist);

                perf_evlist__set_tracking_event(evlist, tracking_evsel);

                tracking_evsel->attr.freq = 0;
                tracking_evsel->attr.sample_period = 1;

                /* In per-cpu case, always need the time of mmap events etc */
                if (!cpu_map__empty(cpus)) {
                        perf_evsel__set_sample_bit(tracking_evsel, TIME);
                        /* And the CPU for switch events */
                        perf_evsel__set_sample_bit(tracking_evsel, CPU);
                }
        }

        /*
         * Warn the user when we do not have enough information to decode i.e.
         * per-cpu with no sched_switch (except workload-only).
         */
        if (!ptr->have_sched_switch && !cpu_map__empty(cpus) &&
            !target__none(&opts->target))
                ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");

        return 0;
}

static int intel_pt_snapshot_start(struct auxtrace_record *itr)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_evsel *evsel;

        evlist__for_each(ptr->evlist, evsel) {
                if (evsel->attr.type == ptr->intel_pt_pmu->type)
                        return perf_evlist__disable_event(ptr->evlist, evsel);
        }
        return -EINVAL;
}

static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_evsel *evsel;

        evlist__for_each(ptr->evlist, evsel) {
                if (evsel->attr.type == ptr->intel_pt_pmu->type)
                        return perf_evlist__enable_event(ptr->evlist, evsel);
        }
        return -EINVAL;
}

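/*
 * Grow the array of per-mmap snapshot references so that it has an entry for
 * mmap index @idx, doubling the allocation until it is big enough.
 */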
static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
{
        const size_t sz = sizeof(struct intel_pt_snapshot_ref);
        int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
        struct intel_pt_snapshot_ref *refs;

        if (!new_cnt)
                new_cnt = 16;

        while (new_cnt <= idx)
                new_cnt *= 2;

        refs = calloc(new_cnt, sz);
        if (!refs)
                return -ENOMEM;

        memcpy(refs, ptr->snapshot_refs, cnt * sz);

        free(ptr->snapshot_refs);       /* do not leak the old array */
        ptr->snapshot_refs = refs;
        ptr->snapshot_ref_cnt = new_cnt;

        return 0;
}

static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
{
        int i;

        for (i = 0; i < ptr->snapshot_ref_cnt; i++)
                zfree(&ptr->snapshot_refs[i].ref_buf);
        zfree(&ptr->snapshot_refs);
}

static void intel_pt_recording_free(struct auxtrace_record *itr)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);

        intel_pt_free_snapshot_refs(ptr);
        free(ptr);
}

static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
                                       size_t snapshot_buf_size)
{
        size_t ref_buf_size = ptr->snapshot_ref_buf_size;
        void *ref_buf;

        ref_buf = zalloc(ref_buf_size);
        if (!ref_buf)
                return -ENOMEM;

        ptr->snapshot_refs[idx].ref_buf = ref_buf;
        ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;

        return 0;
}

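/*
 * Size of the reference buffer used to detect wrap-around of the snapshot
 * buffer.  Two PSB periods of trace data should be enough to see a change,
 * capped at 256KiB.  Small snapshots do without a reference buffer.
 */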
static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
                                             size_t snapshot_buf_size)
{
        const size_t max_size = 256 * 1024;
        size_t buf_size = 0, psb_period;

        if (ptr->snapshot_size <= 64 * 1024)
                return 0;

        psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
        if (psb_period)
                buf_size = psb_period * 2;

        if (!buf_size || buf_size > max_size)
                buf_size = max_size;

        if (buf_size >= snapshot_buf_size)
                return 0;

        if (buf_size >= ptr->snapshot_size / 2)
                return 0;

        return buf_size;
}

static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
                                  size_t snapshot_buf_size)
{
        if (ptr->snapshot_init_done)
                return 0;

        ptr->snapshot_init_done = true;

        ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
                                                        snapshot_buf_size);

        return 0;
}

/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous.  It is assumed that @compare_size <=
 * @buf2_size.  This function returns %false if the bytes are identical, %true
 * otherwise.
 */
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
                                     void *buf2, size_t offs2, size_t buf2_size)
{
        size_t end2 = offs2 + compare_size, part_size;

        if (end2 <= buf2_size)
                return memcmp(buf1, buf2 + offs2, compare_size);

        part_size = end2 - buf2_size;
        if (memcmp(buf1, buf2 + offs2, part_size))
                return true;

        compare_size -= part_size;

        return memcmp(buf1 + part_size, buf2, compare_size);
}

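/*
 * Compare the saved reference data with the same region of the AUX buffer.
 * If the current head lies within the reference region, the old data there has
 * been at least partly overwritten, so report a wrap-around without comparing;
 * otherwise report one only if the data differs.
 */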
static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
                                 size_t ref_size, size_t buf_size,
                                 void *data, size_t head)
{
        size_t ref_end = ref_offset + ref_size;

        if (ref_end > buf_size) {
                if (head > ref_offset || head < ref_end - buf_size)
                        return true;
        } else if (head > ref_offset && head < ref_end) {
                return true;
        }

        return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
                                        buf_size);
}

static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
                              void *data, size_t head)
{
        if (head >= ref_size) {
                memcpy(ref_buf, data + head - ref_size, ref_size);
        } else {
                memcpy(ref_buf, data, head);
                ref_size -= head;
                memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
        }
}

static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
                             struct auxtrace_mmap *mm, unsigned char *data,
                             u64 head)
{
        struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
        bool wrapped;

        wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
                                       ptr->snapshot_ref_buf_size, mm->len,
                                       data, head);

        intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
                          data, head);

        return wrapped;
}

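/*
 * When no reference buffer is in use, detect the first wrap-around by checking
 * whether the last 512 u64s of the buffer contain any non-zero data: the mmap
 * starts out zero-filled, so data at the end means the writer has been there.
 */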
static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
        int i, a, b;

        b = buf_size >> 3;
        a = b - 512;
        if (a < 0)
                a = 0;

        for (i = a; i < b; i++) {
                if (data[i])
                        return true;
        }

        return false;
}

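/*
 * Called when a snapshot is taken: work out whether the AUX buffer has wrapped
 * and adjust 'old' and 'head' so that the data can be copied out as in
 * full-trace mode.
 */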
static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
                                  struct auxtrace_mmap *mm, unsigned char *data,
                                  u64 *head, u64 *old)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        bool wrapped;
        int err;

        pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
                  __func__, idx, (size_t)*old, (size_t)*head);

        err = intel_pt_snapshot_init(ptr, mm->len);
        if (err)
                goto out_err;

        if (idx >= ptr->snapshot_ref_cnt) {
                err = intel_pt_alloc_snapshot_refs(ptr, idx);
                if (err)
                        goto out_err;
        }

        if (ptr->snapshot_ref_buf_size) {
                if (!ptr->snapshot_refs[idx].ref_buf) {
                        err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
                        if (err)
                                goto out_err;
                }
                wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
        } else {
                wrapped = ptr->snapshot_refs[idx].wrapped;
                if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
                        ptr->snapshot_refs[idx].wrapped = true;
                        wrapped = true;
                }
        }

        /*
         * In full trace mode 'head' continually increases.  However in snapshot
         * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
         * are adjusted to match the full trace case which expects that 'old' is
         * always less than 'head'.
         */
        if (wrapped) {
                *old = *head;
                *head += mm->len;
        } else {
                if (mm->mask)
                        *old &= mm->mask;
                else
                        *old %= mm->len;
                if (*old > *head)
                        *head += mm->len;
        }

        pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
                  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

        return 0;

out_err:
        pr_err("%s: failed, error %d\n", __func__, err);
        return err;
}

static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
        return rdtsc();
}

static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
{
        struct intel_pt_recording *ptr =
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_evsel *evsel;

        evlist__for_each(ptr->evlist, evsel) {
                if (evsel->attr.type == ptr->intel_pt_pmu->type)
                        return perf_evlist__enable_event_idx(ptr->evlist, evsel,
                                                             idx);
        }
        return -EINVAL;
}

struct auxtrace_record *intel_pt_recording_init(int *err)
{
        struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
        struct intel_pt_recording *ptr;

        if (!intel_pt_pmu)
                return NULL;

        ptr = zalloc(sizeof(struct intel_pt_recording));
        if (!ptr) {
                *err = -ENOMEM;
                return NULL;
        }

        ptr->intel_pt_pmu = intel_pt_pmu;
        ptr->itr.recording_options = intel_pt_recording_options;
        ptr->itr.info_priv_size = intel_pt_info_priv_size;
        ptr->itr.info_fill = intel_pt_info_fill;
        ptr->itr.free = intel_pt_recording_free;
        ptr->itr.snapshot_start = intel_pt_snapshot_start;
        ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
        ptr->itr.find_snapshot = intel_pt_find_snapshot;
        ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
        ptr->itr.reference = intel_pt_reference;
        ptr->itr.read_finish = intel_pt_read_finish;
        return &ptr->itr;
}