linux/tools/perf/arch/x86/util/intel-bts.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel-bts.c: Intel Branch Trace Store (BTS) support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>

#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evsel.h"
#include "../../../util/evlist.h"
#include "../../../util/mmap.h"
#include "../../../util/session.h"
#include "../../../util/pmu.h"
#include "../../../util/debug.h"
#include "../../../util/record.h"
#include "../../../util/tsc.h"
#include "../../../util/auxtrace.h"
#include "../../../util/intel-bts.h"
#include <internal/lib.h> // page_size

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

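/*
 * Per-mmap snapshot state.  Only 'wrapped' is used in this file, to
 * remember whether the AUX buffer for a given mmap has already wrapped
 * around; 'ref_buf' is merely freed on teardown.
 */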
struct intel_bts_snapshot_ref {
        void    *ref_buf;
        size_t  ref_offset;
        bool    wrapped;
};

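/*
 * BTS recording context.  The generic auxtrace_record callbacks are
 * embedded as 'itr' so container_of() can recover this structure.
 */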
struct intel_bts_recording {
        struct auxtrace_record          itr;
        struct perf_pmu                 *intel_bts_pmu;
        struct evlist           *evlist;
        bool                            snapshot_mode;
        size_t                          snapshot_size;
        int                             snapshot_ref_cnt;
        struct intel_bts_snapshot_ref   *snapshot_refs;
};

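/*
 * Layout of a single BTS branch record: from address, to address, and a
 * misc word.  Its size is used below as the AUX buffer alignment.
 */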
struct branch {
        u64 from;
        u64 to;
        u64 misc;
};

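/* Size of the private data that intel_bts_info_fill() writes below. */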
static size_t
intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused,
                         struct evlist *evlist __maybe_unused)
{
        return INTEL_BTS_AUXTRACE_PRIV_SIZE;
}

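/*
 * Fill in the auxtrace info private data: the BTS PMU type, the TSC
 * conversion parameters read from the first mmapped perf_event_mmap_page,
 * and whether snapshot mode is in use.
 */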
static int intel_bts_info_fill(struct auxtrace_record *itr,
                               struct perf_session *session,
                               struct perf_record_auxtrace_info *auxtrace_info,
                               size_t priv_size)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
        struct perf_event_mmap_page *pc;
        struct perf_tsc_conversion tc = { .time_mult = 0, };
        bool cap_user_time_zero = false;
        int err;

        if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE)
                return -EINVAL;

        if (!session->evlist->core.nr_mmaps)
                return -EINVAL;

        pc = session->evlist->mmap[0].core.base;
        if (pc) {
                err = perf_read_tsc_conversion(pc, &tc);
                if (err) {
                        if (err != -EOPNOTSUPP)
                                return err;
                } else {
                        cap_user_time_zero = tc.time_mult != 0;
                }
                if (!cap_user_time_zero)
                        ui__warning("Intel BTS: TSC not available\n");
        }

        auxtrace_info->type = PERF_AUXTRACE_INTEL_BTS;
        auxtrace_info->priv[INTEL_BTS_PMU_TYPE] = intel_bts_pmu->type;
        auxtrace_info->priv[INTEL_BTS_TIME_SHIFT] = tc.time_shift;
        auxtrace_info->priv[INTEL_BTS_TIME_MULT] = tc.time_mult;
        auxtrace_info->priv[INTEL_BTS_TIME_ZERO] = tc.time_zero;
        auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO] = cap_user_time_zero;
        auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE] = btsr->snapshot_mode;

        return 0;
}

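/*
 * Validate and set up recording options for BTS.  Usage sketch, assuming
 * a system whose PMU exposes intel_bts:
 *
 *   perf record -e intel_bts// -- workload        # full trace
 *   perf record -e intel_bts// -S -- workload     # snapshot mode
 *
 * BTS supports neither AUX area sampling nor per-cpu recording, and at
 * most one intel_bts event may be specified.
 */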
static int intel_bts_recording_options(struct auxtrace_record *itr,
                                       struct evlist *evlist,
                                       struct record_opts *opts)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
        struct evsel *evsel, *intel_bts_evsel = NULL;
        const struct perf_cpu_map *cpus = evlist->core.cpus;
        bool privileged = perf_event_paranoid_check(-1);

        if (opts->auxtrace_sample_mode) {
                pr_err("Intel BTS does not support AUX area sampling\n");
                return -EINVAL;
        }

        btsr->evlist = evlist;
        btsr->snapshot_mode = opts->auxtrace_snapshot_mode;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.attr.type == intel_bts_pmu->type) {
                        if (intel_bts_evsel) {
                                pr_err("There may be only one " INTEL_BTS_PMU_NAME " event\n");
                                return -EINVAL;
                        }
                        evsel->core.attr.freq = 0;
                        evsel->core.attr.sample_period = 1;
                        intel_bts_evsel = evsel;
                        opts->full_auxtrace = true;
                }
        }

        if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
                pr_err("Snapshot mode (-S option) requires " INTEL_BTS_PMU_NAME " PMU event (-e " INTEL_BTS_PMU_NAME ")\n");
                return -EINVAL;
        }

        if (!opts->full_auxtrace)
                return 0;

        if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) {
                pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
                return -EINVAL;
        }

        /* Set default sizes for snapshot mode */
        if (opts->auxtrace_snapshot_mode) {
                if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
                        if (privileged) {
                                opts->auxtrace_mmap_pages = MiB(4) / page_size;
                        } else {
                                opts->auxtrace_mmap_pages = KiB(128) / page_size;
                                if (opts->mmap_pages == UINT_MAX)
                                        opts->mmap_pages = KiB(256) / page_size;
                        }
                } else if (!opts->auxtrace_mmap_pages && !privileged &&
                           opts->mmap_pages == UINT_MAX) {
                        opts->mmap_pages = KiB(256) / page_size;
                }
                if (!opts->auxtrace_snapshot_size)
                        opts->auxtrace_snapshot_size =
                                opts->auxtrace_mmap_pages * (size_t)page_size;
                if (!opts->auxtrace_mmap_pages) {
                        size_t sz = opts->auxtrace_snapshot_size;

                        sz = round_up(sz, page_size) / page_size;
                        opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
                }
                if (opts->auxtrace_snapshot_size >
                                opts->auxtrace_mmap_pages * (size_t)page_size) {
                        pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
                               opts->auxtrace_snapshot_size,
                               opts->auxtrace_mmap_pages * (size_t)page_size);
                        return -EINVAL;
                }
                if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
                        pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
                        return -EINVAL;
                }
                pr_debug2("Intel BTS snapshot size: %zu\n",
                          opts->auxtrace_snapshot_size);
        }

        /* Set default sizes for full trace mode */
        if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
                if (privileged) {
                        opts->auxtrace_mmap_pages = MiB(4) / page_size;
                } else {
                        opts->auxtrace_mmap_pages = KiB(128) / page_size;
                        if (opts->mmap_pages == UINT_MAX)
                                opts->mmap_pages = KiB(256) / page_size;
                }
        }

        /* Validate auxtrace_mmap_pages */
        if (opts->auxtrace_mmap_pages) {
                size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
                size_t min_sz;

                if (opts->auxtrace_snapshot_mode)
                        min_sz = KiB(4);
                else
                        min_sz = KiB(8);

                if (sz < min_sz || !is_power_of_2(sz)) {
                        pr_err("Invalid mmap size for Intel BTS: must be at least %zuKiB and a power of 2\n",
                               min_sz / 1024);
                        return -EINVAL;
                }
        }

        if (intel_bts_evsel) {
                /*
                 * To obtain the auxtrace buffer file descriptor, the auxtrace event
                 * must come first.
                 */
                evlist__to_front(evlist, intel_bts_evsel);
                /*
                 * In the case of per-cpu mmaps, we need the CPU on the
                 * AUX event.
                 */
                if (!perf_cpu_map__empty(cpus))
                        evsel__set_sample_bit(intel_bts_evsel, CPU);
        }

        /* Add dummy event to keep tracking */
        if (opts->full_auxtrace) {
                struct evsel *tracking_evsel;
                int err;

                err = parse_events(evlist, "dummy:u", NULL);
                if (err)
                        return err;

                tracking_evsel = evlist__last(evlist);

                evlist__set_tracking_event(evlist, tracking_evsel);

                tracking_evsel->core.attr.freq = 0;
                tracking_evsel->core.attr.sample_period = 1;
        }

        return 0;
}

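/*
 * Parse the argument of the -S (snapshot) option: an optional snapshot
 * size in bytes (strtoull() with base 0, so hex and octal forms are
 * accepted too).
 */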
static int intel_bts_parse_snapshot_options(struct auxtrace_record *itr,
                                            struct record_opts *opts,
                                            const char *str)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        unsigned long long snapshot_size = 0;
        char *endptr;

        if (str) {
                snapshot_size = strtoull(str, &endptr, 0);
                if (*endptr || snapshot_size > SIZE_MAX)
                        return -1;
        }

        opts->auxtrace_snapshot_mode = true;
        opts->auxtrace_snapshot_size = snapshot_size;

        btsr->snapshot_size = snapshot_size;

        return 0;
}

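/* A raw TSC read serves as the reference timestamp for AUX records. */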
static u64 intel_bts_reference(struct auxtrace_record *itr __maybe_unused)
{
        return rdtsc();
}

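/*
 * Grow the snapshot_refs array geometrically (doubling, starting at 16)
 * until it covers mmap index 'idx', preserving existing entries.
 */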
static int intel_bts_alloc_snapshot_refs(struct intel_bts_recording *btsr,
                                         int idx)
{
        const size_t sz = sizeof(struct intel_bts_snapshot_ref);
        int cnt = btsr->snapshot_ref_cnt, new_cnt = cnt * 2;
        struct intel_bts_snapshot_ref *refs;

        if (!new_cnt)
                new_cnt = 16;

        while (new_cnt <= idx)
                new_cnt *= 2;

        refs = calloc(new_cnt, sz);
        if (!refs)
                return -ENOMEM;

        memcpy(refs, btsr->snapshot_refs, cnt * sz);
        /* Release the old array now that its entries have been copied */
        free(btsr->snapshot_refs);

        btsr->snapshot_refs = refs;
        btsr->snapshot_ref_cnt = new_cnt;

        return 0;
}

static void intel_bts_free_snapshot_refs(struct intel_bts_recording *btsr)
{
        int i;

        for (i = 0; i < btsr->snapshot_ref_cnt; i++)
                zfree(&btsr->snapshot_refs[i].ref_buf);
        zfree(&btsr->snapshot_refs);
}

static void intel_bts_recording_free(struct auxtrace_record *itr)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);

        intel_bts_free_snapshot_refs(btsr);
        free(btsr);
}

static int intel_bts_snapshot_start(struct auxtrace_record *itr)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        struct evsel *evsel;

        evlist__for_each_entry(btsr->evlist, evsel) {
                if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
                        return evsel__disable(evsel);
        }
        return -EINVAL;
}

static int intel_bts_snapshot_finish(struct auxtrace_record *itr)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        struct evsel *evsel;

        evlist__for_each_entry(btsr->evlist, evsel) {
                if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
                        return evsel__enable(evsel);
        }
        return -EINVAL;
}

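/*
 * Heuristic for detecting the first wrap-around of the AUX buffer: the
 * buffer starts out zero-filled, so non-zero data among the last 512
 * 64-bit words (4KiB) implies the writer has already reached the end of
 * the buffer and wrapped.
 */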
static bool intel_bts_first_wrap(u64 *data, size_t buf_size)
{
        int i, a, b;

        b = buf_size >> 3;
        a = b - 512;
        if (a < 0)
                a = 0;

        for (i = a; i < b; i++) {
                if (data[i])
                        return true;
        }

        return false;
}

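/*
 * Work out which part of the AUX buffer constitutes the snapshot, by
 * adjusting 'old' and 'head' so that callers can treat snapshot data the
 * same way as full-trace data (where 'head' only ever increases).
 */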
static int intel_bts_find_snapshot(struct auxtrace_record *itr, int idx,
                                   struct auxtrace_mmap *mm, unsigned char *data,
                                   u64 *head, u64 *old)
{
        struct intel_bts_recording *btsr =
                        container_of(itr, struct intel_bts_recording, itr);
        bool wrapped;
        int err;

        pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
                  __func__, idx, (size_t)*old, (size_t)*head);

        if (idx >= btsr->snapshot_ref_cnt) {
                err = intel_bts_alloc_snapshot_refs(btsr, idx);
                if (err)
                        goto out_err;
        }

        wrapped = btsr->snapshot_refs[idx].wrapped;
        if (!wrapped && intel_bts_first_wrap((u64 *)data, mm->len)) {
                btsr->snapshot_refs[idx].wrapped = true;
                wrapped = true;
        }

        /*
         * In full trace mode 'head' continually increases.  However in snapshot
         * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
         * are adjusted to match the full trace case which expects that 'old' is
         * always less than 'head'.
         */
        if (wrapped) {
                *old = *head;
                *head += mm->len;
        } else {
                if (mm->mask)
                        *old &= mm->mask;
                else
                        *old %= mm->len;
                if (*old > *head)
                        *head += mm->len;
        }

        pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
                  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

        return 0;

out_err:
        pr_err("%s: failed, error %d\n", __func__, err);
        return err;
}

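/*
 * Entry point used during record setup: returns the BTS auxtrace_record
 * callbacks, or NULL (with *err left unchanged) if the intel_bts PMU is
 * not present on this system.
 */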
struct auxtrace_record *intel_bts_recording_init(int *err)
{
        struct perf_pmu *intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
        struct intel_bts_recording *btsr;

        if (!intel_bts_pmu)
                return NULL;

        if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
                *err = -errno;
                return NULL;
        }

        btsr = zalloc(sizeof(struct intel_bts_recording));
        if (!btsr) {
                *err = -ENOMEM;
                return NULL;
        }

        btsr->intel_bts_pmu = intel_bts_pmu;
        btsr->itr.pmu = intel_bts_pmu;
        btsr->itr.recording_options = intel_bts_recording_options;
        btsr->itr.info_priv_size = intel_bts_info_priv_size;
        btsr->itr.info_fill = intel_bts_info_fill;
        btsr->itr.free = intel_bts_recording_free;
        btsr->itr.snapshot_start = intel_bts_snapshot_start;
        btsr->itr.snapshot_finish = intel_bts_snapshot_finish;
        btsr->itr.find_snapshot = intel_bts_find_snapshot;
        btsr->itr.parse_snapshot_options = intel_bts_parse_snapshot_options;
        btsr->itr.reference = intel_bts_reference;
        btsr->itr.read_finish = auxtrace_record__read_finish;
        btsr->itr.alignment = sizeof(struct branch);
        return &btsr->itr;
}