linux/tools/perf/util/intel-pt.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * intel_pt.c: Intel Processor Trace support
   4 * Copyright (c) 2013-2015, Intel Corporation.
   5 */
   6
   7#include <inttypes.h>
   8#include <stdio.h>
   9#include <stdbool.h>
  10#include <errno.h>
  11#include <linux/kernel.h>
  12#include <linux/string.h>
  13#include <linux/types.h>
  14#include <linux/zalloc.h>
  15
  16#include "session.h"
  17#include "machine.h"
  18#include "memswap.h"
  19#include "sort.h"
  20#include "tool.h"
  21#include "event.h"
  22#include "evlist.h"
  23#include "evsel.h"
  24#include "map.h"
  25#include "color.h"
  26#include "thread.h"
  27#include "thread-stack.h"
  28#include "symbol.h"
  29#include "callchain.h"
  30#include "dso.h"
  31#include "debug.h"
  32#include "auxtrace.h"
  33#include "tsc.h"
  34#include "intel-pt.h"
  35#include "config.h"
  36#include "util/perf_api_probe.h"
  37#include "util/synthetic-events.h"
  38#include "time-utils.h"
  39
  40#include "../arch/x86/include/uapi/asm/perf_regs.h"
  41
  42#include "intel-pt-decoder/intel-pt-log.h"
  43#include "intel-pt-decoder/intel-pt-decoder.h"
  44#include "intel-pt-decoder/intel-pt-insn-decoder.h"
  45#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
  46
  47#define MAX_TIMESTAMP (~0ULL)
  48
  49struct range {
  50        u64 start;
  51        u64 end;
  52};
  53
  54struct intel_pt {
  55        struct auxtrace auxtrace;
  56        struct auxtrace_queues queues;
  57        struct auxtrace_heap heap;
  58        u32 auxtrace_type;
  59        struct perf_session *session;
  60        struct machine *machine;
  61        struct evsel *switch_evsel;
  62        struct thread *unknown_thread;
  63        bool timeless_decoding;
  64        bool sampling_mode;
  65        bool snapshot_mode;
  66        bool per_cpu_mmaps;
  67        bool have_tsc;
  68        bool data_queued;
  69        bool est_tsc;
  70        bool sync_switch;
  71        bool mispred_all;
  72        bool use_thread_stack;
  73        bool callstack;
  74        unsigned int br_stack_sz;
  75        unsigned int br_stack_sz_plus;
  76        int have_sched_switch;
  77        u32 pmu_type;
  78        u64 kernel_start;
  79        u64 switch_ip;
  80        u64 ptss_ip;
  81        u64 first_timestamp;
  82
  83        struct perf_tsc_conversion tc;
  84        bool cap_user_time_zero;
  85
  86        struct itrace_synth_opts synth_opts;
  87
  88        bool sample_instructions;
  89        u64 instructions_sample_type;
  90        u64 instructions_id;
  91
  92        bool sample_branches;
  93        u32 branches_filter;
  94        u64 branches_sample_type;
  95        u64 branches_id;
  96
  97        bool sample_transactions;
  98        u64 transactions_sample_type;
  99        u64 transactions_id;
 100
 101        bool sample_ptwrites;
 102        u64 ptwrites_sample_type;
 103        u64 ptwrites_id;
 104
 105        bool sample_pwr_events;
 106        u64 pwr_events_sample_type;
 107        u64 mwait_id;
 108        u64 pwre_id;
 109        u64 exstop_id;
 110        u64 pwrx_id;
 111        u64 cbr_id;
 112        u64 psb_id;
 113
 114        bool single_pebs;
 115        bool sample_pebs;
 116        struct evsel *pebs_evsel;
 117
 118        u64 tsc_bit;
 119        u64 mtc_bit;
 120        u64 mtc_freq_bits;
 121        u32 tsc_ctc_ratio_n;
 122        u32 tsc_ctc_ratio_d;
 123        u64 cyc_bit;
 124        u64 noretcomp_bit;
 125        unsigned max_non_turbo_ratio;
 126        unsigned cbr2khz;
 127        int max_loops;
 128
 129        unsigned long num_events;
 130
 131        char *filter;
 132        struct addr_filters filts;
 133
 134        struct range *time_ranges;
 135        unsigned int range_cnt;
 136
 137        struct ip_callchain *chain;
 138        struct branch_stack *br_stack;
 139
 140        u64 dflt_tsc_offset;
 141        struct rb_root vmcs_info;
 142};
 143
 144enum switch_state {
 145        INTEL_PT_SS_NOT_TRACING,
 146        INTEL_PT_SS_UNKNOWN,
 147        INTEL_PT_SS_TRACING,
 148        INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
 149        INTEL_PT_SS_EXPECTING_SWITCH_IP,
 150};
 151
 152/* applicable_counters is 64-bits */
 153#define INTEL_PT_MAX_PEBS 64
 154
 155struct intel_pt_pebs_event {
 156        struct evsel *evsel;
 157        u64 id;
 158};
 159
 160struct intel_pt_queue {
 161        struct intel_pt *pt;
 162        unsigned int queue_nr;
 163        struct auxtrace_buffer *buffer;
 164        struct auxtrace_buffer *old_buffer;
 165        void *decoder;
 166        const struct intel_pt_state *state;
 167        struct ip_callchain *chain;
 168        struct branch_stack *last_branch;
 169        union perf_event *event_buf;
 170        bool on_heap;
 171        bool stop;
 172        bool step_through_buffers;
 173        bool use_buffer_pid_tid;
 174        bool sync_switch;
 175        bool sample_ipc;
 176        pid_t pid, tid;
 177        int cpu;
 178        int switch_state;
 179        pid_t next_tid;
 180        struct thread *thread;
 181        struct machine *guest_machine;
 182        struct thread *unknown_guest_thread;
 183        pid_t guest_machine_pid;
 184        bool exclude_kernel;
 185        bool have_sample;
 186        u64 time;
 187        u64 timestamp;
 188        u64 sel_timestamp;
 189        bool sel_start;
 190        unsigned int sel_idx;
 191        u32 flags;
 192        u16 insn_len;
 193        u64 last_insn_cnt;
 194        u64 ipc_insn_cnt;
 195        u64 ipc_cyc_cnt;
 196        u64 last_in_insn_cnt;
 197        u64 last_in_cyc_cnt;
 198        u64 last_br_insn_cnt;
 199        u64 last_br_cyc_cnt;
 200        unsigned int cbr_seen;
 201        char insn[INTEL_PT_INSN_BUF_SZ];
 202        struct intel_pt_pebs_event pebs[INTEL_PT_MAX_PEBS];
 203};
 204
 205static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
 206                          unsigned char *buf, size_t len)
 207{
 208        struct intel_pt_pkt packet;
 209        size_t pos = 0;
 210        int ret, pkt_len, i;
 211        char desc[INTEL_PT_PKT_DESC_MAX];
 212        const char *color = PERF_COLOR_BLUE;
 213        enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
 214
 215        color_fprintf(stdout, color,
 216                      ". ... Intel Processor Trace data: size %zu bytes\n",
 217                      len);
 218
 219        while (len) {
 220                ret = intel_pt_get_packet(buf, len, &packet, &ctx);
 221                if (ret > 0)
 222                        pkt_len = ret;
 223                else
 224                        pkt_len = 1;
 225                printf(".");
 226                color_fprintf(stdout, color, "  %08x: ", pos);
 227                for (i = 0; i < pkt_len; i++)
 228                        color_fprintf(stdout, color, " %02x", buf[i]);
 229                for (; i < 16; i++)
 230                        color_fprintf(stdout, color, "   ");
 231                if (ret > 0) {
 232                        ret = intel_pt_pkt_desc(&packet, desc,
 233                                                INTEL_PT_PKT_DESC_MAX);
 234                        if (ret > 0)
 235                                color_fprintf(stdout, color, " %s\n", desc);
 236                } else {
 237                        color_fprintf(stdout, color, " Bad packet!\n");
 238                }
 239                pos += pkt_len;
 240                buf += pkt_len;
 241                len -= pkt_len;
 242        }
 243}
 244
 245static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
 246                                size_t len)
 247{
 248        printf(".\n");
 249        intel_pt_dump(pt, buf, len);
 250}
 251
 252static void intel_pt_log_event(union perf_event *event)
 253{
 254        FILE *f = intel_pt_log_fp();
 255
 256        if (!intel_pt_enable_logging || !f)
 257                return;
 258
 259        perf_event__fprintf(event, NULL, f);
 260}
 261
 262static void intel_pt_dump_sample(struct perf_session *session,
 263                                 struct perf_sample *sample)
 264{
 265        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
 266                                           auxtrace);
 267
 268        printf("\n");
 269        intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
 270}
 271
 272static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
 273{
 274        struct perf_time_interval *range = pt->synth_opts.ptime_range;
 275        int n = pt->synth_opts.range_num;
 276
 277        if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
 278                return true;
 279
 280        if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
 281                return false;
 282
 283        /* perf_time__ranges_skip_sample does not work if time is zero */
 284        if (!tm)
 285                tm = 1;
 286
 287        return !n || !perf_time__ranges_skip_sample(range, n, tm);
 288}
 289
 290static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
 291                                                        u64 vmcs,
 292                                                        u64 dflt_tsc_offset)
 293{
 294        struct rb_node **p = &rb_root->rb_node;
 295        struct rb_node *parent = NULL;
 296        struct intel_pt_vmcs_info *v;
 297
 298        while (*p) {
 299                parent = *p;
 300                v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);
 301
 302                if (v->vmcs == vmcs)
 303                        return v;
 304
 305                if (vmcs < v->vmcs)
 306                        p = &(*p)->rb_left;
 307                else
 308                        p = &(*p)->rb_right;
 309        }
 310
 311        v = zalloc(sizeof(*v));
 312        if (v) {
 313                v->vmcs = vmcs;
 314                v->tsc_offset = dflt_tsc_offset;
 315                v->reliable = dflt_tsc_offset;
 316
 317                rb_link_node(&v->rb_node, parent, p);
 318                rb_insert_color(&v->rb_node, rb_root);
 319        }
 320
 321        return v;
 322}
 323
 324static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
 325{
 326        struct intel_pt_queue *ptq = data;
 327        struct intel_pt *pt = ptq->pt;
 328
 329        if (!vmcs && !pt->dflt_tsc_offset)
 330                return NULL;
 331
 332        return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
 333}
 334
 335static void intel_pt_free_vmcs_info(struct intel_pt *pt)
 336{
 337        struct intel_pt_vmcs_info *v;
 338        struct rb_node *n;
 339
 340        n = rb_first(&pt->vmcs_info);
 341        while (n) {
 342                v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
 343                n = rb_next(n);
 344                rb_erase(&v->rb_node, &pt->vmcs_info);
 345                free(v);
 346        }
 347}
 348
 349static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
 350                                   struct auxtrace_buffer *b)
 351{
 352        bool consecutive = false;
 353        void *start;
 354
 355        start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
 356                                      pt->have_tsc, &consecutive,
 357                                      pt->synth_opts.vm_time_correlation);
 358        if (!start)
 359                return -EINVAL;
 360        /*
 361         * In the case of vm_time_correlation, the overlap might contain TSC
 362         * packets that will not be fixed, and that will then no longer work for
 363         * overlap detection. Avoid that by zeroing out the overlap.
 364         */
 365        if (pt->synth_opts.vm_time_correlation)
 366                memset(b->data, 0, start - b->data);
 367        b->use_size = b->data + b->size - start;
 368        b->use_data = start;
 369        if (b->use_size && consecutive)
 370                b->consecutive = true;
 371        return 0;
 372}
 373
 374static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
 375                               struct auxtrace_buffer *buffer,
 376                               struct auxtrace_buffer *old_buffer,
 377                               struct intel_pt_buffer *b)
 378{
 379        bool might_overlap;
 380
 381        if (!buffer->data) {
 382                int fd = perf_data__fd(ptq->pt->session->data);
 383
 384                buffer->data = auxtrace_buffer__get_data(buffer, fd);
 385                if (!buffer->data)
 386                        return -ENOMEM;
 387        }
 388
 389        might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
 390        if (might_overlap && !buffer->consecutive && old_buffer &&
 391            intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
 392                return -ENOMEM;
 393
 394        if (buffer->use_data) {
 395                b->len = buffer->use_size;
 396                b->buf = buffer->use_data;
 397        } else {
 398                b->len = buffer->size;
 399                b->buf = buffer->data;
 400        }
 401        b->ref_timestamp = buffer->reference;
 402
 403        if (!old_buffer || (might_overlap && !buffer->consecutive)) {
 404                b->consecutive = false;
 405                b->trace_nr = buffer->buffer_nr + 1;
 406        } else {
 407                b->consecutive = true;
 408        }
 409
 410        return 0;
 411}
 412
 413/* Do not drop buffers with references - refer intel_pt_get_trace() */
 414static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
 415                                           struct auxtrace_buffer *buffer)
 416{
 417        if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
 418                return;
 419
 420        auxtrace_buffer__drop_data(buffer);
 421}
 422
 423/* Must be serialized with respect to intel_pt_get_trace() */
 424static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
 425                              void *cb_data)
 426{
 427        struct intel_pt_queue *ptq = data;
 428        struct auxtrace_buffer *buffer = ptq->buffer;
 429        struct auxtrace_buffer *old_buffer = ptq->old_buffer;
 430        struct auxtrace_queue *queue;
 431        int err = 0;
 432
 433        queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
 434
 435        while (1) {
 436                struct intel_pt_buffer b = { .len = 0 };
 437
 438                buffer = auxtrace_buffer__next(queue, buffer);
 439                if (!buffer)
 440                        break;
 441
 442                err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
 443                if (err)
 444                        break;
 445
 446                if (b.len) {
 447                        intel_pt_lookahead_drop_buffer(ptq, old_buffer);
 448                        old_buffer = buffer;
 449                } else {
 450                        intel_pt_lookahead_drop_buffer(ptq, buffer);
 451                        continue;
 452                }
 453
 454                err = cb(&b, cb_data);
 455                if (err)
 456                        break;
 457        }
 458
 459        if (buffer != old_buffer)
 460                intel_pt_lookahead_drop_buffer(ptq, buffer);
 461        intel_pt_lookahead_drop_buffer(ptq, old_buffer);
 462
 463        return err;
 464}
 465
 466/*
 467 * This function assumes data is processed sequentially only.
 468 * Must be serialized with respect to intel_pt_lookahead()
 469 */
 470static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
 471{
 472        struct intel_pt_queue *ptq = data;
 473        struct auxtrace_buffer *buffer = ptq->buffer;
 474        struct auxtrace_buffer *old_buffer = ptq->old_buffer;
 475        struct auxtrace_queue *queue;
 476        int err;
 477
 478        if (ptq->stop) {
 479                b->len = 0;
 480                return 0;
 481        }
 482
 483        queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
 484
 485        buffer = auxtrace_buffer__next(queue, buffer);
 486        if (!buffer) {
 487                if (old_buffer)
 488                        auxtrace_buffer__drop_data(old_buffer);
 489                b->len = 0;
 490                return 0;
 491        }
 492
 493        ptq->buffer = buffer;
 494
 495        err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
 496        if (err)
 497                return err;
 498
 499        if (ptq->step_through_buffers)
 500                ptq->stop = true;
 501
 502        if (b->len) {
 503                if (old_buffer)
 504                        auxtrace_buffer__drop_data(old_buffer);
 505                ptq->old_buffer = buffer;
 506        } else {
 507                auxtrace_buffer__drop_data(buffer);
 508                return intel_pt_get_trace(b, data);
 509        }
 510
 511        return 0;
 512}
 513
 514struct intel_pt_cache_entry {
 515        struct auxtrace_cache_entry     entry;
 516        u64                             insn_cnt;
 517        u64                             byte_cnt;
 518        enum intel_pt_insn_op           op;
 519        enum intel_pt_insn_branch       branch;
 520        int                             length;
 521        int32_t                         rel;
 522        char                            insn[INTEL_PT_INSN_BUF_SZ];
 523};
 524
 525static int intel_pt_config_div(const char *var, const char *value, void *data)
 526{
 527        int *d = data;
 528        long val;
 529
 530        if (!strcmp(var, "intel-pt.cache-divisor")) {
 531                val = strtol(value, NULL, 0);
 532                if (val > 0 && val <= INT_MAX)
 533                        *d = val;
 534        }
 535
 536        return 0;
 537}
 538
 539static int intel_pt_cache_divisor(void)
 540{
 541        static int d;
 542
 543        if (d)
 544                return d;
 545
 546        perf_config(intel_pt_config_div, &d);
 547
 548        if (!d)
 549                d = 64;
 550
 551        return d;
 552}
 553
 554static unsigned int intel_pt_cache_size(struct dso *dso,
 555                                        struct machine *machine)
 556{
 557        off_t size;
 558
 559        size = dso__data_size(dso, machine);
 560        size /= intel_pt_cache_divisor();
 561        if (size < 1000)
 562                return 10;
 563        if (size > (1 << 21))
 564                return 21;
 565        return 32 - __builtin_clz(size);
 566}
 567
 568static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
 569                                             struct machine *machine)
 570{
 571        struct auxtrace_cache *c;
 572        unsigned int bits;
 573
 574        if (dso->auxtrace_cache)
 575                return dso->auxtrace_cache;
 576
 577        bits = intel_pt_cache_size(dso, machine);
 578
 579        /* Ignoring cache creation failure */
 580        c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);
 581
 582        dso->auxtrace_cache = c;
 583
 584        return c;
 585}
 586
 587static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
 588                              u64 offset, u64 insn_cnt, u64 byte_cnt,
 589                              struct intel_pt_insn *intel_pt_insn)
 590{
 591        struct auxtrace_cache *c = intel_pt_cache(dso, machine);
 592        struct intel_pt_cache_entry *e;
 593        int err;
 594
 595        if (!c)
 596                return -ENOMEM;
 597
 598        e = auxtrace_cache__alloc_entry(c);
 599        if (!e)
 600                return -ENOMEM;
 601
 602        e->insn_cnt = insn_cnt;
 603        e->byte_cnt = byte_cnt;
 604        e->op = intel_pt_insn->op;
 605        e->branch = intel_pt_insn->branch;
 606        e->length = intel_pt_insn->length;
 607        e->rel = intel_pt_insn->rel;
 608        memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
 609
 610        err = auxtrace_cache__add(c, offset, &e->entry);
 611        if (err)
 612                auxtrace_cache__free_entry(c, e);
 613
 614        return err;
 615}
 616
 617static struct intel_pt_cache_entry *
 618intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
 619{
 620        struct auxtrace_cache *c = intel_pt_cache(dso, machine);
 621
 622        if (!c)
 623                return NULL;
 624
 625        return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
 626}
 627
 628static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
 629                                      u64 offset)
 630{
 631        struct auxtrace_cache *c = intel_pt_cache(dso, machine);
 632
 633        if (!c)
 634                return;
 635
 636        auxtrace_cache__remove(dso->auxtrace_cache, offset);
 637}
 638
 639static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
 640{
 641        /* Assumes 64-bit kernel */
 642        return ip & (1ULL << 63);
 643}
 644
 645static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
 646{
 647        if (nr) {
 648                return intel_pt_guest_kernel_ip(ip) ?
 649                       PERF_RECORD_MISC_GUEST_KERNEL :
 650                       PERF_RECORD_MISC_GUEST_USER;
 651        }
 652
 653        return ip >= ptq->pt->kernel_start ?
 654               PERF_RECORD_MISC_KERNEL :
 655               PERF_RECORD_MISC_USER;
 656}
 657
 658static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
 659{
 660        /* No support for non-zero CS base */
 661        if (from_ip)
 662                return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
 663        return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
 664}
 665
 666static int intel_pt_get_guest(struct intel_pt_queue *ptq)
 667{
 668        struct machines *machines = &ptq->pt->session->machines;
 669        struct machine *machine;
 670        pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;
 671
 672        if (ptq->guest_machine && pid == ptq->guest_machine_pid)
 673                return 0;
 674
 675        ptq->guest_machine = NULL;
 676        thread__zput(ptq->unknown_guest_thread);
 677
 678        machine = machines__find_guest(machines, pid);
 679        if (!machine)
 680                return -1;
 681
 682        ptq->unknown_guest_thread = machine__idle_thread(machine);
 683        if (!ptq->unknown_guest_thread)
 684                return -1;
 685
 686        ptq->guest_machine = machine;
 687        ptq->guest_machine_pid = pid;
 688
 689        return 0;
 690}
 691
 692static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
 693                                   uint64_t *insn_cnt_ptr, uint64_t *ip,
 694                                   uint64_t to_ip, uint64_t max_insn_cnt,
 695                                   void *data)
 696{
 697        struct intel_pt_queue *ptq = data;
 698        struct machine *machine = ptq->pt->machine;
 699        struct thread *thread;
 700        struct addr_location al;
 701        unsigned char buf[INTEL_PT_INSN_BUF_SZ];
 702        ssize_t len;
 703        int x86_64;
 704        u8 cpumode;
 705        u64 offset, start_offset, start_ip;
 706        u64 insn_cnt = 0;
 707        bool one_map = true;
 708        bool nr;
 709
 710        intel_pt_insn->length = 0;
 711
 712        if (to_ip && *ip == to_ip)
 713                goto out_no_cache;
 714
 715        nr = ptq->state->to_nr;
 716        cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);
 717
 718        if (nr) {
 719                if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL ||
 720                    intel_pt_get_guest(ptq))
 721                        return -EINVAL;
 722                machine = ptq->guest_machine;
 723                thread = ptq->unknown_guest_thread;
 724        } else {
 725                thread = ptq->thread;
 726                if (!thread) {
 727                        if (cpumode != PERF_RECORD_MISC_KERNEL)
 728                                return -EINVAL;
 729                        thread = ptq->pt->unknown_thread;
 730                }
 731        }
 732
 733        while (1) {
 734                if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
 735                        return -EINVAL;
 736
 737                if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
 738                    dso__data_status_seen(al.map->dso,
 739                                          DSO_DATA_STATUS_SEEN_ITRACE))
 740                        return -ENOENT;
 741
 742                offset = al.map->map_ip(al.map, *ip);
 743
 744                if (!to_ip && one_map) {
 745                        struct intel_pt_cache_entry *e;
 746
 747                        e = intel_pt_cache_lookup(al.map->dso, machine, offset);
 748                        if (e &&
 749                            (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
 750                                *insn_cnt_ptr = e->insn_cnt;
 751                                *ip += e->byte_cnt;
 752                                intel_pt_insn->op = e->op;
 753                                intel_pt_insn->branch = e->branch;
 754                                intel_pt_insn->length = e->length;
 755                                intel_pt_insn->rel = e->rel;
 756                                memcpy(intel_pt_insn->buf, e->insn,
 757                                       INTEL_PT_INSN_BUF_SZ);
 758                                intel_pt_log_insn_no_data(intel_pt_insn, *ip);
 759                                return 0;
 760                        }
 761                }
 762
 763                start_offset = offset;
 764                start_ip = *ip;
 765
 766                /* Load maps to ensure dso->is_64_bit has been updated */
 767                map__load(al.map);
 768
 769                x86_64 = al.map->dso->is_64_bit;
 770
 771                while (1) {
 772                        len = dso__data_read_offset(al.map->dso, machine,
 773                                                    offset, buf,
 774                                                    INTEL_PT_INSN_BUF_SZ);
 775                        if (len <= 0)
 776                                return -EINVAL;
 777
 778                        if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
 779                                return -EINVAL;
 780
 781                        intel_pt_log_insn(intel_pt_insn, *ip);
 782
 783                        insn_cnt += 1;
 784
 785                        if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
 786                                goto out;
 787
 788                        if (max_insn_cnt && insn_cnt >= max_insn_cnt)
 789                                goto out_no_cache;
 790
 791                        *ip += intel_pt_insn->length;
 792
 793                        if (to_ip && *ip == to_ip) {
 794                                intel_pt_insn->length = 0;
 795                                goto out_no_cache;
 796                        }
 797
 798                        if (*ip >= al.map->end)
 799                                break;
 800
 801                        offset += intel_pt_insn->length;
 802                }
 803                one_map = false;
 804        }
 805out:
 806        *insn_cnt_ptr = insn_cnt;
 807
 808        if (!one_map)
 809                goto out_no_cache;
 810
 811        /*
 812         * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
 813         * entries.
 814         */
 815        if (to_ip) {
 816                struct intel_pt_cache_entry *e;
 817
 818                e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
 819                if (e)
 820                        return 0;
 821        }
 822
 823        /* Ignore cache errors */
 824        intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
 825                           *ip - start_ip, intel_pt_insn);
 826
 827        return 0;
 828
 829out_no_cache:
 830        *insn_cnt_ptr = insn_cnt;
 831        return 0;
 832}
 833
 834static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
 835                                  uint64_t offset, const char *filename)
 836{
 837        struct addr_filter *filt;
 838        bool have_filter   = false;
 839        bool hit_tracestop = false;
 840        bool hit_filter    = false;
 841
 842        list_for_each_entry(filt, &pt->filts.head, list) {
 843                if (filt->start)
 844                        have_filter = true;
 845
 846                if ((filename && !filt->filename) ||
 847                    (!filename && filt->filename) ||
 848                    (filename && strcmp(filename, filt->filename)))
 849                        continue;
 850
 851                if (!(offset >= filt->addr && offset < filt->addr + filt->size))
 852                        continue;
 853
 854                intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
 855                             ip, offset, filename ? filename : "[kernel]",
 856                             filt->start ? "filter" : "stop",
 857                             filt->addr, filt->size);
 858
 859                if (filt->start)
 860                        hit_filter = true;
 861                else
 862                        hit_tracestop = true;
 863        }
 864
 865        if (!hit_tracestop && !hit_filter)
 866                intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
 867                             ip, offset, filename ? filename : "[kernel]");
 868
 869        return hit_tracestop || (have_filter && !hit_filter);
 870}
 871
 872static int __intel_pt_pgd_ip(uint64_t ip, void *data)
 873{
 874        struct intel_pt_queue *ptq = data;
 875        struct thread *thread;
 876        struct addr_location al;
 877        u8 cpumode;
 878        u64 offset;
 879
 880        if (ptq->state->to_nr) {
 881                if (intel_pt_guest_kernel_ip(ip))
 882                        return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
 883                /* No support for decoding guest user space */
 884                return -EINVAL;
 885        } else if (ip >= ptq->pt->kernel_start) {
 886                return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
 887        }
 888
 889        cpumode = PERF_RECORD_MISC_USER;
 890
 891        thread = ptq->thread;
 892        if (!thread)
 893                return -EINVAL;
 894
 895        if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
 896                return -EINVAL;
 897
 898        offset = al.map->map_ip(al.map, ip);
 899
 900        return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
 901                                     al.map->dso->long_name);
 902}
 903
 904static bool intel_pt_pgd_ip(uint64_t ip, void *data)
 905{
 906        return __intel_pt_pgd_ip(ip, data) > 0;
 907}
 908
 909static bool intel_pt_get_config(struct intel_pt *pt,
 910                                struct perf_event_attr *attr, u64 *config)
 911{
 912        if (attr->type == pt->pmu_type) {
 913                if (config)
 914                        *config = attr->config;
 915                return true;
 916        }
 917
 918        return false;
 919}
 920
 921static bool intel_pt_exclude_kernel(struct intel_pt *pt)
 922{
 923        struct evsel *evsel;
 924
 925        evlist__for_each_entry(pt->session->evlist, evsel) {
 926                if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
 927                    !evsel->core.attr.exclude_kernel)
 928                        return false;
 929        }
 930        return true;
 931}
 932
 933static bool intel_pt_return_compression(struct intel_pt *pt)
 934{
 935        struct evsel *evsel;
 936        u64 config;
 937
 938        if (!pt->noretcomp_bit)
 939                return true;
 940
 941        evlist__for_each_entry(pt->session->evlist, evsel) {
 942                if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
 943                    (config & pt->noretcomp_bit))
 944                        return false;
 945        }
 946        return true;
 947}
 948
 949static bool intel_pt_branch_enable(struct intel_pt *pt)
 950{
 951        struct evsel *evsel;
 952        u64 config;
 953
 954        evlist__for_each_entry(pt->session->evlist, evsel) {
 955                if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
 956                    (config & 1) && !(config & 0x2000))
 957                        return false;
 958        }
 959        return true;
 960}
 961
 962static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
 963{
 964        struct evsel *evsel;
 965        unsigned int shift;
 966        u64 config;
 967
 968        if (!pt->mtc_freq_bits)
 969                return 0;
 970
 971        for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
 972                config >>= 1;
 973
 974        evlist__for_each_entry(pt->session->evlist, evsel) {
 975                if (intel_pt_get_config(pt, &evsel->core.attr, &config))
 976                        return (config & pt->mtc_freq_bits) >> shift;
 977        }
 978        return 0;
 979}
 980
 981static bool intel_pt_timeless_decoding(struct intel_pt *pt)
 982{
 983        struct evsel *evsel;
 984        bool timeless_decoding = true;
 985        u64 config;
 986
 987        if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
 988                return true;
 989
 990        evlist__for_each_entry(pt->session->evlist, evsel) {
 991                if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
 992                        return true;
 993                if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
 994                        if (config & pt->tsc_bit)
 995                                timeless_decoding = false;
 996                        else
 997                                return true;
 998                }
 999        }
1000        return timeless_decoding;
1001}
1002
1003static bool intel_pt_tracing_kernel(struct intel_pt *pt)
1004{
1005        struct evsel *evsel;
1006
1007        evlist__for_each_entry(pt->session->evlist, evsel) {
1008                if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
1009                    !evsel->core.attr.exclude_kernel)
1010                        return true;
1011        }
1012        return false;
1013}
1014
1015static bool intel_pt_have_tsc(struct intel_pt *pt)
1016{
1017        struct evsel *evsel;
1018        bool have_tsc = false;
1019        u64 config;
1020
1021        if (!pt->tsc_bit)
1022                return false;
1023
1024        evlist__for_each_entry(pt->session->evlist, evsel) {
1025                if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
1026                        if (config & pt->tsc_bit)
1027                                have_tsc = true;
1028                        else
1029                                return false;
1030                }
1031        }
1032        return have_tsc;
1033}
1034
1035static bool intel_pt_have_mtc(struct intel_pt *pt)
1036{
1037        struct evsel *evsel;
1038        u64 config;
1039
1040        evlist__for_each_entry(pt->session->evlist, evsel) {
1041                if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
1042                    (config & pt->mtc_bit))
1043                        return true;
1044        }
1045        return false;
1046}
1047
1048static bool intel_pt_sampling_mode(struct intel_pt *pt)
1049{
1050        struct evsel *evsel;
1051
1052        evlist__for_each_entry(pt->session->evlist, evsel) {
1053                if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
1054                    evsel->core.attr.aux_sample_size)
1055                        return true;
1056        }
1057        return false;
1058}
1059
1060static u64 intel_pt_ctl(struct intel_pt *pt)
1061{
1062        struct evsel *evsel;
1063        u64 config;
1064
1065        evlist__for_each_entry(pt->session->evlist, evsel) {
1066                if (intel_pt_get_config(pt, &evsel->core.attr, &config))
1067                        return config;
1068        }
1069        return 0;
1070}
1071
1072static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
1073{
1074        u64 quot, rem;
1075
1076        quot = ns / pt->tc.time_mult;
1077        rem  = ns % pt->tc.time_mult;
1078        return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
1079                pt->tc.time_mult;
1080}
1081
1082static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
1083{
1084        size_t sz = sizeof(struct ip_callchain);
1085
1086        /* Add 1 to callchain_sz for callchain context */
1087        sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
1088        return zalloc(sz);
1089}
1090
1091static int intel_pt_callchain_init(struct intel_pt *pt)
1092{
1093        struct evsel *evsel;
1094
1095        evlist__for_each_entry(pt->session->evlist, evsel) {
1096                if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
1097                        evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
1098        }
1099
1100        pt->chain = intel_pt_alloc_chain(pt);
1101        if (!pt->chain)
1102                return -ENOMEM;
1103
1104        return 0;
1105}
1106
1107static void intel_pt_add_callchain(struct intel_pt *pt,
1108                                   struct perf_sample *sample)
1109{
1110        struct thread *thread = machine__findnew_thread(pt->machine,
1111                                                        sample->pid,
1112                                                        sample->tid);
1113
1114        thread_stack__sample_late(thread, sample->cpu, pt->chain,
1115                                  pt->synth_opts.callchain_sz + 1, sample->ip,
1116                                  pt->kernel_start);
1117
1118        sample->callchain = pt->chain;
1119}
1120
1121static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
1122{
1123        size_t sz = sizeof(struct branch_stack);
1124
1125        sz += entry_cnt * sizeof(struct branch_entry);
1126        return zalloc(sz);
1127}
1128
1129static int intel_pt_br_stack_init(struct intel_pt *pt)
1130{
1131        struct evsel *evsel;
1132
1133        evlist__for_each_entry(pt->session->evlist, evsel) {
1134                if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
1135                        evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
1136        }
1137
1138        pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
1139        if (!pt->br_stack)
1140                return -ENOMEM;
1141
1142        return 0;
1143}
1144
1145static void intel_pt_add_br_stack(struct intel_pt *pt,
1146                                  struct perf_sample *sample)
1147{
1148        struct thread *thread = machine__findnew_thread(pt->machine,
1149                                                        sample->pid,
1150                                                        sample->tid);
1151
1152        thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
1153                                     pt->br_stack_sz, sample->ip,
1154                                     pt->kernel_start);
1155
1156        sample->branch_stack = pt->br_stack;
1157}
1158
1159/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
1160#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
1161
1162static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
1163                                                   unsigned int queue_nr)
1164{
1165        struct intel_pt_params params = { .get_trace = 0, };
1166        struct perf_env *env = pt->machine->env;
1167        struct intel_pt_queue *ptq;
1168
1169        ptq = zalloc(sizeof(struct intel_pt_queue));
1170        if (!ptq)
1171                return NULL;
1172
1173        if (pt->synth_opts.callchain) {
1174                ptq->chain = intel_pt_alloc_chain(pt);
1175                if (!ptq->chain)
1176                        goto out_free;
1177        }
1178
1179        if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
1180                unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);
1181
1182                ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
1183                if (!ptq->last_branch)
1184                        goto out_free;
1185        }
1186
1187        ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
1188        if (!ptq->event_buf)
1189                goto out_free;
1190
1191        ptq->pt = pt;
1192        ptq->queue_nr = queue_nr;
1193        ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
1194        ptq->pid = -1;
1195        ptq->tid = -1;
1196        ptq->cpu = -1;
1197        ptq->next_tid = -1;
1198
1199        params.get_trace = intel_pt_get_trace;
1200        params.walk_insn = intel_pt_walk_next_insn;
1201        params.lookahead = intel_pt_lookahead;
1202        params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
1203        params.data = ptq;
1204        params.return_compression = intel_pt_return_compression(pt);
1205        params.branch_enable = intel_pt_branch_enable(pt);
1206        params.ctl = intel_pt_ctl(pt);
1207        params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
1208        params.mtc_period = intel_pt_mtc_period(pt);
1209        params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
1210        params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
1211        params.quick = pt->synth_opts.quick;
1212        params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
1213        params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
1214        params.first_timestamp = pt->first_timestamp;
1215        params.max_loops = pt->max_loops;
1216
1217        if (pt->filts.cnt > 0)
1218                params.pgd_ip = intel_pt_pgd_ip;
1219
1220        if (pt->synth_opts.instructions) {
1221                if (pt->synth_opts.period) {
1222                        switch (pt->synth_opts.period_type) {
1223                        case PERF_ITRACE_PERIOD_INSTRUCTIONS:
1224                                params.period_type =
1225                                                INTEL_PT_PERIOD_INSTRUCTIONS;
1226                                params.period = pt->synth_opts.period;
1227                                break;
1228                        case PERF_ITRACE_PERIOD_TICKS:
1229                                params.period_type = INTEL_PT_PERIOD_TICKS;
1230                                params.period = pt->synth_opts.period;
1231                                break;
1232                        case PERF_ITRACE_PERIOD_NANOSECS:
1233                                params.period_type = INTEL_PT_PERIOD_TICKS;
1234                                params.period = intel_pt_ns_to_ticks(pt,
1235                                                        pt->synth_opts.period);
1236                                break;
1237                        default:
1238                                break;
1239                        }
1240                }
1241
1242                if (!params.period) {
1243                        params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
1244                        params.period = 1;
1245                }
1246        }
1247
1248        if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
1249                params.flags |= INTEL_PT_FUP_WITH_NLIP;
1250
1251        ptq->decoder = intel_pt_decoder_new(&params);
1252        if (!ptq->decoder)
1253                goto out_free;
1254
1255        return ptq;
1256
1257out_free:
1258        zfree(&ptq->event_buf);
1259        zfree(&ptq->last_branch);
1260        zfree(&ptq->chain);
1261        free(ptq);
1262        return NULL;
1263}
1264
1265static void intel_pt_free_queue(void *priv)
1266{
1267        struct intel_pt_queue *ptq = priv;
1268
1269        if (!ptq)
1270                return;
1271        thread__zput(ptq->thread);
1272        thread__zput(ptq->unknown_guest_thread);
1273        intel_pt_decoder_free(ptq->decoder);
1274        zfree(&ptq->event_buf);
1275        zfree(&ptq->last_branch);
1276        zfree(&ptq->chain);
1277        free(ptq);
1278}
1279
1280static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
1281{
1282        unsigned int i;
1283
1284        pt->first_timestamp = timestamp;
1285
1286        for (i = 0; i < pt->queues.nr_queues; i++) {
1287                struct auxtrace_queue *queue = &pt->queues.queue_array[i];
1288                struct intel_pt_queue *ptq = queue->priv;
1289
1290                if (ptq && ptq->decoder)
1291                        intel_pt_set_first_timestamp(ptq->decoder, timestamp);
1292        }
1293}
1294
1295static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
1296                                     struct auxtrace_queue *queue)
1297{
1298        struct intel_pt_queue *ptq = queue->priv;
1299
1300        if (queue->tid == -1 || pt->have_sched_switch) {
1301                ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
1302                if (ptq->tid == -1)
1303                        ptq->pid = -1;
1304                thread__zput(ptq->thread);
1305        }
1306
1307        if (!ptq->thread && ptq->tid != -1)
1308                ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
1309
1310        if (ptq->thread) {
1311                ptq->pid = ptq->thread->pid_;
1312                if (queue->cpu == -1)
1313                        ptq->cpu = ptq->thread->cpu;
1314        }
1315}
1316
1317static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
1318{
1319        ptq->insn_len = 0;
1320        if (ptq->state->flags & INTEL_PT_ABORT_TX) {
1321                ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
1322        } else if (ptq->state->flags & INTEL_PT_ASYNC) {
1323                if (!ptq->state->to_ip)
1324                        ptq->flags = PERF_IP_FLAG_BRANCH |
1325                                     PERF_IP_FLAG_TRACE_END;
1326                else if (ptq->state->from_nr && !ptq->state->to_nr)
1327                        ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1328                                     PERF_IP_FLAG_VMEXIT;
1329                else
1330                        ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
1331                                     PERF_IP_FLAG_ASYNC |
1332                                     PERF_IP_FLAG_INTERRUPT;
1333        } else {
1334                if (ptq->state->from_ip)
1335                        ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
1336                else
1337                        ptq->flags = PERF_IP_FLAG_BRANCH |
1338                                     PERF_IP_FLAG_TRACE_BEGIN;
1339                if (ptq->state->flags & INTEL_PT_IN_TX)
1340                        ptq->flags |= PERF_IP_FLAG_IN_TX;
1341                ptq->insn_len = ptq->state->insn_len;
1342                memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
1343        }
1344
1345        if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
1346                ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
1347        if (ptq->state->type & INTEL_PT_TRACE_END)
1348                ptq->flags |= PERF_IP_FLAG_TRACE_END;
1349}
1350
1351static void intel_pt_setup_time_range(struct intel_pt *pt,
1352                                      struct intel_pt_queue *ptq)
1353{
1354        if (!pt->range_cnt)
1355                return;
1356
1357        ptq->sel_timestamp = pt->time_ranges[0].start;
1358        ptq->sel_idx = 0;
1359
1360        if (ptq->sel_timestamp) {
1361                ptq->sel_start = true;
1362        } else {
1363                ptq->sel_timestamp = pt->time_ranges[0].end;
1364                ptq->sel_start = false;
1365        }
1366}
1367
1368static int intel_pt_setup_queue(struct intel_pt *pt,
1369                                struct auxtrace_queue *queue,
1370                                unsigned int queue_nr)
1371{
1372        struct intel_pt_queue *ptq = queue->priv;
1373
1374        if (list_empty(&queue->head))
1375                return 0;
1376
1377        if (!ptq) {
1378                ptq = intel_pt_alloc_queue(pt, queue_nr);
1379                if (!ptq)
1380                        return -ENOMEM;
1381                queue->priv = ptq;
1382
1383                if (queue->cpu != -1)
1384                        ptq->cpu = queue->cpu;
1385                ptq->tid = queue->tid;
1386
1387                ptq->cbr_seen = UINT_MAX;
1388
1389                if (pt->sampling_mode && !pt->snapshot_mode &&
1390                    pt->timeless_decoding)
1391                        ptq->step_through_buffers = true;
1392
1393                ptq->sync_switch = pt->sync_switch;
1394
1395                intel_pt_setup_time_range(pt, ptq);
1396        }
1397
1398        if (!ptq->on_heap &&
1399            (!ptq->sync_switch ||
1400             ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1401                const struct intel_pt_state *state;
1402                int ret;
1403
1404                if (pt->timeless_decoding)
1405                        return 0;
1406
1407                intel_pt_log("queue %u getting timestamp\n", queue_nr);
1408                intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1409                             queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1410
1411                if (ptq->sel_start && ptq->sel_timestamp) {
1412                        ret = intel_pt_fast_forward(ptq->decoder,
1413                                                    ptq->sel_timestamp);
1414                        if (ret)
1415                                return ret;
1416                }
1417
1418                while (1) {
1419                        state = intel_pt_decode(ptq->decoder);
1420                        if (state->err) {
1421                                if (state->err == INTEL_PT_ERR_NODATA) {
1422                                        intel_pt_log("queue %u has no timestamp\n",
1423                                                     queue_nr);
1424                                        return 0;
1425                                }
1426                                continue;
1427                        }
1428                        if (state->timestamp)
1429                                break;
1430                }
1431
1432                ptq->timestamp = state->timestamp;
1433                intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
1434                             queue_nr, ptq->timestamp);
1435                ptq->state = state;
1436                ptq->have_sample = true;
1437                if (ptq->sel_start && ptq->sel_timestamp &&
1438                    ptq->timestamp < ptq->sel_timestamp)
1439                        ptq->have_sample = false;
1440                intel_pt_sample_flags(ptq);
1441                ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1442                if (ret)
1443                        return ret;
1444                ptq->on_heap = true;
1445        }
1446
1447        return 0;
1448}
1449
1450static int intel_pt_setup_queues(struct intel_pt *pt)
1451{
1452        unsigned int i;
1453        int ret;
1454
1455        for (i = 0; i < pt->queues.nr_queues; i++) {
1456                ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
1457                if (ret)
1458                        return ret;
1459        }
1460        return 0;
1461}
1462
1463static inline bool intel_pt_skip_event(struct intel_pt *pt)
1464{
1465        return pt->synth_opts.initial_skip &&
1466               pt->num_events++ < pt->synth_opts.initial_skip;
1467}
1468
1469/*
1470 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
1471 * Also ensure CBR is first non-skipped event by allowing for 4 more samples
1472 * from this decoder state.
1473 */
1474static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
1475{
1476        return pt->synth_opts.initial_skip &&
1477               pt->num_events + 4 < pt->synth_opts.initial_skip;
1478}
1479
1480static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1481                                   union perf_event *event,
1482                                   struct perf_sample *sample)
1483{
1484        event->sample.header.type = PERF_RECORD_SAMPLE;
1485        event->sample.header.size = sizeof(struct perf_event_header);
1486
1487        sample->pid = ptq->pid;
1488        sample->tid = ptq->tid;
1489        sample->cpu = ptq->cpu;
1490        sample->insn_len = ptq->insn_len;
1491        memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1492}
1493
1494static void intel_pt_prep_b_sample(struct intel_pt *pt,
1495                                   struct intel_pt_queue *ptq,
1496                                   union perf_event *event,
1497                                   struct perf_sample *sample)
1498{
1499        intel_pt_prep_a_sample(ptq, event, sample);
1500
1501        if (!pt->timeless_decoding)
1502                sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1503
1504        sample->ip = ptq->state->from_ip;
1505        sample->addr = ptq->state->to_ip;
1506        sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
1507        sample->period = 1;
1508        sample->flags = ptq->flags;
1509
1510        event->sample.header.misc = sample->cpumode;
1511}
1512
1513static int intel_pt_inject_event(union perf_event *event,
1514                                 struct perf_sample *sample, u64 type)
1515{
1516        event->header.size = perf_event__sample_event_size(sample, type, 0);
1517        return perf_event__synthesize_sample(event, type, 0, sample);
1518}
1519
1520static inline int intel_pt_opt_inject(struct intel_pt *pt,
1521                                      union perf_event *event,
1522                                      struct perf_sample *sample, u64 type)
1523{
1524        if (!pt->synth_opts.inject)
1525                return 0;
1526
1527        return intel_pt_inject_event(event, sample, type);
1528}
1529
1530static int intel_pt_deliver_synth_event(struct intel_pt *pt,
1531                                        union perf_event *event,
1532                                        struct perf_sample *sample, u64 type)
1533{
1534        int ret;
1535
1536        ret = intel_pt_opt_inject(pt, event, sample, type);
1537        if (ret)
1538                return ret;
1539
1540        ret = perf_session__deliver_synth_event(pt->session, event, sample);
1541        if (ret)
1542                pr_err("Intel PT: failed to deliver event, error %d\n", ret);
1543
1544        return ret;
1545}
1546
1547static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1548{
1549        struct intel_pt *pt = ptq->pt;
1550        union perf_event *event = ptq->event_buf;
1551        struct perf_sample sample = { .ip = 0, };
1552        struct dummy_branch_stack {
1553                u64                     nr;
1554                u64                     hw_idx;
1555                struct branch_entry     entries;
1556        } dummy_bs;
1557
1558        if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1559                return 0;
1560
1561        if (intel_pt_skip_event(pt))
1562                return 0;
1563
1564        intel_pt_prep_b_sample(pt, ptq, event, &sample);
1565
1566        sample.id = ptq->pt->branches_id;
1567        sample.stream_id = ptq->pt->branches_id;
1568
1569        /*
1570         * perf report cannot handle events without a branch stack when using
1571         * SORT_MODE__BRANCH so make a dummy one.
1572         */
1573        if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
1574                dummy_bs = (struct dummy_branch_stack){
1575                        .nr = 1,
1576                        .hw_idx = -1ULL,
1577                        .entries = {
1578                                .from = sample.ip,
1579                                .to = sample.addr,
1580                        },
1581                };
1582                sample.branch_stack = (struct branch_stack *)&dummy_bs;
1583        }
1584
1585        if (ptq->sample_ipc)
1586                sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1587        if (sample.cyc_cnt) {
1588                sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1589                ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1590                ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
1591        }
1592
1593        return intel_pt_deliver_synth_event(pt, event, &sample,
1594                                            pt->branches_sample_type);
1595}
1596
1597static void intel_pt_prep_sample(struct intel_pt *pt,
1598                                 struct intel_pt_queue *ptq,
1599                                 union perf_event *event,
1600                                 struct perf_sample *sample)
1601{
1602        intel_pt_prep_b_sample(pt, ptq, event, sample);
1603
1604        if (pt->synth_opts.callchain) {
1605                thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1606                                     pt->synth_opts.callchain_sz + 1,
1607                                     sample->ip, pt->kernel_start);
1608                sample->callchain = ptq->chain;
1609        }
1610
1611        if (pt->synth_opts.last_branch) {
1612                thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1613                                        pt->br_stack_sz);
1614                sample->branch_stack = ptq->last_branch;
1615        }
1616}
1617
1618static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1619{
1620        struct intel_pt *pt = ptq->pt;
1621        union perf_event *event = ptq->event_buf;
1622        struct perf_sample sample = { .ip = 0, };
1623
1624        if (intel_pt_skip_event(pt))
1625                return 0;
1626
1627        intel_pt_prep_sample(pt, ptq, event, &sample);
1628
1629        sample.id = ptq->pt->instructions_id;
1630        sample.stream_id = ptq->pt->instructions_id;
1631        if (pt->synth_opts.quick)
1632                sample.period = 1;
1633        else
1634                sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1635
1636        if (ptq->sample_ipc)
1637                sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1638        if (sample.cyc_cnt) {
1639                sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1640                ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1641                ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1642        }
1643
1644        ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1645
1646        return intel_pt_deliver_synth_event(pt, event, &sample,
1647                                            pt->instructions_sample_type);
1648}
1649
1650static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1651{
1652        struct intel_pt *pt = ptq->pt;
1653        union perf_event *event = ptq->event_buf;
1654        struct perf_sample sample = { .ip = 0, };
1655
1656        if (intel_pt_skip_event(pt))
1657                return 0;
1658
1659        intel_pt_prep_sample(pt, ptq, event, &sample);
1660
1661        sample.id = ptq->pt->transactions_id;
1662        sample.stream_id = ptq->pt->transactions_id;
1663
1664        return intel_pt_deliver_synth_event(pt, event, &sample,
1665                                            pt->transactions_sample_type);
1666}
1667
1668static void intel_pt_prep_p_sample(struct intel_pt *pt,
1669                                   struct intel_pt_queue *ptq,
1670                                   union perf_event *event,
1671                                   struct perf_sample *sample)
1672{
1673        intel_pt_prep_sample(pt, ptq, event, sample);
1674
1675        /*
1676         * Zero IP is used to mean "trace start" but that is not the case for
1677         * power or PTWRITE events with no IP, so clear the flags.
1678         */
1679        if (!sample->ip)
1680                sample->flags = 0;
1681}
1682
1683static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1684{
1685        struct intel_pt *pt = ptq->pt;
1686        union perf_event *event = ptq->event_buf;
1687        struct perf_sample sample = { .ip = 0, };
1688        struct perf_synth_intel_ptwrite raw;
1689
1690        if (intel_pt_skip_event(pt))
1691                return 0;
1692
1693        intel_pt_prep_p_sample(pt, ptq, event, &sample);
1694
1695        sample.id = ptq->pt->ptwrites_id;
1696        sample.stream_id = ptq->pt->ptwrites_id;
1697
1698        raw.flags = 0;
1699        raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1700        raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1701
1702        sample.raw_size = perf_synth__raw_size(raw);
1703        sample.raw_data = perf_synth__raw_data(&raw);
1704
1705        return intel_pt_deliver_synth_event(pt, event, &sample,
1706                                            pt->ptwrites_sample_type);
1707}
1708
1709static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1710{
1711        struct intel_pt *pt = ptq->pt;
1712        union perf_event *event = ptq->event_buf;
1713        struct perf_sample sample = { .ip = 0, };
1714        struct perf_synth_intel_cbr raw;
1715        u32 flags;
1716
1717        if (intel_pt_skip_cbr_event(pt))
1718                return 0;
1719
1720        ptq->cbr_seen = ptq->state->cbr;
1721
1722        intel_pt_prep_p_sample(pt, ptq, event, &sample);
1723
1724        sample.id = ptq->pt->cbr_id;
1725        sample.stream_id = ptq->pt->cbr_id;
1726
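        /*
         * raw.flags overlays the cbr / max_nonturbo bitfields of
         * struct perf_synth_intel_cbr, so raw.cbr is valid below.
         */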
1727        flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1728        raw.flags = cpu_to_le32(flags);
1729        raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
1730        raw.reserved3 = 0;
1731
1732        sample.raw_size = perf_synth__raw_size(raw);
1733        sample.raw_data = perf_synth__raw_data(&raw);
1734
1735        return intel_pt_deliver_synth_event(pt, event, &sample,
1736                                            pt->pwr_events_sample_type);
1737}
1738
1739static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
1740{
1741        struct intel_pt *pt = ptq->pt;
1742        union perf_event *event = ptq->event_buf;
1743        struct perf_sample sample = { .ip = 0, };
1744        struct perf_synth_intel_psb raw;
1745
1746        if (intel_pt_skip_event(pt))
1747                return 0;
1748
1749        intel_pt_prep_p_sample(pt, ptq, event, &sample);
1750
1751        sample.id = ptq->pt->psb_id;
1752        sample.stream_id = ptq->pt->psb_id;
1753        sample.flags = 0;
1754
1755        raw.reserved = 0;
1756        raw.offset = ptq->state->psb_offset;
1757
1758        sample.raw_size = perf_synth__raw_size(raw);
1759        sample.raw_data = perf_synth__raw_data(&raw);
1760
1761        return intel_pt_deliver_synth_event(pt, event, &sample,
1762                                            pt->pwr_events_sample_type);
1763}
1764
1765static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
1766{
1767        struct intel_pt *pt = ptq->pt;
1768        union perf_event *event = ptq->event_buf;
1769        struct perf_sample sample = { .ip = 0, };
1770        struct perf_synth_intel_mwait raw;
1771
1772        if (intel_pt_skip_event(pt))
1773                return 0;
1774
1775        intel_pt_prep_p_sample(pt, ptq, event, &sample);
1776
1777        sample.id = ptq->pt->mwait_id;
1778        sample.stream_id = ptq->pt->mwait_id;
1779
1780        raw.reserved = 0;
1781        raw.payload = cpu_to_le64(ptq->state->mwait_payload);
1782
1783        sample.raw_size = perf_synth__raw_size(raw);
1784        sample.raw_data = perf_synth__raw_data(&raw);
1785
1786        return intel_pt_deliver_synth_event(pt, event, &sample,
1787                                            pt->pwr_events_sample_type);
1788}
1789
1790static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
1791{
1792        struct intel_pt *pt = ptq->pt;
1793        union perf_event *event = ptq->event_buf;
1794        struct perf_sample sample = { .ip = 0, };
1795        struct perf_synth_intel_pwre raw;
1796
1797        if (intel_pt_skip_event(pt))
1798                return 0;
1799
1800        intel_pt_prep_p_sample(pt, ptq, event, &sample);
1801
1802        sample.id = ptq->pt->pwre_id;
1803        sample.stream_id = ptq->pt->pwre_id;
1804
1805        raw.reserved = 0;
1806        raw.payload = cpu_to_le64(ptq->state->pwre_payload);
1807
1808        sample.raw_size = perf_synth__raw_size(raw);
1809        sample.raw_data = perf_synth__raw_data(&raw);
1810
1811        return intel_pt_deliver_synth_event(pt, event, &sample,
1812                                            pt->pwr_events_sample_type);
1813}
1814
1815static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
1816{
1817        struct intel_pt *pt = ptq->pt;
1818        union perf_event *event = ptq->event_buf;
1819        struct perf_sample sample = { .ip = 0, };
1820        struct perf_synth_intel_exstop raw;
1821
1822        if (intel_pt_skip_event(pt))
1823                return 0;
1824
1825        intel_pt_prep_p_sample(pt, ptq, event, &sample);
1826
1827        sample.id = ptq->pt->exstop_id;
1828        sample.stream_id = ptq->pt->exstop_id;
1829
1830        raw.flags = 0;
1831        raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1832
1833        sample.raw_size = perf_synth__raw_size(raw);
1834        sample.raw_data = perf_synth__raw_data(&raw);
1835
1836        return intel_pt_deliver_synth_event(pt, event, &sample,
1837                                            pt->pwr_events_sample_type);
1838}
1839
1840static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
1841{
1842        struct intel_pt *pt = ptq->pt;
1843        union perf_event *event = ptq->event_buf;
1844        struct perf_sample sample = { .ip = 0, };
1845        struct perf_synth_intel_pwrx raw;
1846
1847        if (intel_pt_skip_event(pt))
1848                return 0;
1849
1850        intel_pt_prep_p_sample(pt, ptq, event, &sample);
1851
1852        sample.id = ptq->pt->pwrx_id;
1853        sample.stream_id = ptq->pt->pwrx_id;
1854
1855        raw.reserved = 0;
1856        raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
1857
1858        sample.raw_size = perf_synth__raw_size(raw);
1859        sample.raw_data = perf_synth__raw_data(&raw);
1860
1861        return intel_pt_deliver_synth_event(pt, event, &sample,
1862                                            pt->pwr_events_sample_type);
1863}
1864
1865/*
1866 * PEBS gp_regs array indexes, offset by 1 so that 0 means not present.
1867 * Refer to intel_pt_add_gp_regs().
1868 */
1869static const int pebs_gp_regs[] = {
1870        [PERF_REG_X86_FLAGS]    = 1,
1871        [PERF_REG_X86_IP]       = 2,
1872        [PERF_REG_X86_AX]       = 3,
1873        [PERF_REG_X86_CX]       = 4,
1874        [PERF_REG_X86_DX]       = 5,
1875        [PERF_REG_X86_BX]       = 6,
1876        [PERF_REG_X86_SP]       = 7,
1877        [PERF_REG_X86_BP]       = 8,
1878        [PERF_REG_X86_SI]       = 9,
1879        [PERF_REG_X86_DI]       = 10,
1880        [PERF_REG_X86_R8]       = 11,
1881        [PERF_REG_X86_R9]       = 12,
1882        [PERF_REG_X86_R10]      = 13,
1883        [PERF_REG_X86_R11]      = 14,
1884        [PERF_REG_X86_R12]      = 15,
1885        [PERF_REG_X86_R13]      = 16,
1886        [PERF_REG_X86_R14]      = 17,
1887        [PERF_REG_X86_R15]      = 18,
1888};
1889
1890static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
1891                                 const struct intel_pt_blk_items *items,
1892                                 u64 regs_mask)
1893{
1894        const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
1895        u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
1896        u32 bit;
1897        int i;
1898
1899        for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
1900                /* Get the PEBS gp_regs array index */
1901                int n = pebs_gp_regs[i] - 1;
1902
1903                if (n < 0)
1904                        continue;
1905                /*
1906                 * Add only registers that were requested (i.e. 'regs_mask') and
1907                 * that were provided (i.e. 'mask'), and update the resulting
1908                 * mask (i.e. 'intr_regs->mask') accordingly.
1909                 */
1910                if (mask & 1 << n && regs_mask & bit) {
1911                        intr_regs->mask |= bit;
1912                        *pos++ = gp_regs[n];
1913                }
1914        }
1915
1916        return pos;
1917}
1918
1919#ifndef PERF_REG_X86_XMM0
1920#define PERF_REG_X86_XMM0 32
1921#endif
1922
1923static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
1924                             const struct intel_pt_blk_items *items,
1925                             u64 regs_mask)
1926{
1927        u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
1928        const u64 *xmm = items->xmm;
1929
1930        /*
1931         * If there are any XMM registers, then there should be all of them.
1932         * Nevertheless, follow the logic to add only registers that were
1933         * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
1934         * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
1935         */
1936        intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
1937
1938        for (; mask; mask >>= 1, xmm++) {
1939                if (mask & 1)
1940                        *pos++ = *xmm;
1941        }
1942}
1943
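/* Flag bits of the LBR_INFO word stored with each PEBS LBR entry */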
1944#define LBR_INFO_MISPRED        (1ULL << 63)
1945#define LBR_INFO_IN_TX          (1ULL << 62)
1946#define LBR_INFO_ABORT          (1ULL << 61)
1947#define LBR_INFO_CYCLES         0xffff
1948
1949/* Refer to the kernel's intel_pmu_store_pebs_lbrs() */
1950static u64 intel_pt_lbr_flags(u64 info)
1951{
1952        union {
1953                struct branch_flags flags;
1954                u64 result;
1955        } u;
1956
1957        u.result          = 0;
1958        u.flags.mispred   = !!(info & LBR_INFO_MISPRED);
1959        u.flags.predicted = !(info & LBR_INFO_MISPRED);
1960        u.flags.in_tx     = !!(info & LBR_INFO_IN_TX);
1961        u.flags.abort     = !!(info & LBR_INFO_ABORT);
1962        u.flags.cycles    = info & LBR_INFO_CYCLES;
1963
1964        return u.result;
1965}
1966
1967static void intel_pt_add_lbrs(struct branch_stack *br_stack,
1968                              const struct intel_pt_blk_items *items)
1969{
1970        u64 *to;
1971        int i;
1972
1973        br_stack->nr = 0;
1974
1975        to = &br_stack->entries[0].from;
1976
1977        for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
1978                u32 mask = items->mask[i];
1979                const u64 *from = items->val[i];
1980
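                /*
                 * Each LBR entry is 3 consecutive values: FROM, TO and INFO.
                 * Copy an entry only if all 3 values are present (3 mask bits).
                 */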
1981                for (; mask; mask >>= 3, from += 3) {
1982                        if ((mask & 7) == 7) {
1983                                *to++ = from[0];
1984                                *to++ = from[1];
1985                                *to++ = intel_pt_lbr_flags(from[2]);
1986                                br_stack->nr += 1;
1987                        }
1988                }
1989        }
1990}
1991
1992static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
1993{
1994        const struct intel_pt_blk_items *items = &ptq->state->items;
1995        struct perf_sample sample = { .ip = 0, };
1996        union perf_event *event = ptq->event_buf;
1997        struct intel_pt *pt = ptq->pt;
1998        u64 sample_type = evsel->core.attr.sample_type;
1999        u8 cpumode;
2000        u64 regs[8 * sizeof(sample.intr_regs.mask)];
2001
2002        if (intel_pt_skip_event(pt))
2003                return 0;
2004
2005        intel_pt_prep_a_sample(ptq, event, &sample);
2006
2007        sample.id = id;
2008        sample.stream_id = id;
2009
2010        if (!evsel->core.attr.freq)
2011                sample.period = evsel->core.attr.sample_period;
2012
2013        /* No support for non-zero CS base */
2014        if (items->has_ip)
2015                sample.ip = items->ip;
2016        else if (items->has_rip)
2017                sample.ip = items->rip;
2018        else
2019                sample.ip = ptq->state->from_ip;
2020
2021        cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
2022
2023        event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
2024
2025        sample.cpumode = cpumode;
2026
2027        if (sample_type & PERF_SAMPLE_TIME) {
2028                u64 timestamp = 0;
2029
2030                if (items->has_timestamp)
2031                        timestamp = items->timestamp;
2032                else if (!pt->timeless_decoding)
2033                        timestamp = ptq->timestamp;
2034                if (timestamp)
2035                        sample.time = tsc_to_perf_time(timestamp, &pt->tc);
2036        }
2037
2038        if (sample_type & PERF_SAMPLE_CALLCHAIN &&
2039            pt->synth_opts.callchain) {
2040                thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
2041                                     pt->synth_opts.callchain_sz, sample.ip,
2042                                     pt->kernel_start);
2043                sample.callchain = ptq->chain;
2044        }
2045
2046        if (sample_type & PERF_SAMPLE_REGS_INTR &&
2047            (items->mask[INTEL_PT_GP_REGS_POS] ||
2048             items->mask[INTEL_PT_XMM_POS])) {
2049                u64 regs_mask = evsel->core.attr.sample_regs_intr;
2050                u64 *pos;
2051
2052                sample.intr_regs.abi = items->is_32_bit ?
2053                                       PERF_SAMPLE_REGS_ABI_32 :
2054                                       PERF_SAMPLE_REGS_ABI_64;
2055                sample.intr_regs.regs = regs;
2056
2057                pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
2058
2059                intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
2060        }
2061
2062        if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
2063                if (items->mask[INTEL_PT_LBR_0_POS] ||
2064                    items->mask[INTEL_PT_LBR_1_POS] ||
2065                    items->mask[INTEL_PT_LBR_2_POS]) {
2066                        intel_pt_add_lbrs(ptq->last_branch, items);
2067                } else if (pt->synth_opts.last_branch) {
2068                        thread_stack__br_sample(ptq->thread, ptq->cpu,
2069                                                ptq->last_branch,
2070                                                pt->br_stack_sz);
2071                } else {
2072                        ptq->last_branch->nr = 0;
2073                }
2074                sample.branch_stack = ptq->last_branch;
2075        }
2076
2077        if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
2078                sample.addr = items->mem_access_address;
2079
2080        if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
2081                /*
2082                 * Refer to the kernel's setup_pebs_adaptive_sample_data()
2083                 * and intel_hsw_weight().
2084                 */
2085                if (items->has_mem_access_latency) {
2086                        u64 weight = items->mem_access_latency >> 32;
2087
2088                        /*
2089                         * Starting from SPR, the mem access latency field
2090                         * contains both cache latency [47:32] and instruction
2091                         * latency [15:0]. The cache latency is the same as the
2092                         * mem access latency on previous platforms.
2093                         *
2094                         * In practice, no memory access could last longer
2095                         * than 4G cycles. Use latency >> 32 to distinguish
2096                         * the two formats of the mem access latency field.
2097                         */
2098                        if (weight > 0) {
2099                                sample.weight = weight & 0xffff;
2100                                sample.ins_lat = items->mem_access_latency & 0xffff;
2101                        } else
2102                                sample.weight = items->mem_access_latency;
2103                }
2104                if (!sample.weight && items->has_tsx_aux_info) {
2105                        /* Cycles last block */
2106                        sample.weight = (u32)items->tsx_aux_info;
2107                }
2108        }
2109
2110        if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
2111                u64 ax = items->has_rax ? items->rax : 0;
2112                /* Refer to the kernel's intel_hsw_transaction() */
2113                u64 txn = (u8)(items->tsx_aux_info >> 32);
2114
2115                /* For RTM XABORTs also log the abort code from AX */
2116                if (txn & PERF_TXN_TRANSACTION && ax & 1)
2117                        txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
2118                sample.transaction = txn;
2119        }
2120
2121        return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
2122}
2123
2124static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
2125{
2126        struct intel_pt *pt = ptq->pt;
2127        struct evsel *evsel = pt->pebs_evsel;
2128        u64 id = evsel->core.id[0];
2129
2130        return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
2131}
2132
2133static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
2134{
2135        const struct intel_pt_blk_items *items = &ptq->state->items;
2136        struct intel_pt_pebs_event *pe;
2137        struct intel_pt *pt = ptq->pt;
2138        int err = -EINVAL;
2139        int hw_id;
2140
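        /*
         * Without applicable counter information, fall back to the single
         * PEBS event. Otherwise synthesize a sample for each applicable
         * counter, using the evsel recorded by PERF_RECORD_AUX_OUTPUT_HW_ID.
         */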
2141        if (!items->has_applicable_counters || !items->applicable_counters) {
2142                if (!pt->single_pebs)
2143                        pr_err("PEBS-via-PT record with no applicable_counters\n");
2144                return intel_pt_synth_single_pebs_sample(ptq);
2145        }
2146
2147        for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) {
2148                pe = &ptq->pebs[hw_id];
2149                if (!pe->evsel) {
2150                        if (!pt->single_pebs)
2151                                pr_err("PEBS-via-PT record with no matching event, hw_id %d\n",
2152                                       hw_id);
2153                        return intel_pt_synth_single_pebs_sample(ptq);
2154                }
2155                err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
2156                if (err)
2157                        return err;
2158        }
2159
2160        return err;
2161}
2162
2163static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
2164                                pid_t pid, pid_t tid, u64 ip, u64 timestamp)
2165{
2166        union perf_event event;
2167        char msg[MAX_AUXTRACE_ERROR_MSG];
2168        int err;
2169
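        /* Optionally suppress overflow and trace-data-lost errors */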
2170        if (pt->synth_opts.error_minus_flags) {
2171                if (code == INTEL_PT_ERR_OVR &&
2172                    pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
2173                        return 0;
2174                if (code == INTEL_PT_ERR_LOST &&
2175                    pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
2176                        return 0;
2177        }
2178
2179        intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
2180
2181        auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
2182                             code, cpu, pid, tid, ip, msg, timestamp);
2183
2184        err = perf_session__deliver_synth_event(pt->session, &event, NULL);
2185        if (err)
2186                pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
2187                       err);
2188
2189        return err;
2190}
2191
2192static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
2193                                 const struct intel_pt_state *state)
2194{
2195        struct intel_pt *pt = ptq->pt;
2196        u64 tm = ptq->timestamp;
2197
2198        tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
2199
2200        return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
2201                                    ptq->tid, state->from_ip, tm);
2202}
2203
2204static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
2205{
2206        struct auxtrace_queue *queue;
2207        pid_t tid = ptq->next_tid;
2208        int err;
2209
2210        if (tid == -1)
2211                return 0;
2212
2213        intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
2214
2215        err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
2216
2217        queue = &pt->queues.queue_array[ptq->queue_nr];
2218        intel_pt_set_pid_tid_cpu(pt, queue);
2219
2220        ptq->next_tid = -1;
2221
2222        return err;
2223}
2224
2225static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
2226{
2227        struct intel_pt *pt = ptq->pt;
2228
2229        return ip == pt->switch_ip &&
2230               (ptq->flags & PERF_IP_FLAG_BRANCH) &&
2231               !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
2232                               PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
2233}
2234
2235#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
2236                          INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
2237
2238static int intel_pt_sample(struct intel_pt_queue *ptq)
2239{
2240        const struct intel_pt_state *state = ptq->state;
2241        struct intel_pt *pt = ptq->pt;
2242        int err;
2243
2244        if (!ptq->have_sample)
2245                return 0;
2246
2247        ptq->have_sample = false;
2248
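        /*
         * With approx_ipc, use the decoder's approximate cycle count so that
         * IPC can always be sampled. Otherwise use the accurate cycle count
         * and sample IPC only when the decoder indicates it is valid.
         */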
2249        if (pt->synth_opts.approx_ipc) {
2250                ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2251                ptq->ipc_cyc_cnt = ptq->state->cycles;
2252                ptq->sample_ipc = true;
2253        } else {
2254                ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2255                ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
2256                ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
2257        }
2258
2259        /*
2260         * Do PEBS first to allow for the possibility that the PEBS timestamp
2261         * precedes the current timestamp.
2262         */
2263        if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
2264                err = intel_pt_synth_pebs_sample(ptq);
2265                if (err)
2266                        return err;
2267        }
2268
2269        if (pt->sample_pwr_events) {
2270                if (state->type & INTEL_PT_PSB_EVT) {
2271                        err = intel_pt_synth_psb_sample(ptq);
2272                        if (err)
2273                                return err;
2274                }
2275                if (ptq->state->cbr != ptq->cbr_seen) {
2276                        err = intel_pt_synth_cbr_sample(ptq);
2277                        if (err)
2278                                return err;
2279                }
2280                if (state->type & INTEL_PT_PWR_EVT) {
2281                        if (state->type & INTEL_PT_MWAIT_OP) {
2282                                err = intel_pt_synth_mwait_sample(ptq);
2283                                if (err)
2284                                        return err;
2285                        }
2286                        if (state->type & INTEL_PT_PWR_ENTRY) {
2287                                err = intel_pt_synth_pwre_sample(ptq);
2288                                if (err)
2289                                        return err;
2290                        }
2291                        if (state->type & INTEL_PT_EX_STOP) {
2292                                err = intel_pt_synth_exstop_sample(ptq);
2293                                if (err)
2294                                        return err;
2295                        }
2296                        if (state->type & INTEL_PT_PWR_EXIT) {
2297                                err = intel_pt_synth_pwrx_sample(ptq);
2298                                if (err)
2299                                        return err;
2300                        }
2301                }
2302        }
2303
2304        if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
2305                err = intel_pt_synth_instruction_sample(ptq);
2306                if (err)
2307                        return err;
2308        }
2309
2310        if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
2311                err = intel_pt_synth_transaction_sample(ptq);
2312                if (err)
2313                        return err;
2314        }
2315
2316        if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
2317                err = intel_pt_synth_ptwrite_sample(ptq);
2318                if (err)
2319                        return err;
2320        }
2321
2322        if (!(state->type & INTEL_PT_BRANCH))
2323                return 0;
2324
2325        if (pt->use_thread_stack) {
2326                thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
2327                                    state->from_ip, state->to_ip, ptq->insn_len,
2328                                    state->trace_nr, pt->callstack,
2329                                    pt->br_stack_sz_plus,
2330                                    pt->mispred_all);
2331        } else {
2332                thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2333        }
2334
2335        if (pt->sample_branches) {
2336                if (state->from_nr != state->to_nr &&
2337                    state->from_ip && state->to_ip) {
2338                        struct intel_pt_state *st = (struct intel_pt_state *)state;
2339                        u64 to_ip = st->to_ip;
2340                        u64 from_ip = st->from_ip;
2341
2342                        /*
2343                         * perf cannot handle having different machines for ip
2344                         * and addr, so create 2 branches.
2345                         */
2346                        st->to_ip = 0;
2347                        err = intel_pt_synth_branch_sample(ptq);
2348                        if (err)
2349                                return err;
2350                        st->from_ip = 0;
2351                        st->to_ip = to_ip;
2352                        err = intel_pt_synth_branch_sample(ptq);
2353                        st->from_ip = from_ip;
2354                } else {
2355                        err = intel_pt_synth_branch_sample(ptq);
2356                }
2357                if (err)
2358                        return err;
2359        }
2360
2361        if (!ptq->sync_switch)
2362                return 0;
2363
2364        if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2365                switch (ptq->switch_state) {
2366                case INTEL_PT_SS_NOT_TRACING:
2367                case INTEL_PT_SS_UNKNOWN:
2368                case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2369                        err = intel_pt_next_tid(pt, ptq);
2370                        if (err)
2371                                return err;
2372                        ptq->switch_state = INTEL_PT_SS_TRACING;
2373                        break;
2374                default:
2375                        ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2376                        return 1;
2377                }
2378        } else if (!state->to_ip) {
2379                ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2380        } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2381                ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2382        } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2383                   state->to_ip == pt->ptss_ip &&
2384                   (ptq->flags & PERF_IP_FLAG_CALL)) {
2385                ptq->switch_state = INTEL_PT_SS_TRACING;
2386        }
2387
2388        return 0;
2389}
2390
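/*
 * Find the address of the kernel's __switch_to() (the "switch ip") and also
 * the address of the function that traces sched_switch (ptss_ip). These are
 * used to follow context switches directly from the trace.
 */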
2391static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
2392{
2393        struct machine *machine = pt->machine;
2394        struct map *map;
2395        struct symbol *sym, *start;
2396        u64 ip, switch_ip = 0;
2397        const char *ptss;
2398
2399        if (ptss_ip)
2400                *ptss_ip = 0;
2401
2402        map = machine__kernel_map(machine);
2403        if (!map)
2404                return 0;
2405
2406        if (map__load(map))
2407                return 0;
2408
2409        start = dso__first_symbol(map->dso);
2410
2411        for (sym = start; sym; sym = dso__next_symbol(sym)) {
2412                if (sym->binding == STB_GLOBAL &&
2413                    !strcmp(sym->name, "__switch_to")) {
2414                        ip = map->unmap_ip(map, sym->start);
2415                        if (ip >= map->start && ip < map->end) {
2416                                switch_ip = ip;
2417                                break;
2418                        }
2419                }
2420        }
2421
2422        if (!switch_ip || !ptss_ip)
2423                return 0;
2424
2425        if (pt->have_sched_switch == 1)
2426                ptss = "perf_trace_sched_switch";
2427        else
2428                ptss = "__perf_event_task_sched_out";
2429
2430        for (sym = start; sym; sym = dso__next_symbol(sym)) {
2431                if (!strcmp(sym->name, ptss)) {
2432                        ip = map->unmap_ip(map, sym->start);
2433                        if (ip >= map->start && ip < map->end) {
2434                                *ptss_ip = ip;
2435                                break;
2436                        }
2437                }
2438        }
2439
2440        return switch_ip;
2441}
2442
2443static void intel_pt_enable_sync_switch(struct intel_pt *pt)
2444{
2445        unsigned int i;
2446
2447        pt->sync_switch = true;
2448
2449        for (i = 0; i < pt->queues.nr_queues; i++) {
2450                struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2451                struct intel_pt_queue *ptq = queue->priv;
2452
2453                if (ptq)
2454                        ptq->sync_switch = true;
2455        }
2456}
2457
2458/*
2459 * To filter against time ranges, it is only necessary to look at the next start
2460 * or end time.
2461 */
2462static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2463{
2464        struct intel_pt *pt = ptq->pt;
2465
2466        if (ptq->sel_start) {
2467                /* Next time is an end time */
2468                ptq->sel_start = false;
2469                ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2470                return true;
2471        } else if (ptq->sel_idx + 1 < pt->range_cnt) {
2472                /* Next time is a start time */
2473                ptq->sel_start = true;
2474                ptq->sel_idx += 1;
2475                ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2476                return true;
2477        }
2478
2479        /* No next time */
2480        return false;
2481}
2482
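/*
 * Apply the selected time ranges (see pt->time_ranges): fast forward the
 * decoder over periods before the next start time, and stop decoding after
 * the last end time. Returns 1 to stop decoding, 0 to continue, or a
 * negative error code.
 */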
2483static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2484{
2485        int err;
2486
2487        while (1) {
2488                if (ptq->sel_start) {
2489                        if (ptq->timestamp >= ptq->sel_timestamp) {
2490                                /* After start time, so consider next time */
2491                                intel_pt_next_time(ptq);
2492                                if (!ptq->sel_timestamp) {
2493                                        /* No end time */
2494                                        return 0;
2495                                }
2496                                /* Check against end time */
2497                                continue;
2498                        }
2499                        /* Before start time, so fast forward */
2500                        ptq->have_sample = false;
2501                        if (ptq->sel_timestamp > *ff_timestamp) {
2502                                if (ptq->sync_switch) {
2503                                        intel_pt_next_tid(ptq->pt, ptq);
2504                                        ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2505                                }
2506                                *ff_timestamp = ptq->sel_timestamp;
2507                                err = intel_pt_fast_forward(ptq->decoder,
2508                                                            ptq->sel_timestamp);
2509                                if (err)
2510                                        return err;
2511                        }
2512                        return 0;
2513                } else if (ptq->timestamp > ptq->sel_timestamp) {
2514                        /* After end time, so consider next time */
2515                        if (!intel_pt_next_time(ptq)) {
2516                                /* No next time range, so stop decoding */
2517                                ptq->have_sample = false;
2518                                ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2519                                return 1;
2520                        }
2521                        /* Check against next start time */
2522                        continue;
2523                } else {
2524                        /* Before end time */
2525                        return 0;
2526                }
2527        }
2528}
2529
2530static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2531{
2532        const struct intel_pt_state *state = ptq->state;
2533        struct intel_pt *pt = ptq->pt;
2534        u64 ff_timestamp = 0;
2535        int err;
2536
2537        if (!pt->kernel_start) {
2538                pt->kernel_start = machine__kernel_start(pt->machine);
2539                if (pt->per_cpu_mmaps &&
2540                    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
2541                    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
2542                    !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
2543                        pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2544                        if (pt->switch_ip) {
2545                                intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
2546                                             pt->switch_ip, pt->ptss_ip);
2547                                intel_pt_enable_sync_switch(pt);
2548                        }
2549                }
2550        }
2551
2552        intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2553                     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2554        while (1) {
2555                err = intel_pt_sample(ptq);
2556                if (err)
2557                        return err;
2558
2559                state = intel_pt_decode(ptq->decoder);
2560                if (state->err) {
2561                        if (state->err == INTEL_PT_ERR_NODATA)
2562                                return 1;
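                        /*
                         * An error while executing in the kernel means the
                         * switch ip may have been missed, so stop using
                         * sync_switch for this queue and pick up the next
                         * tid now.
                         */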
2563                        if (ptq->sync_switch &&
2564                            state->from_ip >= pt->kernel_start) {
2565                                ptq->sync_switch = false;
2566                                intel_pt_next_tid(pt, ptq);
2567                        }
2568                        ptq->timestamp = state->est_timestamp;
2569                        if (pt->synth_opts.errors) {
2570                                err = intel_ptq_synth_error(ptq, state);
2571                                if (err)
2572                                        return err;
2573                        }
2574                        continue;
2575                }
2576
2577                ptq->state = state;
2578                ptq->have_sample = true;
2579                intel_pt_sample_flags(ptq);
2580
2581                /* Use estimated TSC upon return to user space */
2582                if (pt->est_tsc &&
2583                    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
2584                    state->to_ip && state->to_ip < pt->kernel_start) {
2585                        intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2586                                     state->timestamp, state->est_timestamp);
2587                        ptq->timestamp = state->est_timestamp;
2588                /* Use estimated TSC in unknown switch state */
2589                } else if (ptq->sync_switch &&
2590                           ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2591                           intel_pt_is_switch_ip(ptq, state->to_ip) &&
2592                           ptq->next_tid == -1) {
2593                        intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2594                                     state->timestamp, state->est_timestamp);
2595                        ptq->timestamp = state->est_timestamp;
2596                } else if (state->timestamp > ptq->timestamp) {
2597                        ptq->timestamp = state->timestamp;
2598                }
2599
2600                if (ptq->sel_timestamp) {
2601                        err = intel_pt_time_filter(ptq, &ff_timestamp);
2602                        if (err)
2603                                return err;
2604                }
2605
2606                if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2607                        *timestamp = ptq->timestamp;
2608                        return 0;
2609                }
2610        }
2611        return 0;
2612}
2613
2614static inline int intel_pt_update_queues(struct intel_pt *pt)
2615{
2616        if (pt->queues.new_data) {
2617                pt->queues.new_data = false;
2618                return intel_pt_setup_queues(pt);
2619        }
2620        return 0;
2621}
2622
2623static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
2624{
2625        unsigned int queue_nr;
2626        u64 ts;
2627        int ret;
2628
2629        while (1) {
2630                struct auxtrace_queue *queue;
2631                struct intel_pt_queue *ptq;
2632
2633                if (!pt->heap.heap_cnt)
2634                        return 0;
2635
2636                if (pt->heap.heap_array[0].ordinal >= timestamp)
2637                        return 0;
2638
2639                queue_nr = pt->heap.heap_array[0].queue_nr;
2640                queue = &pt->queues.queue_array[queue_nr];
2641                ptq = queue->priv;
2642
2643                intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
2644                             queue_nr, pt->heap.heap_array[0].ordinal,
2645                             timestamp);
2646
2647                auxtrace_heap__pop(&pt->heap);
2648
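                /*
                 * Decode this queue no further than just past the next
                 * queue's timestamp, so that queues are processed in
                 * timestamp order.
                 */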
2649                if (pt->heap.heap_cnt) {
2650                        ts = pt->heap.heap_array[0].ordinal + 1;
2651                        if (ts > timestamp)
2652                                ts = timestamp;
2653                } else {
2654                        ts = timestamp;
2655                }
2656
2657                intel_pt_set_pid_tid_cpu(pt, queue);
2658
2659                ret = intel_pt_run_decoder(ptq, &ts);
2660
2661                if (ret < 0) {
2662                        auxtrace_heap__add(&pt->heap, queue_nr, ts);
2663                        return ret;
2664                }
2665
2666                if (!ret) {
2667                        ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
2668                        if (ret < 0)
2669                                return ret;
2670                } else {
2671                        ptq->on_heap = false;
2672                }
2673        }
2674
2675        return 0;
2676}
2677
2678static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
2679                                            u64 time_)
2680{
2681        struct auxtrace_queues *queues = &pt->queues;
2682        unsigned int i;
2683        u64 ts = 0;
2684
2685        for (i = 0; i < queues->nr_queues; i++) {
2686                struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2687                struct intel_pt_queue *ptq = queue->priv;
2688
2689                if (ptq && (tid == -1 || ptq->tid == tid)) {
2690                        ptq->time = time_;
2691                        intel_pt_set_pid_tid_cpu(pt, queue);
2692                        intel_pt_run_decoder(ptq, &ts);
2693                }
2694        }
2695        return 0;
2696}
2697
2698static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
2699                                            struct auxtrace_queue *queue,
2700                                            struct perf_sample *sample)
2701{
2702        struct machine *m = ptq->pt->machine;
2703
2704        ptq->pid = sample->pid;
2705        ptq->tid = sample->tid;
2706        ptq->cpu = queue->cpu;
2707
2708        intel_pt_log("queue %u cpu %d pid %d tid %d\n",
2709                     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2710
2711        thread__zput(ptq->thread);
2712
2713        if (ptq->tid == -1)
2714                return;
2715
2716        if (ptq->pid == -1) {
2717                ptq->thread = machine__find_thread(m, -1, ptq->tid);
2718                if (ptq->thread)
2719                        ptq->pid = ptq->thread->pid_;
2720                return;
2721        }
2722
2723        ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
2724}
2725
2726static int intel_pt_process_timeless_sample(struct intel_pt *pt,
2727                                            struct perf_sample *sample)
2728{
2729        struct auxtrace_queue *queue;
2730        struct intel_pt_queue *ptq;
2731        u64 ts = 0;
2732
2733        queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
2734        if (!queue)
2735                return -EINVAL;
2736
2737        ptq = queue->priv;
2738        if (!ptq)
2739                return 0;
2740
2741        ptq->stop = false;
2742        ptq->time = sample->time;
2743        intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
2744        intel_pt_run_decoder(ptq, &ts);
2745        return 0;
2746}
2747
2748static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
2749{
2750        return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
2751                                    sample->pid, sample->tid, 0, sample->time);
2752}
2753
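/*
 * Queues are usually created one per cpu, in cpu order, so first try the
 * queue with the same index as the cpu number, then search backwards and
 * finally forwards for a queue with a matching cpu.
 */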
2754static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
2755{
2756        unsigned i, j;
2757
2758        if (cpu < 0 || !pt->queues.nr_queues)
2759                return NULL;
2760
2761        if ((unsigned)cpu >= pt->queues.nr_queues)
2762                i = pt->queues.nr_queues - 1;
2763        else
2764                i = cpu;
2765
2766        if (pt->queues.queue_array[i].cpu == cpu)
2767                return pt->queues.queue_array[i].priv;
2768
2769        for (j = 0; i > 0; j++) {
2770                if (pt->queues.queue_array[--i].cpu == cpu)
2771                        return pt->queues.queue_array[i].priv;
2772        }
2773
2774        for (; j < pt->queues.nr_queues; j++) {
2775                if (pt->queues.queue_array[j].cpu == cpu)
2776                        return pt->queues.queue_array[j].priv;
2777        }
2778
2779        return NULL;
2780}
2781
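/*
 * Handle a context switch when sync_switch is in use. Returns 1 if the caller
 * should update the current tid immediately, 0 if the tid change is deferred
 * until the switch ip is reached, or a negative error code.
 */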
2782static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
2783                                u64 timestamp)
2784{
2785        struct intel_pt_queue *ptq;
2786        int err;
2787
2788        if (!pt->sync_switch)
2789                return 1;
2790
2791        ptq = intel_pt_cpu_to_ptq(pt, cpu);
2792        if (!ptq || !ptq->sync_switch)
2793                return 1;
2794
2795        switch (ptq->switch_state) {
2796        case INTEL_PT_SS_NOT_TRACING:
2797                break;
2798        case INTEL_PT_SS_UNKNOWN:
2799        case INTEL_PT_SS_TRACING:
2800                ptq->next_tid = tid;
2801                ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
2802                return 0;
2803        case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2804                if (!ptq->on_heap) {
2805                        ptq->timestamp = perf_time_to_tsc(timestamp,
2806                                                          &pt->tc);
2807                        err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
2808                                                 ptq->timestamp);
2809                        if (err)
2810                                return err;
2811                        ptq->on_heap = true;
2812                }
2813                ptq->switch_state = INTEL_PT_SS_TRACING;
2814                break;
2815        case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2816                intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
2817                break;
2818        default:
2819                break;
2820        }
2821
2822        ptq->next_tid = -1;
2823
2824        return 1;
2825}
2826
2827static int intel_pt_process_switch(struct intel_pt *pt,
2828                                   struct perf_sample *sample)
2829{
2830        pid_t tid;
2831        int cpu, ret;
2832        struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);
2833
2834        if (evsel != pt->switch_evsel)
2835                return 0;
2836
2837        tid = evsel__intval(evsel, sample, "next_pid");
2838        cpu = sample->cpu;
2839
2840        intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2841                     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
2842                     &pt->tc));
2843
2844        ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2845        if (ret <= 0)
2846                return ret;
2847
2848        return machine__set_current_tid(pt->machine, cpu, -1, tid);
2849}
2850
2851static int intel_pt_context_switch_in(struct intel_pt *pt,
2852                                      struct perf_sample *sample)
2853{
2854        pid_t pid = sample->pid;
2855        pid_t tid = sample->tid;
2856        int cpu = sample->cpu;
2857
2858        if (pt->sync_switch) {
2859                struct intel_pt_queue *ptq;
2860
2861                ptq = intel_pt_cpu_to_ptq(pt, cpu);
2862                if (ptq && ptq->sync_switch) {
2863                        ptq->next_tid = -1;
2864                        switch (ptq->switch_state) {
2865                        case INTEL_PT_SS_NOT_TRACING:
2866                        case INTEL_PT_SS_UNKNOWN:
2867                        case INTEL_PT_SS_TRACING:
2868                                break;
2869                        case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
2870                        case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2871                                ptq->switch_state = INTEL_PT_SS_TRACING;
2872                                break;
2873                        default:
2874                                break;
2875                        }
2876                }
2877        }
2878
2879        /*
2880         * If the current tid has not been updated yet, ensure it is, now that
2881         * a "switch in" event has occurred.
2882         */
2883        if (machine__get_current_tid(pt->machine, cpu) == tid)
2884                return 0;
2885
2886        return machine__set_current_tid(pt->machine, cpu, pid, tid);
2887}
2888
2889static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
2890                                   struct perf_sample *sample)
2891{
2892        bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2893        pid_t pid, tid;
2894        int cpu, ret;
2895
2896        cpu = sample->cpu;
2897
2898        if (pt->have_sched_switch == 3) {
2899                if (!out)
2900                        return intel_pt_context_switch_in(pt, sample);
2901                if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
2902                        pr_err("Expecting CPU-wide context switch event\n");
2903                        return -EINVAL;
2904                }
2905                pid = event->context_switch.next_prev_pid;
2906                tid = event->context_switch.next_prev_tid;
2907        } else {
2908                if (out)
2909                        return 0;
2910                pid = sample->pid;
2911                tid = sample->tid;
2912        }
2913
2914        if (tid == -1)
2915                intel_pt_log("context_switch event has no tid\n");
2916
2917        ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2918        if (ret <= 0)
2919                return ret;
2920
2921        return machine__set_current_tid(pt->machine, cpu, pid, tid);
2922}
2923
2924static int intel_pt_process_itrace_start(struct intel_pt *pt,
2925                                         union perf_event *event,
2926                                         struct perf_sample *sample)
2927{
2928        if (!pt->per_cpu_mmaps)
2929                return 0;
2930
2931        intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2932                     sample->cpu, event->itrace_start.pid,
2933                     event->itrace_start.tid, sample->time,
2934                     perf_time_to_tsc(sample->time, &pt->tc));
2935
2936        return machine__set_current_tid(pt->machine, sample->cpu,
2937                                        event->itrace_start.pid,
2938                                        event->itrace_start.tid);
2939}
2940
2941static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
2942                                             union perf_event *event,
2943                                             struct perf_sample *sample)
2944{
2945        u64 hw_id = event->aux_output_hw_id.hw_id;
2946        struct auxtrace_queue *queue;
2947        struct intel_pt_queue *ptq;
2948        struct evsel *evsel;
2949
2950        queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
2951        evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id);
2952        if (!queue || !queue->priv || !evsel || hw_id >= INTEL_PT_MAX_PEBS) {
2953                pr_err("Bad AUX output hardware ID\n");
2954                return -EINVAL;
2955        }
2956
2957        ptq = queue->priv;
2958
2959        ptq->pebs[hw_id].evsel = evsel;
2960        ptq->pebs[hw_id].id = sample->id;
2961
2962        return 0;
2963}
2964
2965static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
2966                             struct addr_location *al)
2967{
2968        if (!al->map || addr < al->map->start || addr >= al->map->end) {
2969                if (!thread__find_map(thread, cpumode, addr, al))
2970                        return -1;
2971        }
2972
2973        return 0;
2974}
2975
2976/* Invalidate all instruction cache entries that overlap the text poke */
2977static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
2978{
2979        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2980        u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
2981        /* Assume the text poke begins in a basic block of no more than 4096 bytes */
2982        int cnt = 4096 + event->text_poke.new_len;
2983        struct thread *thread = pt->unknown_thread;
2984        struct addr_location al = { .map = NULL };
2985        struct machine *machine = pt->machine;
2986        struct intel_pt_cache_entry *e;
2987        u64 offset;
2988
2989        if (!event->text_poke.new_len)
2990                return 0;
2991
2992        for (; cnt; cnt--, addr--) {
2993                if (intel_pt_find_map(thread, cpumode, addr, &al)) {
2994                        if (addr < event->text_poke.addr)
2995                                return 0;
2996                        continue;
2997                }
2998
2999                if (!al.map->dso || !al.map->dso->auxtrace_cache)
3000                        continue;
3001
3002                offset = al.map->map_ip(al.map, addr);
3003
3004                e = intel_pt_cache_lookup(al.map->dso, machine, offset);
3005                if (!e)
3006                        continue;
3007
3008                if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
3009                        /*
3010                         * No overlap. Working backwards, there cannot be another
3011                         * basic block that overlaps the text poke if there is a
3012                         * branch instruction before the text poke address.
3013                         */
3014                        if (e->branch != INTEL_PT_BR_NO_BRANCH)
3015                                return 0;
3016                } else {
3017                        intel_pt_cache_invalidate(al.map->dso, machine, offset);
3018                        intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
3019                                     al.map->dso->long_name, addr);
3020                }
3021        }
3022
3023        return 0;
3024}
3025
3026static int intel_pt_process_event(struct perf_session *session,
3027                                  union perf_event *event,
3028                                  struct perf_sample *sample,
3029                                  struct perf_tool *tool)
3030{
3031        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3032                                           auxtrace);
3033        u64 timestamp;
3034        int err = 0;
3035
3036        if (dump_trace)
3037                return 0;
3038
3039        if (!tool->ordered_events) {
3040                pr_err("Intel Processor Trace requires ordered events\n");
3041                return -EINVAL;
3042        }
3043
3044        if (sample->time && sample->time != (u64)-1)
3045                timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3046        else
3047                timestamp = 0;
3048
3049        if (timestamp || pt->timeless_decoding) {
3050                err = intel_pt_update_queues(pt);
3051                if (err)
3052                        return err;
3053        }
3054
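            /*
             * Timeless decoding processes queues when a thread exits (or per
             * sample in sampling mode); otherwise decoding advances all queues
             * up to this event's timestamp.
             */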
3055        if (pt->timeless_decoding) {
3056                if (pt->sampling_mode) {
3057                        if (sample->aux_sample.size)
3058                                err = intel_pt_process_timeless_sample(pt,
3059                                                                       sample);
3060                } else if (event->header.type == PERF_RECORD_EXIT) {
3061                        err = intel_pt_process_timeless_queues(pt,
3062                                                               event->fork.tid,
3063                                                               sample->time);
3064                }
3065        } else if (timestamp) {
3066                if (!pt->first_timestamp)
3067                        intel_pt_first_timestamp(pt, timestamp);
3068                err = intel_pt_process_queues(pt, timestamp);
3069        }
3070        if (err)
3071                return err;
3072
3073        if (event->header.type == PERF_RECORD_SAMPLE) {
3074                if (pt->synth_opts.add_callchain && !sample->callchain)
3075                        intel_pt_add_callchain(pt, sample);
3076                if (pt->synth_opts.add_last_branch && !sample->branch_stack)
3077                        intel_pt_add_br_stack(pt, sample);
3078        }
3079
3080        if (event->header.type == PERF_RECORD_AUX &&
3081            (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
3082            pt->synth_opts.errors) {
3083                err = intel_pt_lost(pt, sample);
3084                if (err)
3085                        return err;
3086        }
3087
3088        if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
3089                err = intel_pt_process_switch(pt, sample);
3090        else if (event->header.type == PERF_RECORD_ITRACE_START)
3091                err = intel_pt_process_itrace_start(pt, event, sample);
3092        else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID)
3093                err = intel_pt_process_aux_output_hw_id(pt, event, sample);
3094        else if (event->header.type == PERF_RECORD_SWITCH ||
3095                 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
3096                err = intel_pt_context_switch(pt, event, sample);
3097
3098        if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
3099                err = intel_pt_text_poke(pt, event);
3100
3101        if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
3102                intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
3103                             event->header.type, sample->cpu, sample->time, timestamp);
3104                intel_pt_log_event(event);
3105        }
3106
3107        return err;
3108}
3109
3110static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
3111{
3112        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3113                                           auxtrace);
3114        int ret;
3115
3116        if (dump_trace)
3117                return 0;
3118
3119        if (!tool->ordered_events)
3120                return -EINVAL;
3121
3122        ret = intel_pt_update_queues(pt);
3123        if (ret < 0)
3124                return ret;
3125
3126        if (pt->timeless_decoding)
3127                return intel_pt_process_timeless_queues(pt, -1,
3128                                                        MAX_TIMESTAMP - 1);
3129
3130        return intel_pt_process_queues(pt, MAX_TIMESTAMP);
3131}
3132
3133static void intel_pt_free_events(struct perf_session *session)
3134{
3135        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3136                                           auxtrace);
3137        struct auxtrace_queues *queues = &pt->queues;
3138        unsigned int i;
3139
3140        for (i = 0; i < queues->nr_queues; i++) {
3141                intel_pt_free_queue(queues->queue_array[i].priv);
3142                queues->queue_array[i].priv = NULL;
3143        }
3144        intel_pt_log_disable();
3145        auxtrace_queues__free(queues);
3146}
3147
3148static void intel_pt_free(struct perf_session *session)
3149{
3150        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3151                                           auxtrace);
3152
3153        auxtrace_heap__free(&pt->heap);
3154        intel_pt_free_events(session);
3155        session->auxtrace = NULL;
3156        intel_pt_free_vmcs_info(pt);
3157        thread__put(pt->unknown_thread);
3158        addr_filters__exit(&pt->filts);
3159        zfree(&pt->chain);
3160        zfree(&pt->filter);
3161        zfree(&pt->time_ranges);
3162        free(pt);
3163}
3164
3165static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
3166                                       struct evsel *evsel)
3167{
3168        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3169                                           auxtrace);
3170
3171        return evsel->core.attr.type == pt->pmu_type;
3172}
3173
3174static int intel_pt_process_auxtrace_event(struct perf_session *session,
3175                                           union perf_event *event,
3176                                           struct perf_tool *tool __maybe_unused)
3177{
3178        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3179                                           auxtrace);
3180
3181        if (!pt->data_queued) {
3182                struct auxtrace_buffer *buffer;
3183                off_t data_offset;
3184                int fd = perf_data__fd(session->data);
3185                int err;
3186
3187                if (perf_data__is_pipe(session->data)) {
3188                        data_offset = 0;
3189                } else {
3190                        data_offset = lseek(fd, 0, SEEK_CUR);
3191                        if (data_offset == -1)
3192                                return -errno;
3193                }
3194
3195                err = auxtrace_queues__add_event(&pt->queues, session, event,
3196                                                 data_offset, &buffer);
3197                if (err)
3198                        return err;
3199
3200                /* Dump here now that we have copied a piped trace out of the pipe */
3201                if (dump_trace) {
3202                        if (auxtrace_buffer__get_data(buffer, fd)) {
3203                                intel_pt_dump_event(pt, buffer->data,
3204                                                    buffer->size);
3205                                auxtrace_buffer__put_data(buffer);
3206                        }
3207                }
3208        }
3209
3210        return 0;
3211}
3212
3213static int intel_pt_queue_data(struct perf_session *session,
3214                               struct perf_sample *sample,
3215                               union perf_event *event, u64 data_offset)
3216{
3217        struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3218                                           auxtrace);
3219        u64 timestamp;
3220
3221        if (event) {
3222                return auxtrace_queues__add_event(&pt->queues, session, event,
3223                                                  data_offset, NULL);
3224        }
3225
3226        if (sample->time && sample->time != (u64)-1)
3227                timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3228        else
3229                timestamp = 0;
3230
3231        return auxtrace_queues__add_sample(&pt->queues, session, sample,
3232                                           data_offset, timestamp);
3233}
3234
3235struct intel_pt_synth {
3236        struct perf_tool dummy_tool;
3237        struct perf_session *session;
3238};
3239
3240static int intel_pt_event_synth(struct perf_tool *tool,
3241                                union perf_event *event,
3242                                struct perf_sample *sample __maybe_unused,
3243                                struct machine *machine __maybe_unused)
3244{
3245        struct intel_pt_synth *intel_pt_synth =
3246                        container_of(tool, struct intel_pt_synth, dummy_tool);
3247
3248        return perf_session__deliver_synth_event(intel_pt_synth->session, event,
3249                                                 NULL);
3250}
3251
3252static int intel_pt_synth_event(struct perf_session *session, const char *name,
3253                                struct perf_event_attr *attr, u64 id)
3254{
3255        struct intel_pt_synth intel_pt_synth;
3256        int err;
3257
3258        pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
3259                 name, id, (u64)attr->sample_type);
3260
3261        memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
3262        intel_pt_synth.session = session;
3263
3264        err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
3265                                          &id, intel_pt_event_synth);
3266        if (err)
3267                pr_err("%s: failed to synthesize '%s' event type\n",
3268                       __func__, name);
3269
3270        return err;
3271}
3272
3273static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
3274                                    const char *name)
3275{
3276        struct evsel *evsel;
3277
3278        evlist__for_each_entry(evlist, evsel) {
3279                if (evsel->core.id && evsel->core.id[0] == id) {
3280                        if (evsel->name)
3281                                zfree(&evsel->name);
3282                        evsel->name = strdup(name);
3283                        break;
3284                }
3285        }
3286}
3287
3288static struct evsel *intel_pt_evsel(struct intel_pt *pt,
3289                                         struct evlist *evlist)
3290{
3291        struct evsel *evsel;
3292
3293        evlist__for_each_entry(evlist, evsel) {
3294                if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
3295                        return evsel;
3296        }
3297
3298        return NULL;
3299}
3300
3301static int intel_pt_synth_events(struct intel_pt *pt,
3302                                 struct perf_session *session)
3303{
3304        struct evlist *evlist = session->evlist;
3305        struct evsel *evsel = intel_pt_evsel(pt, evlist);
3306        struct perf_event_attr attr;
3307        u64 id;
3308        int err;
3309
3310        if (!evsel) {
3311                pr_debug("There are no selected events with Intel Processor Trace data\n");
3312                return 0;
3313        }
3314
3315        memset(&attr, 0, sizeof(struct perf_event_attr));
3316        attr.size = sizeof(struct perf_event_attr);
3317        attr.type = PERF_TYPE_HARDWARE;
3318        attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
3319        attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
3320                            PERF_SAMPLE_PERIOD;
3321        if (pt->timeless_decoding)
3322                attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
3323        else
3324                attr.sample_type |= PERF_SAMPLE_TIME;
3325        if (!pt->per_cpu_mmaps)
3326                attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
3327        attr.exclude_user = evsel->core.attr.exclude_user;
3328        attr.exclude_kernel = evsel->core.attr.exclude_kernel;
3329        attr.exclude_hv = evsel->core.attr.exclude_hv;
3330        attr.exclude_host = evsel->core.attr.exclude_host;
3331        attr.exclude_guest = evsel->core.attr.exclude_guest;
3332        attr.sample_id_all = evsel->core.attr.sample_id_all;
3333        attr.read_format = evsel->core.attr.read_format;
3334
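            /*
             * Base the synthesized event ids well above the existing sample
             * ids so that they do not clash.
             */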
3335        id = evsel->core.id[0] + 1000000000;
3336        if (!id)
3337                id = 1;
3338
3339        if (pt->synth_opts.branches) {
3340                attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
3341                attr.sample_period = 1;
3342                attr.sample_type |= PERF_SAMPLE_ADDR;
3343                err = intel_pt_synth_event(session, "branches", &attr, id);
3344                if (err)
3345                        return err;
3346                pt->sample_branches = true;
3347                pt->branches_sample_type = attr.sample_type;
3348                pt->branches_id = id;
3349                id += 1;
3350                attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
3351        }
3352
3353        if (pt->synth_opts.callchain)
3354                attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
3355        if (pt->synth_opts.last_branch) {
3356                attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
3357                /*
3358                 * We don't use the hardware index, but the sample generation
3359                 * code uses the new format branch_stack with this field,
3360                 * so the event attributes must indicate that it's present.
3361                 */
3362                attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
3363        }
3364
3365        if (pt->synth_opts.instructions) {
3366                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3367                if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3368                        attr.sample_period =
3369                                intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3370                else
3371                        attr.sample_period = pt->synth_opts.period;
3372                err = intel_pt_synth_event(session, "instructions", &attr, id);
3373                if (err)
3374                        return err;
3375                pt->sample_instructions = true;
3376                pt->instructions_sample_type = attr.sample_type;
3377                pt->instructions_id = id;
3378                id += 1;
3379        }
3380
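            /*
             * The remaining synthesized events are generated one per
             * occurrence, so drop the period from the sample type.
             */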
3381        attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
3382        attr.sample_period = 1;
3383
3384        if (pt->synth_opts.transactions) {
3385                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3386                err = intel_pt_synth_event(session, "transactions", &attr, id);
3387                if (err)
3388                        return err;
3389                pt->sample_transactions = true;
3390                pt->transactions_sample_type = attr.sample_type;
3391                pt->transactions_id = id;
3392                intel_pt_set_event_name(evlist, id, "transactions");
3393                id += 1;
3394        }
3395
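            /*
             * ptwrite and power events are synthesized with the synthetic PMU
             * type and carry their payload as raw data.
             */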
3396        attr.type = PERF_TYPE_SYNTH;
3397        attr.sample_type |= PERF_SAMPLE_RAW;
3398
3399        if (pt->synth_opts.ptwrites) {
3400                attr.config = PERF_SYNTH_INTEL_PTWRITE;
3401                err = intel_pt_synth_event(session, "ptwrite", &attr, id);
3402                if (err)
3403                        return err;
3404                pt->sample_ptwrites = true;
3405                pt->ptwrites_sample_type = attr.sample_type;
3406                pt->ptwrites_id = id;
3407                intel_pt_set_event_name(evlist, id, "ptwrite");
3408                id += 1;
3409        }
3410
3411        if (pt->synth_opts.pwr_events) {
3412                pt->sample_pwr_events = true;
3413                pt->pwr_events_sample_type = attr.sample_type;
3414
3415                attr.config = PERF_SYNTH_INTEL_CBR;
3416                err = intel_pt_synth_event(session, "cbr", &attr, id);
3417                if (err)
3418                        return err;
3419                pt->cbr_id = id;
3420                intel_pt_set_event_name(evlist, id, "cbr");
3421                id += 1;
3422
3423                attr.config = PERF_SYNTH_INTEL_PSB;
3424                err = intel_pt_synth_event(session, "psb", &attr, id);
3425                if (err)
3426                        return err;
3427                pt->psb_id = id;
3428                intel_pt_set_event_name(evlist, id, "psb");
3429                id += 1;
3430        }
3431
3432        if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) {
3433                attr.config = PERF_SYNTH_INTEL_MWAIT;
3434                err = intel_pt_synth_event(session, "mwait", &attr, id);
3435                if (err)
3436                        return err;
3437                pt->mwait_id = id;
3438                intel_pt_set_event_name(evlist, id, "mwait");
3439                id += 1;
3440
3441                attr.config = PERF_SYNTH_INTEL_PWRE;
3442                err = intel_pt_synth_event(session, "pwre", &attr, id);
3443                if (err)
3444                        return err;
3445                pt->pwre_id = id;
3446                intel_pt_set_event_name(evlist, id, "pwre");
3447                id += 1;
3448
3449                attr.config = PERF_SYNTH_INTEL_EXSTOP;
3450                err = intel_pt_synth_event(session, "exstop", &attr, id);
3451                if (err)
3452                        return err;
3453                pt->exstop_id = id;
3454                intel_pt_set_event_name(evlist, id, "exstop");
3455                id += 1;
3456
3457                attr.config = PERF_SYNTH_INTEL_PWRX;
3458                err = intel_pt_synth_event(session, "pwrx", &attr, id);
3459                if (err)
3460                        return err;
3461                pt->pwrx_id = id;
3462                intel_pt_set_event_name(evlist, id, "pwrx");
3463                id += 1;
3464        }
3465
3466        return 0;
3467}
3468
3469static void intel_pt_setup_pebs_events(struct intel_pt *pt)
3470{
3471        struct evsel *evsel;
3472
3473        if (!pt->synth_opts.other_events)
3474                return;
3475
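            /*
             * If exactly one event uses aux_output, it can be taken as the
             * PEBS event directly; with more than one, the evsel is resolved
             * later from the AUX output hardware ID.
             */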
3476        evlist__for_each_entry(pt->session->evlist, evsel) {
3477                if (evsel->core.attr.aux_output && evsel->core.id) {
3478                        if (pt->single_pebs) {
3479                                pt->single_pebs = false;
3480                                return;
3481                        }
3482                        pt->single_pebs = true;
3483                        pt->sample_pebs = true;
3484                        pt->pebs_evsel = evsel;
3485                }
3486        }
3487}
3488
3489static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
3490{
3491        struct evsel *evsel;
3492
3493        evlist__for_each_entry_reverse(evlist, evsel) {
3494                const char *name = evsel__name(evsel);
3495
3496                if (!strcmp(name, "sched:sched_switch"))
3497                        return evsel;
3498        }
3499
3500        return NULL;
3501}
3502
3503static bool intel_pt_find_switch(struct evlist *evlist)
3504{
3505        struct evsel *evsel;
3506
3507        evlist__for_each_entry(evlist, evsel) {
3508                if (evsel->core.attr.context_switch)
3509                        return true;
3510        }
3511
3512        return false;
3513}
3514
3515static int intel_pt_perf_config(const char *var, const char *value, void *data)
3516{
3517        struct intel_pt *pt = data;
3518
3519        if (!strcmp(var, "intel-pt.mispred-all"))
3520                pt->mispred_all = perf_config_bool(var, value);
3521
3522        if (!strcmp(var, "intel-pt.max-loops"))
3523                perf_config_int(&pt->max_loops, var, value);
3524
3525        return 0;
3526}
3527
3528/* Find the least TSC value that converts to ns or later */
3529static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
3530{
3531        u64 tsc, tm;
3532
3533        tsc = perf_time_to_tsc(ns, &pt->tc);
3534
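            /*
             * perf_time_to_tsc() is not an exact inverse of tsc_to_perf_time(),
             * so step the TSC value down below the target time and then back
             * up to the least value that converts to ns or later.
             */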
3535        while (1) {
3536                tm = tsc_to_perf_time(tsc, &pt->tc);
3537                if (tm < ns)
3538                        break;
3539                tsc -= 1;
3540        }
3541
3542        while (tm < ns)
3543                tm = tsc_to_perf_time(++tsc, &pt->tc);
3544
3545        return tsc;
3546}
3547
3548/* Find the greatest TSC value that converts to ns or earlier */
3549static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
3550{
3551        u64 tsc, tm;
3552
3553        tsc = perf_time_to_tsc(ns, &pt->tc);
3554
3555        while (1) {
3556                tm = tsc_to_perf_time(tsc, &pt->tc);
3557                if (tm > ns)
3558                        break;
3559                tsc += 1;
3560        }
3561
3562        while (tm > ns)
3563                tm = tsc_to_perf_time(--tsc, &pt->tc);
3564
3565        return tsc;
3566}
3567
3568static int intel_pt_setup_time_ranges(struct intel_pt *pt,
3569                                      struct itrace_synth_opts *opts)
3570{
3571        struct perf_time_interval *p = opts->ptime_range;
3572        int n = opts->range_num;
3573        int i;
3574
3575        if (!n || !p || pt->timeless_decoding)
3576                return 0;
3577
3578        pt->time_ranges = calloc(n, sizeof(struct range));
3579        if (!pt->time_ranges)
3580                return -ENOMEM;
3581
3582        pt->range_cnt = n;
3583
3584        intel_pt_log("%s: %u range(s)\n", __func__, n);
3585
3586        for (i = 0; i < n; i++) {
3587                struct range *r = &pt->time_ranges[i];
3588                u64 ts = p[i].start;
3589                u64 te = p[i].end;
3590
3591                /*
3592                 * Take care to ensure the TSC range matches the perf-time range
3593                 * when converted back to perf-time.
3594                 */
3595                r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
3596                r->end   = te ? intel_pt_tsc_end(te, pt) : 0;
3597
3598                intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
3599                             i, ts, te);
3600                intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
3601                             i, r->start, r->end);
3602        }
3603
3604        return 0;
3605}
3606
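    /*
     * Parse one VM Time Correlation argument: either a bare TSC offset, which
     * becomes the default offset, or "tsc_offset:vmcs[,vmcs]..." to attach the
     * offset to specific VMCS addresses.
     */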
3607static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
3608{
3609        struct intel_pt_vmcs_info *vmcs_info;
3610        u64 tsc_offset, vmcs;
3611        char *p = *args;
3612
3613        errno = 0;
3614
3615        p = skip_spaces(p);
3616        if (!*p)
3617                return 1;
3618
3619        tsc_offset = strtoull(p, &p, 0);
3620        if (errno)
3621                return -errno;
3622        p = skip_spaces(p);
3623        if (*p != ':') {
3624                pt->dflt_tsc_offset = tsc_offset;
3625                *args = p;
3626                return 0;
3627        }
3628        p += 1;
3629        while (1) {
3630                vmcs = strtoull(p, &p, 0);
3631                if (errno)
3632                        return -errno;
3633                if (!vmcs)
3634                        return -EINVAL;
3635                vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
3636                if (!vmcs_info)
3637                        return -ENOMEM;
3638                p = skip_spaces(p);
3639                if (*p != ',')
3640                        break;
3641                p += 1;
3642        }
3643        *args = p;
3644        return 0;
3645}
3646
3647static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
3648{
3649        char *args = pt->synth_opts.vm_tm_corr_args;
3650        int ret;
3651
3652        if (!args)
3653                return 0;
3654
3655        do {
3656                ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
3657        } while (!ret);
3658
3659        if (ret < 0) {
3660                pr_err("Failed to parse VM Time Correlation options\n");
3661                return ret;
3662        }
3663
3664        return 0;
3665}
3666
3667static const char * const intel_pt_info_fmts[] = {
3668        [INTEL_PT_PMU_TYPE]             = "  PMU Type            %"PRId64"\n",
3669        [INTEL_PT_TIME_SHIFT]           = "  Time Shift          %"PRIu64"\n",
3670        [INTEL_PT_TIME_MULT]            = "  Time Multiplier     %"PRIu64"\n",
3671        [INTEL_PT_TIME_ZERO]            = "  Time Zero           %"PRIu64"\n",
3672        [INTEL_PT_CAP_USER_TIME_ZERO]   = "  Cap Time Zero       %"PRId64"\n",
3673        [INTEL_PT_TSC_BIT]              = "  TSC bit             %#"PRIx64"\n",
3674        [INTEL_PT_NORETCOMP_BIT]        = "  NoRETComp bit       %#"PRIx64"\n",
3675        [INTEL_PT_HAVE_SCHED_SWITCH]    = "  Have sched_switch   %"PRId64"\n",
3676        [INTEL_PT_SNAPSHOT_MODE]        = "  Snapshot mode       %"PRId64"\n",
3677        [INTEL_PT_PER_CPU_MMAPS]        = "  Per-cpu maps        %"PRId64"\n",
3678        [INTEL_PT_MTC_BIT]              = "  MTC bit             %#"PRIx64"\n",
3679        [INTEL_PT_TSC_CTC_N]            = "  TSC:CTC numerator   %"PRIu64"\n",
3680        [INTEL_PT_TSC_CTC_D]            = "  TSC:CTC denominator %"PRIu64"\n",
3681        [INTEL_PT_CYC_BIT]              = "  CYC bit             %#"PRIx64"\n",
3682        [INTEL_PT_MAX_NONTURBO_RATIO]   = "  Max non-turbo ratio %"PRIu64"\n",
3683        [INTEL_PT_FILTER_STR_LEN]       = "  Filter string len.  %"PRIu64"\n",
3684};
3685
3686static void intel_pt_print_info(__u64 *arr, int start, int finish)
3687{
3688        int i;
3689
3690        if (!dump_trace)
3691                return;
3692
3693        for (i = start; i <= finish; i++)
3694                fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
3695}
3696
3697static void intel_pt_print_info_str(const char *name, const char *str)
3698{
3699        if (!dump_trace)
3700                return;
3701
3702        fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
3703}
3704
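    /*
     * Optional auxtrace_info fields are detected by checking the recorded
     * header size.
     */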
3705static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
3706{
3707        return auxtrace_info->header.size >=
3708                sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
3709}
3710
3711int intel_pt_process_auxtrace_info(union perf_event *event,
3712                                   struct perf_session *session)
3713{
3714        struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
3715        size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
3716        struct intel_pt *pt;
3717        void *info_end;
3718        __u64 *info;
3719        int err;
3720
3721        if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
3722                                        min_sz)
3723                return -EINVAL;
3724
3725        pt = zalloc(sizeof(struct intel_pt));
3726        if (!pt)
3727                return -ENOMEM;
3728
3729        pt->vmcs_info = RB_ROOT;
3730
3731        addr_filters__init(&pt->filts);
3732
3733        err = perf_config(intel_pt_perf_config, pt);
3734        if (err)
3735                goto err_free;
3736
3737        err = auxtrace_queues__init(&pt->queues);
3738        if (err)
3739                goto err_free;
3740
3741        if (session->itrace_synth_opts->set) {
3742                pt->synth_opts = *session->itrace_synth_opts;
3743        } else {
3744                struct itrace_synth_opts *opts = session->itrace_synth_opts;
3745
3746                itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
3747                if (!opts->default_no_sample && !opts->inject) {
3748                        pt->synth_opts.branches = false;
3749                        pt->synth_opts.callchain = true;
3750                        pt->synth_opts.add_callchain = true;
3751                }
3752                pt->synth_opts.thread_stack = opts->thread_stack;
3753        }
3754
3755        if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT))
3756                intel_pt_log_set_name(INTEL_PT_PMU_NAME);
3757
3758        pt->session = session;
3759        pt->machine = &session->machines.host; /* No kvm support */
3760        pt->auxtrace_type = auxtrace_info->type;
3761        pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
3762        pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
3763        pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
3764        pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
3765        pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
3766        pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
3767        pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
3768        pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
3769        pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
3770        pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
3771        intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
3772                            INTEL_PT_PER_CPU_MMAPS);
3773
3774        if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
3775                pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
3776                pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
3777                pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
3778                pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
3779                pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
3780                intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
3781                                    INTEL_PT_CYC_BIT);
3782        }
3783
3784        if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
3785                pt->max_non_turbo_ratio =
3786                        auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
3787                intel_pt_print_info(&auxtrace_info->priv[0],
3788                                    INTEL_PT_MAX_NONTURBO_RATIO,
3789                                    INTEL_PT_MAX_NONTURBO_RATIO);
3790        }
3791
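            /*
             * Variable-length data (the address filter string) follows the
             * fixed priv[] fields.
             */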
3792        info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
3793        info_end = (void *)info + auxtrace_info->header.size;
3794
3795        if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
3796                size_t len;
3797
3798                len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
3799                intel_pt_print_info(&auxtrace_info->priv[0],
3800                                    INTEL_PT_FILTER_STR_LEN,
3801                                    INTEL_PT_FILTER_STR_LEN);
3802                if (len) {
3803                        const char *filter = (const char *)info;
3804
3805                        len = roundup(len + 1, 8);
3806                        info += len >> 3;
3807                        if ((void *)info > info_end) {
3808                                pr_err("%s: bad filter string length\n", __func__);
3809                                err = -EINVAL;
3810                                goto err_free_queues;
3811                        }
3812                        pt->filter = memdup(filter, len);
3813                        if (!pt->filter) {
3814                                err = -ENOMEM;
3815                                goto err_free_queues;
3816                        }
3817                        if (session->header.needs_swap)
3818                                mem_bswap_64(pt->filter, len);
3819                        if (pt->filter[len - 1]) {
3820                                pr_err("%s: filter string not null terminated\n", __func__);
3821                                err = -EINVAL;
3822                                goto err_free_queues;
3823                        }
3824                        err = addr_filters__parse_bare_filter(&pt->filts,
3825                                                              filter);
3826                        if (err)
3827                                goto err_free_queues;
3828                }
3829                intel_pt_print_info_str("Filter string", pt->filter);
3830        }
3831
3832        pt->timeless_decoding = intel_pt_timeless_decoding(pt);
3833        if (pt->timeless_decoding && !pt->tc.time_mult)
3834                pt->tc.time_mult = 1;
3835        pt->have_tsc = intel_pt_have_tsc(pt);
3836        pt->sampling_mode = intel_pt_sampling_mode(pt);
3837        pt->est_tsc = !pt->timeless_decoding;
3838
3839        if (pt->synth_opts.vm_time_correlation) {
3840                if (pt->timeless_decoding) {
3841                        pr_err("Intel PT has no time information for VM Time Correlation\n");
3842                        err = -EINVAL;
3843                        goto err_free_queues;
3844                }
3845                if (session->itrace_synth_opts->ptime_range) {
3846                        pr_err("Time ranges cannot be specified with VM Time Correlation\n");
3847                        err = -EINVAL;
3848                        goto err_free_queues;
3849                }
3850                /* Currently TSC Offset is calculated using MTC packets */
3851                if (!intel_pt_have_mtc(pt)) {
3852                        pr_err("MTC packets must have been enabled for VM Time Correlation\n");
3853                        err = -EINVAL;
3854                        goto err_free_queues;
3855                }
3856                err = intel_pt_parse_vm_tm_corr_args(pt);
3857                if (err)
3858                        goto err_free_queues;
3859        }
3860
3861        pt->unknown_thread = thread__new(999999999, 999999999);
3862        if (!pt->unknown_thread) {
3863                err = -ENOMEM;
3864                goto err_free_queues;
3865        }
3866
3867        /*
3868         * Since this thread will not be kept in any rbtree nor in a
3869         * list, initialize its list node so that at thread__put() the
3870         * current thread lifetime assumption is kept and we don't segfault
3871         * at list_del_init().
3872         */
3873        INIT_LIST_HEAD(&pt->unknown_thread->node);
3874
3875        err = thread__set_comm(pt->unknown_thread, "unknown", 0);
3876        if (err)
3877                goto err_delete_thread;
3878        if (thread__init_maps(pt->unknown_thread, pt->machine)) {
3879                err = -ENOMEM;
3880                goto err_delete_thread;
3881        }
3882
3883        pt->auxtrace.process_event = intel_pt_process_event;
3884        pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
3885        pt->auxtrace.queue_data = intel_pt_queue_data;
3886        pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
3887        pt->auxtrace.flush_events = intel_pt_flush;
3888        pt->auxtrace.free_events = intel_pt_free_events;
3889        pt->auxtrace.free = intel_pt_free;
3890        pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
3891        session->auxtrace = &pt->auxtrace;
3892
3893        if (dump_trace)
3894                return 0;
3895
3896        if (pt->have_sched_switch == 1) {
3897                pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
3898                if (!pt->switch_evsel) {
3899                        pr_err("%s: missing sched_switch event\n", __func__);
3900                        err = -EINVAL;
3901                        goto err_delete_thread;
3902                }
3903        } else if (pt->have_sched_switch == 2 &&
3904                   !intel_pt_find_switch(session->evlist)) {
3905                pr_err("%s: missing context_switch attribute flag\n", __func__);
3906                err = -EINVAL;
3907                goto err_delete_thread;
3908        }
3909
3910        if (pt->synth_opts.log)
3911                intel_pt_log_enable();
3912
3913        /* Maximum non-turbo ratio is TSC freq / 100 MHz */
3914        if (pt->tc.time_mult) {
3915                u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
3916
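                    /*
                     * Round the TSC frequency to the nearest 100 MHz when
                     * estimating the ratio.
                     */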
3917                if (!pt->max_non_turbo_ratio)
3918                        pt->max_non_turbo_ratio =
3919                                        (tsc_freq + 50000000) / 100000000;
3920                intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
3921                intel_pt_log("Maximum non-turbo ratio %u\n",
3922                             pt->max_non_turbo_ratio);
3923                pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
3924        }
3925
3926        err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
3927        if (err)
3928                goto err_delete_thread;
3929
3930        if (pt->synth_opts.calls)
3931                pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
3932                                       PERF_IP_FLAG_TRACE_END;
3933        if (pt->synth_opts.returns)
3934                pt->branches_filter |= PERF_IP_FLAG_RETURN |
3935                                       PERF_IP_FLAG_TRACE_BEGIN;
3936
3937        if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
3938            !symbol_conf.use_callchain) {
3939                symbol_conf.use_callchain = true;
3940                if (callchain_register_param(&callchain_param) < 0) {
3941                        symbol_conf.use_callchain = false;
3942                        pt->synth_opts.callchain = false;
3943                        pt->synth_opts.add_callchain = false;
3944                }
3945        }
3946
3947        if (pt->synth_opts.add_callchain) {
3948                err = intel_pt_callchain_init(pt);
3949                if (err)
3950                        goto err_delete_thread;
3951        }
3952
3953        if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
3954                pt->br_stack_sz = pt->synth_opts.last_branch_sz;
3955                pt->br_stack_sz_plus = pt->br_stack_sz;
3956        }
3957
3958        if (pt->synth_opts.add_last_branch) {
3959                err = intel_pt_br_stack_init(pt);
3960                if (err)
3961                        goto err_delete_thread;
3962                /*
3963                 * Additional branch stack size to cater for tracing from the
3964                 * actual sample ip to where the sample time is recorded.
3965                 * Measured at about 200 branches, but generously set to 1024.
3966                 * If kernel space is not being traced, then add just 1 for the
3967                 * branch to kernel space.
3968                 */
3969                if (intel_pt_tracing_kernel(pt))
3970                        pt->br_stack_sz_plus += 1024;
3971                else
3972                        pt->br_stack_sz_plus += 1;
3973        }
3974
3975        pt->use_thread_stack = pt->synth_opts.callchain ||
3976                               pt->synth_opts.add_callchain ||
3977                               pt->synth_opts.thread_stack ||
3978                               pt->synth_opts.last_branch ||
3979                               pt->synth_opts.add_last_branch;
3980
3981        pt->callstack = pt->synth_opts.callchain ||
3982                        pt->synth_opts.add_callchain ||
3983                        pt->synth_opts.thread_stack;
3984
3985        err = intel_pt_synth_events(pt, session);
3986        if (err)
3987                goto err_delete_thread;
3988
3989        intel_pt_setup_pebs_events(pt);
3990
3991        if (pt->sampling_mode || list_empty(&session->auxtrace_index))
3992                err = auxtrace_queue_data(session, true, true);
3993        else
3994                err = auxtrace_queues__process_index(&pt->queues, session);
3995        if (err)
3996                goto err_delete_thread;
3997
3998        if (pt->queues.populated)
3999                pt->data_queued = true;
4000
4001        if (pt->timeless_decoding)
4002                pr_debug2("Intel PT decoding without timestamps\n");
4003
4004        return 0;
4005
4006err_delete_thread:
4007        zfree(&pt->chain);
4008        thread__zput(pt->unknown_thread);
4009err_free_queues:
4010        intel_pt_log_disable();
4011        auxtrace_queues__free(&pt->queues);
4012        session->auxtrace = NULL;
4013err_free:
4014        addr_filters__exit(&pt->filts);
4015        zfree(&pt->filter);
4016        zfree(&pt->time_ranges);
4017        free(pt);
4018        return err;
4019}
4020