linux/tools/perf/util/session.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "tsc.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include "units.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
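/*
 * PERF_RECORD_COMPRESSED payloads are decompressed into anonymous
 * mmap'ed buffers chained on session->decomp{,_last}.  A record may
 * end in the middle of a compressed frame, so any undecoded remainder
 * of the previous buffer is copied to the front of the new one before
 * the stream decompressor continues.
 */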
static int perf_session__process_compressed_event(struct perf_session *session,
                                                  union perf_event *event, u64 file_offset)
{
        void *src;
        size_t decomp_size, src_size;
        u64 decomp_last_rem = 0;
        size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
        struct decomp *decomp, *decomp_last = session->decomp_last;

        if (decomp_last) {
                decomp_last_rem = decomp_last->size - decomp_last->head;
                decomp_len += decomp_last_rem;
        }

        mmap_len = sizeof(struct decomp) + decomp_len;
        decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (decomp == MAP_FAILED) {
                pr_err("Couldn't allocate memory for decompression\n");
                return -1;
        }

        decomp->file_pos = file_offset;
        decomp->mmap_len = mmap_len;
        decomp->head = 0;

        if (decomp_last_rem) {
                memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
                decomp->size = decomp_last_rem;
        }

        src = (void *)event + sizeof(struct perf_record_compressed);
        src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

        decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
                                &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
        if (!decomp_size) {
                munmap(decomp, mmap_len);
                pr_err("Couldn't decompress data\n");
                return -1;
        }

        decomp->size += decomp_size;

        if (session->decomp == NULL) {
                session->decomp = decomp;
                session->decomp_last = decomp;
        } else {
                session->decomp_last->next = decomp;
                session->decomp_last = decomp;
        }

        pr_debug("decomp (B): %zu to %zu\n", src_size, decomp_size);

        return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
        struct perf_data *data = session->data;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data__is_pipe(data))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->core.attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);

        return perf_session__deliver_event(session, event->event,
                                           session->tool, event->file_offset);
}

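/*
 * Typical lifecycle, as an illustrative sketch (not lifted from any
 * particular caller):
 *
 *      struct perf_data data = {
 *              .path = "perf.data",
 *              .mode = PERF_DATA_MODE_READ,
 *      };
 *      struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *      if (IS_ERR(session))
 *              return PTR_ERR(session);
 *      ...
 *      perf_session__delete(session);
 *
 * Note that errors are reported via ERR_PTR(), so callers must check
 * with IS_ERR() rather than comparing against NULL.
 */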
struct perf_session *perf_session__new(struct perf_data *data,
                                       bool repipe, struct perf_tool *tool)
{
        int ret = -ENOMEM;
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);

        perf_env__init(&session->header.env);
        if (data) {
                ret = perf_data__open(data);
                if (ret < 0)
                        goto out_delete;

                session->data = data;

                if (perf_data__is_read(data)) {
                        ret = perf_session__open(session);
                        if (ret < 0)
                                goto out_delete;

                        /*
                         * set session attributes that are present in perf.data
                         * but not in pipe-mode.
                         */
                        if (!data->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }

                        evlist__init_trace_event_sample_raw(session->evlist);

                        /* Open the directory data. */
                        if (data->is_dir) {
                                ret = perf_data__open_dir(data);
                                if (ret)
                                        goto out_delete;
                        }

                        if (!symbol_conf.kallsyms_name &&
                            !symbol_conf.vmlinux_name)
                                symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        session->machines.host.single_address_space =
                perf_env__single_address_space(session->machines.host.env);

        if (!data || perf_data__is_write(data)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so evlist__sample_id_all is not meaningful here.
         */
        if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_delete:
        perf_session__delete(session);
 out:
        return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
        struct decomp *next = session->decomp, *decomp;
        size_t mmap_len;

        while (next) {
                decomp = next;
                next = decomp->next;
                mmap_len = decomp->mmap_len;
                munmap(decomp, mmap_len);
        }
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_session__release_decomp_events(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->data) {
                if (perf_data__is_read(session->data))
                        evlist__delete(session->evlist);
                perf_data__close(session->data);
        }
        trace_event__cleanup(&session->tevent);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

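/*
 * Consume @n bytes from @fd by reading into a scratch buffer; used to
 * skip over auxtrace payloads when the input is a pipe and therefore
 * not seekable.
 */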
static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session,
                                       union perf_event *event)
{
        dump_printf(": unhandled!\n");
        if (perf_data__is_pipe(session->data))
                skipn(perf_data__fd(session->data), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
                             union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
                                   union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
                                        union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_time_conv(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
                                                       union perf_event *event __maybe_unused,
                                                       u64 file_offset __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

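/*
 * Fill in stub handlers for any callbacks the tool did not set, so
 * that event dispatch can invoke them unconditionally.  A minimal
 * tool only sets what it needs, e.g. (illustrative sketch, with a
 * hypothetical my_process_sample callback):
 *
 *      struct perf_tool tool = {
 *              .sample         = my_process_sample,
 *              .ordered_events = true,
 *      };
 *      perf_tool__fill_defaults(&tool);
 */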
void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->cgroup == NULL)
                tool->cgroup = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->ksymbol == NULL)
                tool->ksymbol = perf_event__process_ksymbol;
        if (tool->bpf == NULL)
                tool->bpf = perf_event__process_bpf;
        if (tool->text_poke == NULL)
                tool->text_poke = perf_event__process_text_poke;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_time_conv_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
        if (tool->compressed == NULL)
                tool->compressed = perf_session__process_compressed_event;
}

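/*
 * Byte-swap the trailing sample_id_all block that follows the
 * type-specific payload; @data points just past that payload, and
 * everything from there to the end of the event must be a multiple
 * of u64 in size.
 */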
static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);

        if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
                event->mmap2.maj   = bswap_32(event->mmap2.maj);
                event->mmap2.min   = bswap_32(event->mmap2.min);
                event->mmap2.ino   = bswap_64(event->mmap2.ino);
                event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
        }

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
        event->text_poke.addr    = bswap_64(event->text_poke.addr);
        event->text_poke.old_len = bswap_16(event->text_poke.old_len);
        event->text_poke.new_len = bswap_16(event->text_poke.new_len);

        if (sample_id_all) {
                size_t len = sizeof(event->text_poke.old_len) +
                             sizeof(event->text_poke.new_len) +
                             event->text_poke.old_len +
                             event->text_poke.new_len;
                void *data = &event->text_poke.old_len;

                data += PERF_ALIGN(len, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
                                        bool sample_id_all)
{
        u64 i;

        event->namespaces.pid           = bswap_32(event->namespaces.pid);
        event->namespaces.tid           = bswap_32(event->namespaces.tid);
        event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

        for (i = 0; i < event->namespaces.nr_namespaces; i++) {
                struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

                ns->dev = bswap_64(ns->dev);
                ns->ino = bswap_64(ns->ino);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
        event->cgroup.id = bswap_64(event->cgroup.id);

        if (sample_id_all) {
                void *data = &event->cgroup.path;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

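/*
 * Reverse the bit order within a single byte, e.g. revbyte(0x80) ==
 * 0x01 and revbyte(0xa0) == 0x05.  Used below to carry bitfield flags
 * across endiannesses.
 */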
static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack to carry a flags bitfield through the endian
 * village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above is byte specific, so we need to reverse each byte of the
 * bitfield. 'Internet' also says this might be implementation specific,
 * and the proper fix is probably to carry the perf_event_attr bitfield
 * flags in a separate FEAT_ section of the data file. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

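        /*
         * Only swap fields that are actually present in this attr, as
         * indicated by attr->size: a perf.data file written by an
         * older perf may carry a smaller struct perf_event_attr.
         */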
#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while (0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);
        bswap_field_32(aux_sample_size);

        /*
         * The bitfields come right after read_format. Check against
         * read_format because offsetof() cannot be used on a bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field_16
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
        if (event->auxtrace_error.fmt)
                event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct perf_record_cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct perf_record_record_cpu_map *mask;
        unsigned i;

        data->type = bswap_16(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct perf_record_record_cpu_map *)data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
                break;
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

static void perf_event__time_conv_swap(union perf_event *event,
                                       bool sample_id_all __maybe_unused)
{
        event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
        event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
        event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);

        if (event_contains(event->time_conv, time_cycles)) {
                event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
                event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
        }
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

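/*
 * Per-record-type byte-swap handlers, indexed by PERF_RECORD_* type.
 * A NULL entry means no in-place swap is performed here for that
 * record type.
 */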
static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
        [PERF_RECORD_CGROUP]              = perf_event__cgroup_swap,
        [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * LBR callstacks only cover the user call chain; i is
                 * the number of kernel call chain entries and the
                 * extra 1 accounts for PERF_CONTEXT_USER.
                 *
                 * The user call chain is stored in LBR registers,
                 * which come in pairs: the caller is stored in the
                 * "from" register and the callee in the "to" register.
                 * For example, the call stack "A"->"B"->"C"->"D" is
                 * recorded as "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" entry and all the "from"
                 * entries are needed to reconstruct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), entries[i].from);
        }
}

static void callchain__printf(struct evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        uint64_t i;

        printf("%s: nr:%" PRIu64 "\n",
                !callstack ? "... branch stack" : "... branch callstack",
                sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &entries[i];

                if (!callstack) {
                        printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                                i, e->from, e->to,
                                (unsigned short)e->flags.cycles,
                                e->flags.mispred ? "M" : " ",
                                e->flags.predicted ? "P" : " ",
                                e->flags.abort ? "A" : " ",
                                e->flags.in_tx ? "T" : " ",
                                (unsigned)e->flags.reserved);
                } else {
                        printf("..... %2"PRIu64": %016" PRIx64 "\n",
                                i, i > 0 ? e->from : e->to);
                }
        }
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%016" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
        u64 sample_type = __evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);
        if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
                evlist->trace_event_sample_raw(evlist, event, sample);

        if (sample)
                evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

char *get_page_size_name(u64 size, char *str)
{
        if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
                snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");

        return str;
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;
        char str[PAGE_SIZE_NAME_LEN];

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->core.attr.sample_type;

        if (evsel__has_callchain(evsel))
                callchain__printf(evsel, sample);

        if (evsel__has_br_stack(evsel))
                branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
                printf("... weight: %" PRIu64, sample->weight);
                if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
                        printf(",0x%" PRIx16, sample->ins_lat);
                        printf(",0x%" PRIx16, sample->p_stage_cyc);
                }
                printf("\n");
        }

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_PHYS_ADDR)
                printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

        if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
                printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));

        if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
                printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
        struct perf_record_read *read_event = &event->read;
        u64 read_format;

        if (!dump_trace)
                return;

        printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
               evsel__name(evsel), event->read.value);

        if (!evsel)
                return;

        read_format = evsel->core.attr.read_format;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

        if (read_format & PERF_FORMAT_ID)
                printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
                                               union perf_event *event,
                                               struct perf_sample *sample)
{
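        /*
         * Guest kernel/user samples are attributed to a guest machine
         * keyed by pid; everything else belongs to the host machine.
         */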
1389        if (perf_guest &&
1390            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1391             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1392                u32 pid;
1393
1394                if (event->header.type == PERF_RECORD_MMAP
1395                    || event->header.type == PERF_RECORD_MMAP2)
1396                        pid = event->mmap.pid;
1397                else
1398                        pid = sample->pid;
1399
1400                return machines__find_guest(machines, pid);
1401        }
1402
1403        return &machines->host;
1404}
1405
1406static int deliver_sample_value(struct evlist *evlist,
1407                                struct perf_tool *tool,
1408                                union perf_event *event,
1409                                struct perf_sample *sample,
1410                                struct sample_read_value *v,
1411                                struct machine *machine)
1412{
1413        struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
1414        struct evsel *evsel;
1415
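        /*
         * PERF_SAMPLE_READ counter values are running totals, so turn
         * the value into a per-sample period by diffing it against the
         * last total seen for this stream id.
         */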
1416        if (sid) {
1417                sample->id     = v->id;
1418                sample->period = v->value - sid->period;
1419                sid->period    = v->value;
1420        }
1421
1422        if (!sid || sid->evsel == NULL) {
1423                ++evlist->stats.nr_unknown_id;
1424                return 0;
1425        }
1426
1427        /*
1428         * There's no reason to deliver a sample
1429         * with a zero period, so bail out.
1430         */
1431        if (!sample->period)
1432                return 0;
1433
1434        evsel = container_of(sid->evsel, struct evsel, core);
1435        return tool->sample(tool, event, sample, evsel, machine);
1436}
1437
1438static int deliver_sample_group(struct evlist *evlist,
1439                                struct perf_tool *tool,
1440                                union  perf_event *event,
1441                                struct perf_sample *sample,
1442                                struct machine *machine)
1443{
1444        int ret = -EINVAL;
1445        u64 i;
1446
1447        for (i = 0; i < sample->read.group.nr; i++) {
1448                ret = deliver_sample_value(evlist, tool, event, sample,
1449                                           &sample->read.group.values[i],
1450                                           machine);
1451                if (ret)
1452                        break;
1453        }
1454
1455        return ret;
1456}
1457
1458static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
1459                                  union  perf_event *event, struct perf_sample *sample,
1460                                  struct evsel *evsel, struct machine *machine)
1461{
1462        /* We know evsel != NULL. */
1463        u64 sample_type = evsel->core.attr.sample_type;
1464        u64 read_format = evsel->core.attr.read_format;
1465
1466        /* Standard sample delivery. */
1467        if (!(sample_type & PERF_SAMPLE_READ))
1468                return tool->sample(tool, event, sample, evsel, machine);
1469
1470        /* For PERF_SAMPLE_READ we have either single or group mode. */
1471        if (read_format & PERF_FORMAT_GROUP)
1472                return deliver_sample_group(evlist, tool, event, sample,
1473                                            machine);
1474        else
1475                return deliver_sample_value(evlist, tool, event, sample,
1476                                            &sample->read.one, machine);
1477}
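
/*
 * For reference, a sketch of the two PERF_SAMPLE_READ layouts being
 * dispatched above (field order follows the read_format description in
 * perf_event_open(2); this comment is illustrative, not authoritative):
 *
 *   without PERF_FORMAT_GROUP: { u64 value; u64 id; ... }
 *   with    PERF_FORMAT_GROUP: { u64 nr; { u64 value; u64 id; ... } cnt[nr]; }
 *
 * hence the group case walking sample->read.group.values[] and the
 * single case passing &sample->read.one.
 */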
1478
1479static int machines__deliver_event(struct machines *machines,
1480                                   struct evlist *evlist,
1481                                   union perf_event *event,
1482                                   struct perf_sample *sample,
1483                                   struct perf_tool *tool, u64 file_offset)
1484{
1485        struct evsel *evsel;
1486        struct machine *machine;
1487
1488        dump_event(evlist, event, file_offset, sample);
1489
1490        evsel = evlist__id2evsel(evlist, sample->id);
1491
1492        machine = machines__find_for_cpumode(machines, event, sample);
1493
1494        switch (event->header.type) {
1495        case PERF_RECORD_SAMPLE:
1496                if (evsel == NULL) {
1497                        ++evlist->stats.nr_unknown_id;
1498                        return 0;
1499                }
1500                dump_sample(evsel, event, sample);
1501                if (machine == NULL) {
1502                        ++evlist->stats.nr_unprocessable_samples;
1503                        return 0;
1504                }
1505                return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1506        case PERF_RECORD_MMAP:
1507                return tool->mmap(tool, event, sample, machine);
1508        case PERF_RECORD_MMAP2:
1509                if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1510                        ++evlist->stats.nr_proc_map_timeout;
1511                return tool->mmap2(tool, event, sample, machine);
1512        case PERF_RECORD_COMM:
1513                return tool->comm(tool, event, sample, machine);
1514        case PERF_RECORD_NAMESPACES:
1515                return tool->namespaces(tool, event, sample, machine);
1516        case PERF_RECORD_CGROUP:
1517                return tool->cgroup(tool, event, sample, machine);
1518        case PERF_RECORD_FORK:
1519                return tool->fork(tool, event, sample, machine);
1520        case PERF_RECORD_EXIT:
1521                return tool->exit(tool, event, sample, machine);
1522        case PERF_RECORD_LOST:
1523                if (tool->lost == perf_event__process_lost)
1524                        evlist->stats.total_lost += event->lost.lost;
1525                return tool->lost(tool, event, sample, machine);
1526        case PERF_RECORD_LOST_SAMPLES:
1527                if (tool->lost_samples == perf_event__process_lost_samples)
1528                        evlist->stats.total_lost_samples += event->lost_samples.lost;
1529                return tool->lost_samples(tool, event, sample, machine);
1530        case PERF_RECORD_READ:
1531                dump_read(evsel, event);
1532                return tool->read(tool, event, sample, evsel, machine);
1533        case PERF_RECORD_THROTTLE:
1534                return tool->throttle(tool, event, sample, machine);
1535        case PERF_RECORD_UNTHROTTLE:
1536                return tool->unthrottle(tool, event, sample, machine);
1537        case PERF_RECORD_AUX:
1538                if (tool->aux == perf_event__process_aux) {
1539                        if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1540                                evlist->stats.total_aux_lost += 1;
1541                        if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1542                                evlist->stats.total_aux_partial += 1;
1543                }
1544                return tool->aux(tool, event, sample, machine);
1545        case PERF_RECORD_ITRACE_START:
1546                return tool->itrace_start(tool, event, sample, machine);
1547        case PERF_RECORD_SWITCH:
1548        case PERF_RECORD_SWITCH_CPU_WIDE:
1549                return tool->context_switch(tool, event, sample, machine);
1550        case PERF_RECORD_KSYMBOL:
1551                return tool->ksymbol(tool, event, sample, machine);
1552        case PERF_RECORD_BPF_EVENT:
1553                return tool->bpf(tool, event, sample, machine);
1554        case PERF_RECORD_TEXT_POKE:
1555                return tool->text_poke(tool, event, sample, machine);
1556        default:
1557                ++evlist->stats.nr_unknown_events;
1558                return -1;
1559        }
1560}
1561
1562static int perf_session__deliver_event(struct perf_session *session,
1563                                       union perf_event *event,
1564                                       struct perf_tool *tool,
1565                                       u64 file_offset)
1566{
1567        struct perf_sample sample;
1568        int ret = evlist__parse_sample(session->evlist, event, &sample);
1569
1570        if (ret) {
1571                pr_err("Can't parse sample, err = %d\n", ret);
1572                return ret;
1573        }
1574
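        /*
         * A positive return from auxtrace__process_event() means the
         * event was handled by the AUX area code and must not be
         * delivered again below.
         */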
1575        ret = auxtrace__process_event(session, event, &sample, tool);
1576        if (ret < 0)
1577                return ret;
1578        if (ret > 0)
1579                return 0;
1580
1581        ret = machines__deliver_event(&session->machines, session->evlist,
1582                                      event, &sample, tool, file_offset);
1583
1584        if (dump_trace && sample.aux_sample.size)
1585                auxtrace__dump_auxtrace_sample(session, &sample);
1586
1587        return ret;
1588}
1589
1590static s64 perf_session__process_user_event(struct perf_session *session,
1591                                            union perf_event *event,
1592                                            u64 file_offset)
1593{
1594        struct ordered_events *oe = &session->ordered_events;
1595        struct perf_tool *tool = session->tool;
1596        struct perf_sample sample = { .time = 0, };
1597        int fd = perf_data__fd(session->data);
1598        int err;
1599
1600        if (event->header.type != PERF_RECORD_COMPRESSED ||
1601            tool->compressed == perf_session__process_compressed_event_stub)
1602                dump_event(session->evlist, event, file_offset, &sample);
1603
1604        /* These events are processed right away */
1605        switch (event->header.type) {
1606        case PERF_RECORD_HEADER_ATTR:
1607                err = tool->attr(tool, event, &session->evlist);
1608                if (err == 0) {
1609                        perf_session__set_id_hdr_size(session);
1610                        perf_session__set_comm_exec(session);
1611                }
1612                return err;
1613        case PERF_RECORD_EVENT_UPDATE:
1614                return tool->event_update(tool, event, &session->evlist);
1615        case PERF_RECORD_HEADER_EVENT_TYPE:
1616                /*
1617                 * Deprecated, but we need to handle it for the sake
1618                 * of old data files created in pipe mode.
1619                 */
1620                return 0;
1621        case PERF_RECORD_HEADER_TRACING_DATA:
1622                /*
1623                 * Setup for reading amidst mmap, but only when we
1624                 * are in 'file' mode. The 'pipe' fd is already in
1625                 * the proper place.
1626                 */
1627                if (!perf_data__is_pipe(session->data))
1628                        lseek(fd, file_offset, SEEK_SET);
1629                return tool->tracing_data(session, event);
1630        case PERF_RECORD_HEADER_BUILD_ID:
1631                return tool->build_id(session, event);
1632        case PERF_RECORD_FINISHED_ROUND:
1633                return tool->finished_round(tool, event, oe);
1634        case PERF_RECORD_ID_INDEX:
1635                return tool->id_index(session, event);
1636        case PERF_RECORD_AUXTRACE_INFO:
1637                return tool->auxtrace_info(session, event);
1638        case PERF_RECORD_AUXTRACE:
1639                /* setup for reading amidst mmap */
1640                lseek(fd, file_offset + event->header.size, SEEK_SET);
1641                return tool->auxtrace(session, event);
1642        case PERF_RECORD_AUXTRACE_ERROR:
1643                perf_session__auxtrace_error_inc(session, event);
1644                return tool->auxtrace_error(session, event);
1645        case PERF_RECORD_THREAD_MAP:
1646                return tool->thread_map(session, event);
1647        case PERF_RECORD_CPU_MAP:
1648                return tool->cpu_map(session, event);
1649        case PERF_RECORD_STAT_CONFIG:
1650                return tool->stat_config(session, event);
1651        case PERF_RECORD_STAT:
1652                return tool->stat(session, event);
1653        case PERF_RECORD_STAT_ROUND:
1654                return tool->stat_round(session, event);
1655        case PERF_RECORD_TIME_CONV:
1656                session->time_conv = event->time_conv;
1657                return tool->time_conv(session, event);
1658        case PERF_RECORD_HEADER_FEATURE:
1659                return tool->feature(session, event);
1660        case PERF_RECORD_COMPRESSED:
1661                err = tool->compressed(session, event, file_offset);
1662                if (err)
1663                        dump_event(session->evlist, event, file_offset, &sample);
1664                return err;
1665        default:
1666                return -EINVAL;
1667        }
1668}
1669
1670int perf_session__deliver_synth_event(struct perf_session *session,
1671                                      union perf_event *event,
1672                                      struct perf_sample *sample)
1673{
1674        struct evlist *evlist = session->evlist;
1675        struct perf_tool *tool = session->tool;
1676
1677        events_stats__inc(&evlist->stats, event->header.type);
1678
1679        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1680                return perf_session__process_user_event(session, event, 0);
1681
1682        return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1683}
1684
1685static void event_swap(union perf_event *event, bool sample_id_all)
1686{
1687        perf_event__swap_op swap;
1688
1689        swap = perf_event__swap_ops[event->header.type];
1690        if (swap)
1691                swap(event, sample_id_all);
1692}
1693
1694int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1695                             void *buf, size_t buf_sz,
1696                             union perf_event **event_ptr,
1697                             struct perf_sample *sample)
1698{
1699        union perf_event *event;
1700        size_t hdr_sz, rest;
1701        int fd;
1702
1703        if (session->one_mmap && !session->header.needs_swap) {
1704                event = file_offset - session->one_mmap_offset +
1705                        session->one_mmap_addr;
1706                goto out_parse_sample;
1707        }
1708
1709        if (perf_data__is_pipe(session->data))
1710                return -1;
1711
1712        fd = perf_data__fd(session->data);
1713        hdr_sz = sizeof(struct perf_event_header);
1714
1715        if (buf_sz < hdr_sz)
1716                return -1;
1717
1718        if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1719            readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1720                return -1;
1721
1722        event = (union perf_event *)buf;
1723
1724        if (session->header.needs_swap)
1725                perf_event_header__bswap(&event->header);
1726
1727        if (event->header.size < hdr_sz || event->header.size > buf_sz)
1728                return -1;
1729
1730        buf += hdr_sz;
1731        rest = event->header.size - hdr_sz;
1732
1733        if (readn(fd, buf, rest) != (ssize_t)rest)
1734                return -1;
1735
1736        if (session->header.needs_swap)
1737                event_swap(event, evlist__sample_id_all(session->evlist));
1738
1739out_parse_sample:
1740
1741        if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1742            evlist__parse_sample(session->evlist, event, sample))
1743                return -1;
1744
1745        *event_ptr = event;
1746
1747        return 0;
1748}
1749
1750int perf_session__peek_events(struct perf_session *session, u64 offset,
1751                              u64 size, peek_events_cb_t cb, void *data)
1752{
1753        u64 max_offset = offset + size;
1754        char buf[PERF_SAMPLE_MAX_SIZE];
1755        union perf_event *event;
1756        int err;
1757
1758        do {
1759                err = perf_session__peek_event(session, offset, buf,
1760                                               PERF_SAMPLE_MAX_SIZE, &event,
1761                                               NULL);
1762                if (err)
1763                        return err;
1764
1765                err = cb(session, event, offset, data);
1766                if (err)
1767                        return err;
1768
1769                offset += event->header.size;
1770                if (event->header.type == PERF_RECORD_AUXTRACE)
1771                        offset += event->auxtrace.size;
1772
1773        } while (offset < max_offset);
1774
1775        return err;
1776}
1777
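/*
 * Illustrative only: a minimal peek_events_cb_t callback sketch showing
 * the contract of the iterator above. The name count_events_cb and its
 * counting logic are hypothetical, not part of this file.
 */
static int count_events_cb(struct perf_session *session __maybe_unused,
                           union perf_event *event __maybe_unused,
                           u64 offset __maybe_unused, void *data)
{
        u64 *nr_events = data;

        (*nr_events)++;
        return 0;       /* a non-zero return stops the iteration */
}
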
1778static s64 perf_session__process_event(struct perf_session *session,
1779                                       union perf_event *event, u64 file_offset)
1780{
1781        struct evlist *evlist = session->evlist;
1782        struct perf_tool *tool = session->tool;
1783        int ret;
1784
1785        if (session->header.needs_swap)
1786                event_swap(event, evlist__sample_id_all(evlist));
1787
1788        if (event->header.type >= PERF_RECORD_HEADER_MAX)
1789                return -EINVAL;
1790
1791        events_stats__inc(&evlist->stats, event->header.type);
1792
1793        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1794                return perf_session__process_user_event(session, event, file_offset);
1795
1796        if (tool->ordered_events) {
1797                u64 timestamp = -1ULL;
1798
1799                ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
1800                if (ret && ret != -1)
1801                        return ret;
1802
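                /*
                 * -ETIME from the queue means the event carries no
                 * usable timestamp, so fall through and deliver it
                 * directly instead of ordering it.
                 */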
1803                ret = perf_session__queue_event(session, event, timestamp, file_offset);
1804                if (ret != -ETIME)
1805                        return ret;
1806        }
1807
1808        return perf_session__deliver_event(session, event, tool, file_offset);
1809}
1810
1811void perf_event_header__bswap(struct perf_event_header *hdr)
1812{
1813        hdr->type = bswap_32(hdr->type);
1814        hdr->misc = bswap_16(hdr->misc);
1815        hdr->size = bswap_16(hdr->size);
1816}
1817
1818struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1819{
1820        return machine__findnew_thread(&session->machines.host, -1, pid);
1821}
1822
1823int perf_session__register_idle_thread(struct perf_session *session)
1824{
1825        struct thread *thread = machine__idle_thread(&session->machines.host);
1826
1827        /* machine__idle_thread() got the thread, so put it */
1828        thread__put(thread);
1829        return thread ? 0 : -1;
1830}
1831
1832static void
1833perf_session__warn_order(const struct perf_session *session)
1834{
1835        const struct ordered_events *oe = &session->ordered_events;
1836        struct evsel *evsel;
1837        bool should_warn = true;
1838
1839        evlist__for_each_entry(session->evlist, evsel) {
1840                if (evsel->core.attr.write_backward)
1841                        should_warn = false;
1842        }
1843
1844        if (!should_warn)
1845                return;
1846        if (oe->nr_unordered_events != 0)
1847                ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1848}
1849
1850static void perf_session__warn_about_errors(const struct perf_session *session)
1851{
1852        const struct events_stats *stats = &session->evlist->stats;
1853
1854        if (session->tool->lost == perf_event__process_lost &&
1855            stats->nr_events[PERF_RECORD_LOST] != 0) {
1856                ui__warning("Processed %d events and lost %d chunks!\n\n"
1857                            "Check IO/CPU overload!\n\n",
1858                            stats->nr_events[0],
1859                            stats->nr_events[PERF_RECORD_LOST]);
1860        }
1861
1862        if (session->tool->lost_samples == perf_event__process_lost_samples) {
1863                double drop_rate;
1864
1865                drop_rate = (double)stats->total_lost_samples /
1866                            (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1867                if (drop_rate > 0.05) {
1868                        ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1869                                    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1870                                    drop_rate * 100.0);
1871                }
1872        }
1873
1874        if (session->tool->aux == perf_event__process_aux &&
1875            stats->total_aux_lost != 0) {
1876                ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1877                            stats->total_aux_lost,
1878                            stats->nr_events[PERF_RECORD_AUX]);
1879        }
1880
1881        if (session->tool->aux == perf_event__process_aux &&
1882            stats->total_aux_partial != 0) {
1883                bool vmm_exclusive = false;
1884
1885                (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1886                                       &vmm_exclusive);
1887
1888                ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1889                            "Are you running a KVM guest in the background?%s\n\n",
1890                            stats->total_aux_partial,
1891                            stats->nr_events[PERF_RECORD_AUX],
1892                            vmm_exclusive ?
1893                            "\nReloading the kvm_intel module with vmm_exclusive=0\n"
1894                            "will reduce the gaps to only the guest's timeslices." :
1895                            "");
1896        }
1897
1898        if (stats->nr_unknown_events != 0) {
1899                ui__warning("Found %u unknown events!\n\n"
1900                            "Is this an older tool processing a perf.data "
1901                            "file generated by a more recent tool?\n\n"
1902                            "If that is not the case, consider "
1903                            "reporting to linux-kernel@vger.kernel.org.\n\n",
1904                            stats->nr_unknown_events);
1905        }
1906
1907        if (stats->nr_unknown_id != 0) {
1908                ui__warning("%u samples with an id not present in the header\n",
1909                            stats->nr_unknown_id);
1910        }
1911
1912        if (stats->nr_invalid_chains != 0) {
1913                ui__warning("Found invalid callchains!\n\n"
1914                            "%u out of %u events were discarded for this reason.\n\n"
1915                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1916                            stats->nr_invalid_chains,
1917                            stats->nr_events[PERF_RECORD_SAMPLE]);
1918        }
1919
1920        if (stats->nr_unprocessable_samples != 0) {
1921                ui__warning("%u unprocessable samples recorded.\n"
1922                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
1923                            stats->nr_unprocessable_samples);
1924        }
1925
1926        perf_session__warn_order(session);
1927
1928        events_stats__auxtrace_error_warn(stats);
1929
1930        if (stats->nr_proc_map_timeout != 0) {
1931                ui__warning("%d map information files for pre-existing threads were\n"
1932                            "not processed; if there are samples for addresses in\n"
1933                            "those maps, they will not be resolved. You can find out\n"
1934                            "which threads these are by running with -v and\n"
1935                            "redirecting the output to a file.\n"
1936                            "Is the time limit to process the proc map too short?\n"
1937                            "Increase it with --proc-map-timeout.\n",
1938                            stats->nr_proc_map_timeout);
1939        }
1940}
1941
1942static int perf_session__flush_thread_stack(struct thread *thread,
1943                                            void *p __maybe_unused)
1944{
1945        return thread_stack__flush(thread);
1946}
1947
1948static int perf_session__flush_thread_stacks(struct perf_session *session)
1949{
1950        return machines__for_each_thread(&session->machines,
1951                                         perf_session__flush_thread_stack,
1952                                         NULL);
1953}
1954
1955volatile int session_done;
1956
1957static int __perf_session__process_decomp_events(struct perf_session *session);
1958
1959static int __perf_session__process_pipe_events(struct perf_session *session)
1960{
1961        struct ordered_events *oe = &session->ordered_events;
1962        struct perf_tool *tool = session->tool;
1963        union perf_event *event;
1964        uint32_t size, cur_size = 0;
1965        void *buf = NULL;
1966        s64 skip = 0;
1967        u64 head;
1968        ssize_t err;
1969        void *p;
1970
1971        perf_tool__fill_defaults(tool);
1972
1973        head = 0;
1974        cur_size = sizeof(union perf_event);
1975
1976        buf = malloc(cur_size);
1977        if (!buf)
1978                return -errno;
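        /*
         * The single read buffer is reused for every event, so queued
         * events must be copied rather than referenced in place.
         */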
1979        ordered_events__set_copy_on_queue(oe, true);
1980more:
1981        event = buf;
1982        err = perf_data__read(session->data, event,
1983                              sizeof(struct perf_event_header));
1984        if (err <= 0) {
1985                if (err == 0)
1986                        goto done;
1987
1988                pr_err("failed to read event header\n");
1989                goto out_err;
1990        }
1991
1992        if (session->header.needs_swap)
1993                perf_event_header__bswap(&event->header);
1994
1995        size = event->header.size;
1996        if (size < sizeof(struct perf_event_header)) {
1997                pr_err("bad event header size\n");
1998                goto out_err;
1999        }
2000
2001        if (size > cur_size) {
2002                void *new = realloc(buf, size);
2003                if (!new) {
2004                        pr_err("failed to allocate memory to read event\n");
2005                        goto out_err;
2006                }
2007                buf = new;
2008                cur_size = size;
2009                event = buf;
2010        }
2011        p = event;
2012        p += sizeof(struct perf_event_header);
2013
2014        if (size - sizeof(struct perf_event_header)) {
2015                err = perf_data__read(session->data, p,
2016                                      size - sizeof(struct perf_event_header));
2017                if (err <= 0) {
2018                        if (err == 0) {
2019                                pr_err("unexpected end of event stream\n");
2020                                goto done;
2021                        }
2022
2023                        pr_err("failed to read event data\n");
2024                        goto out_err;
2025                }
2026        }
2027
2028        if ((skip = perf_session__process_event(session, event, head)) < 0) {
2029                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2030                       head, event->header.size, event->header.type);
2031                err = -EINVAL;
2032                goto out_err;
2033        }
2034
2035        head += size;
2036
2037        if (skip > 0)
2038                head += skip;
2039
2040        err = __perf_session__process_decomp_events(session);
2041        if (err)
2042                goto out_err;
2043
2044        if (!session_done())
2045                goto more;
2046done:
2047        /* do the final flush for ordered samples */
2048        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2049        if (err)
2050                goto out_err;
2051        err = auxtrace__flush_events(session, tool);
2052        if (err)
2053                goto out_err;
2054        err = perf_session__flush_thread_stacks(session);
2055out_err:
2056        free(buf);
2057        if (!tool->no_warn)
2058                perf_session__warn_about_errors(session);
2059        ordered_events__free(&session->ordered_events);
2060        auxtrace__free_events(session);
2061        return err;
2062}
2063
2064static union perf_event *
2065prefetch_event(char *buf, u64 head, size_t mmap_size,
2066               bool needs_swap, union perf_event *error)
2067{
2068        union perf_event *event;
2069
2070        /*
2071         * Ensure we have enough space remaining to read
2072         * the size of the event in the headers.
2073         */
2074        if (head + sizeof(event->header) > mmap_size)
2075                return NULL;
2076
2077        event = (union perf_event *)(buf + head);
2078        if (needs_swap)
2079                perf_event_header__bswap(&event->header);
2080
2081        if (head + event->header.size <= mmap_size)
2082                return event;
2083
2084        /* We're not fetching the event so swap back again */
2085        if (needs_swap)
2086                perf_event_header__bswap(&event->header);
2087
2088        pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
2089                 " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);
2090
2091        return error;
2092}
2093
2094static union perf_event *
2095fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2096{
2097        return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
2098}
2099
2100static union perf_event *
2101fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2102{
2103        return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
2104}
2105
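/*
 * The two wrappers above differ only in the sentinel for a truncated
 * event: fetch_mmaped_event() treats it as fatal (ERR_PTR(-EINVAL),
 * likely fuzzed or corrupt data), while fetch_decomp_event() returns
 * NULL so the caller below simply stops at the last complete event in
 * the decompressed buffer.
 */
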
2106static int __perf_session__process_decomp_events(struct perf_session *session)
2107{
2108        s64 skip;
2109        u64 size, file_pos = 0;
2110        struct decomp *decomp = session->decomp_last;
2111
2112        if (!decomp)
2113                return 0;
2114
2115        while (decomp->head < decomp->size && !session_done()) {
2116                union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2117                                                             session->header.needs_swap);
2118
2119                if (!event)
2120                        break;
2121
2122                size = event->header.size;
2123
2124                if (size < sizeof(struct perf_event_header) ||
2125                    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
2126                        pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2127                                decomp->file_pos + decomp->head, event->header.size, event->header.type);
2128                        return -EINVAL;
2129                }
2130
2131                if (skip)
2132                        size += skip;
2133
2134                decomp->head += size;
2135        }
2136
2137        return 0;
2138}
2139
2140/*
2141 * On 64bit we can mmap the data file in one go. No need for tiny mmap
2142 * slices. On 32bit we use 32MB.
2143 */
2144#if BITS_PER_LONG == 64
2145#define MMAP_SIZE ULLONG_MAX
2146#define NUM_MMAPS 1
2147#else
2148#define MMAP_SIZE (32 * 1024 * 1024ULL)
2149#define NUM_MMAPS 128
2150#endif
2151
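/*
 * Note: NUM_MMAPS must remain a power of two, since
 * reader__process_events() advances its map slot with
 * "map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1)".
 */
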
2152struct reader;
2153
2154typedef s64 (*reader_cb_t)(struct perf_session *session,
2155                           union perf_event *event,
2156                           u64 file_offset);
2157
2158struct reader {
2159        int              fd;
2160        u64              data_size;
2161        u64              data_offset;
2162        reader_cb_t      process;
2163        bool             in_place_update;
2164};
2165
2166static int
2167reader__process_events(struct reader *rd, struct perf_session *session,
2168                       struct ui_progress *prog)
2169{
2170        u64 data_size = rd->data_size;
2171        u64 head, page_offset, file_offset, file_pos, size;
2172        int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2173        size_t  mmap_size;
2174        char *buf, *mmaps[NUM_MMAPS];
2175        union perf_event *event;
2176        s64 skip;
2177
2178        page_offset = page_size * (rd->data_offset / page_size);
2179        file_offset = page_offset;
2180        head = rd->data_offset - page_offset;
2181
2182        ui_progress__init_size(prog, data_size, "Processing events...");
2183
2184        data_size += rd->data_offset;
2185
2186        mmap_size = MMAP_SIZE;
2187        if (mmap_size > data_size) {
2188                mmap_size = data_size;
2189                session->one_mmap = true;
2190        }
2191
2192        memset(mmaps, 0, sizeof(mmaps));
2193
2194        mmap_prot  = PROT_READ;
2195        mmap_flags = MAP_SHARED;
2196
2197        if (rd->in_place_update) {
2198                mmap_prot  |= PROT_WRITE;
2199        } else if (session->header.needs_swap) {
2200                mmap_prot  |= PROT_WRITE;
2201                mmap_flags = MAP_PRIVATE;
2202        }
2203remap:
2204        buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2205                   file_offset);
2206        if (buf == MAP_FAILED) {
2207                pr_err("failed to mmap file\n");
2208                err = -errno;
2209                goto out;
2210        }
2211        mmaps[map_idx] = buf;
2212        map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2213        file_pos = file_offset + head;
2214        if (session->one_mmap) {
2215                session->one_mmap_addr = buf;
2216                session->one_mmap_offset = file_offset;
2217        }
2218
2219more:
2220        event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
2221        if (IS_ERR(event))
2222                return PTR_ERR(event);
2223
2224        if (!event) {
2225                if (mmaps[map_idx]) {
2226                        munmap(mmaps[map_idx], mmap_size);
2227                        mmaps[map_idx] = NULL;
2228                }
2229
2230                page_offset = page_size * (head / page_size);
2231                file_offset += page_offset;
2232                head -= page_offset;
2233                goto remap;
2234        }
2235
2236        size = event->header.size;
2237
2238        skip = -EINVAL;
2239
2240        if (size < sizeof(struct perf_event_header) ||
2241            (skip = rd->process(session, event, file_pos)) < 0) {
2242                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2243                       file_offset + head, event->header.size,
2244                       event->header.type, strerror(-skip));
2245                err = skip;
2246                goto out;
2247        }
2248
2249        if (skip)
2250                size += skip;
2251
2252        head += size;
2253        file_pos += size;
2254
2255        err = __perf_session__process_decomp_events(session);
2256        if (err)
2257                goto out;
2258
2259        ui_progress__update(prog, size);
2260
2261        if (session_done())
2262                goto out;
2263
2264        if (file_pos < data_size)
2265                goto more;
2266
2267out:
2268        return err;
2269}
2270
2271static s64 process_simple(struct perf_session *session,
2272                          union perf_event *event,
2273                          u64 file_offset)
2274{
2275        return perf_session__process_event(session, event, file_offset);
2276}
2277
2278static int __perf_session__process_events(struct perf_session *session)
2279{
2280        struct reader rd = {
2281                .fd             = perf_data__fd(session->data),
2282                .data_size      = session->header.data_size,
2283                .data_offset    = session->header.data_offset,
2284                .process        = process_simple,
2285                .in_place_update = session->data->in_place_update,
2286        };
2287        struct ordered_events *oe = &session->ordered_events;
2288        struct perf_tool *tool = session->tool;
2289        struct ui_progress prog;
2290        int err;
2291
2292        perf_tool__fill_defaults(tool);
2293
2294        if (rd.data_size == 0)
2295                return -1;
2296
2297        ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2298
2299        err = reader__process_events(&rd, session, &prog);
2300        if (err)
2301                goto out_err;
2302        /* do the final flush for ordered samples */
2303        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2304        if (err)
2305                goto out_err;
2306        err = auxtrace__flush_events(session, tool);
2307        if (err)
2308                goto out_err;
2309        err = perf_session__flush_thread_stacks(session);
2310out_err:
2311        ui_progress__finish();
2312        if (!tool->no_warn)
2313                perf_session__warn_about_errors(session);
2314        /*
2315         * We may be switching perf.data output, so make
2316         * ordered_events reusable.
2317         */
2318        ordered_events__reinit(&session->ordered_events);
2319        auxtrace__free_events(session);
2320        session->one_mmap = false;
2321        return err;
2322}
2323
2324int perf_session__process_events(struct perf_session *session)
2325{
2326        if (perf_session__register_idle_thread(session) < 0)
2327                return -ENOMEM;
2328
2329        if (perf_data__is_pipe(session->data))
2330                return __perf_session__process_pipe_events(session);
2331
2332        return __perf_session__process_events(session);
2333}
2334
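/*
 * Illustrative only: a minimal driver sketch for the entry point above,
 * assuming the perf_session__new(data, repipe, tool) signature used at
 * this point in the tree. The name process_file_sketch is hypothetical;
 * real callers such as builtin-report.c also wire up a perf_tool with
 * per-record callbacks before processing.
 */
static int process_file_sketch(const char *path, struct perf_tool *tool)
{
        struct perf_data data = {
                .path = path,
                .mode = PERF_DATA_MODE_READ,
        };
        struct perf_session *session = perf_session__new(&data, false, tool);
        int err;

        if (IS_ERR(session))
                return PTR_ERR(session);

        err = perf_session__process_events(session);
        perf_session__delete(session);
        return err;
}
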
2335bool perf_session__has_traces(struct perf_session *session, const char *msg)
2336{
2337        struct evsel *evsel;
2338
2339        evlist__for_each_entry(session->evlist, evsel) {
2340                if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2341                        return true;
2342        }
2343
2344        pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2345        return false;
2346}
2347
2348int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2349{
2350        char *bracket;
2351        struct ref_reloc_sym *ref;
2352        struct kmap *kmap;
2353
2354        ref = zalloc(sizeof(struct ref_reloc_sym));
2355        if (ref == NULL)
2356                return -ENOMEM;
2357
2358        ref->name = strdup(symbol_name);
2359        if (ref->name == NULL) {
2360                free(ref);
2361                return -ENOMEM;
2362        }
2363
2364        bracket = strchr(ref->name, ']');
2365        if (bracket)
2366                *bracket = '\0';
2367
2368        ref->addr = addr;
2369
2370        kmap = map__kmap(map);
2371        if (kmap)
2372                kmap->ref_reloc_sym = ref;
2373
2374        return 0;
2375}
2376
2377size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2378{
2379        return machines__fprintf_dsos(&session->machines, fp);
2380}
2381
2382size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2383                                          bool (skip)(struct dso *dso, int parm), int parm)
2384{
2385        return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2386}
2387
2388size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
2389                                       bool skip_empty)
2390{
2391        size_t ret;
2392        const char *msg = "";
2393
2394        if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2395                msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2396
2397        ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2398
2399        ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
2400        return ret;
2401}
2402
2403size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2404{
2405        /*
2406         * FIXME: Here we have to actually print all the machines in this
2407         * session, not just the host...
2408         */
2409        return machine__fprintf(&session->machines.host, fp);
2410}
2411
2412struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2413                                              unsigned int type)
2414{
2415        struct evsel *pos;
2416
2417        evlist__for_each_entry(session->evlist, pos) {
2418                if (pos->core.attr.type == type)
2419                        return pos;
2420        }
2421        return NULL;
2422}
2423
2424int perf_session__cpu_bitmap(struct perf_session *session,
2425                             const char *cpu_list, unsigned long *cpu_bitmap)
2426{
2427        int i, err = -1;
2428        struct perf_cpu_map *map;
2429        int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
2430
2431        for (i = 0; i < PERF_TYPE_MAX; ++i) {
2432                struct evsel *evsel;
2433
2434                evsel = perf_session__find_first_evtype(session, i);
2435                if (!evsel)
2436                        continue;
2437
2438                if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2439                        pr_err("File does not contain CPU events. "
2440                               "Remove the -C option to proceed.\n");
2441                        return -1;
2442                }
2443        }
2444
2445        map = perf_cpu_map__new(cpu_list);
2446        if (map == NULL) {
2447                pr_err("Invalid cpu_list\n");
2448                return -1;
2449        }
2450
2451        for (i = 0; i < map->nr; i++) {
2452                int cpu = map->map[i];
2453
2454                if (cpu >= nr_cpus) {
2455                        pr_err("Requested CPU %d too large. "
2456                               "Consider raising MAX_NR_CPUS\n", cpu);
2457                        goto out_delete_map;
2458                }
2459
2460                set_bit(cpu, cpu_bitmap);
2461        }
2462
2463        err = 0;
2464
2465out_delete_map:
2466        perf_cpu_map__put(map);
2467        return err;
2468}
2469
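/*
 * Illustrative only: typical use of the helper above, mirroring what
 * builtins do with a user-supplied -C list. The function name and the
 * "0-3,8" cpu list are hypothetical example values; the bitmap helpers
 * are assumed to come from tools/include/linux/bitmap.h.
 */
static int setup_cpu_filter_sketch(struct perf_session *session)
{
        DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

        bitmap_zero(cpu_bitmap, MAX_NR_CPUS);
        /* keep only samples recorded on CPUs 0-3 and 8 */
        return perf_session__cpu_bitmap(session, "0-3,8", cpu_bitmap);
}
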
2470void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2471                                bool full)
2472{
2473        if (session == NULL || fp == NULL)
2474                return;
2475
2476        fprintf(fp, "# ========\n");
2477        perf_header__fprintf_info(session, fp, full);
2478        fprintf(fp, "# ========\n#\n");
2479}
2480
2481int perf_event__process_id_index(struct perf_session *session,
2482                                 union perf_event *event)
2483{
2484        struct evlist *evlist = session->evlist;
2485        struct perf_record_id_index *ie = &event->id_index;
2486        size_t i, nr, max_nr;
2487
2488        max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
2489                 sizeof(struct id_index_entry);
2490        nr = ie->nr;
2491        if (nr > max_nr)
2492                return -EINVAL;
2493
2494        if (dump_trace)
2495                fprintf(stdout, " nr: %zu\n", nr);
2496
2497        for (i = 0; i < nr; i++) {
2498                struct id_index_entry *e = &ie->entries[i];
2499                struct perf_sample_id *sid;
2500
2501                if (dump_trace) {
2502                        fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2503                        fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
2504                        fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
2505                        fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
2506                }
2507
2508                sid = evlist__id2sid(evlist, e->id);
2509                if (!sid)
2510                        return -ENOENT;
2511                sid->idx = e->idx;
2512                sid->cpu = e->cpu;
2513                sid->tid = e->tid;
2514        }
2515        return 0;
2516}
2517