linux/tools/perf/util/session.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
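/*
 * PERF_RECORD_COMPRESSED events wrap a stream of ordinary records in a
 * zstd-compressed payload. The handler below decompresses into an
 * anonymous mapping that stays on the session's decomp list until
 * perf_session__delete() releases it.
 */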
static int perf_session__process_compressed_event(struct perf_session *session,
                                                  union perf_event *event, u64 file_offset)
{
        void *src;
        size_t decomp_size, src_size;
        u64 decomp_last_rem = 0;
        size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
        struct decomp *decomp, *decomp_last = session->decomp_last;

        if (decomp_last) {
                decomp_last_rem = decomp_last->size - decomp_last->head;
                decomp_len += decomp_last_rem;
        }

        mmap_len = sizeof(struct decomp) + decomp_len;
        decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (decomp == MAP_FAILED) {
                pr_err("Couldn't allocate memory for decompression\n");
                return -1;
        }

        decomp->file_pos = file_offset;
        decomp->mmap_len = mmap_len;
        decomp->head = 0;

        if (decomp_last_rem) {
                memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
                decomp->size = decomp_last_rem;
        }

        src = (void *)event + sizeof(struct perf_record_compressed);
        src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

        decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
                                &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
        if (!decomp_size) {
                munmap(decomp, mmap_len);
                pr_err("Couldn't decompress data\n");
                return -1;
        }

        decomp->size += decomp_size;

        if (session->decomp == NULL) {
                session->decomp = decomp;
                session->decomp_last = decomp;
        } else {
                session->decomp_last->next = decomp;
                session->decomp_last = decomp;
        }

        pr_debug("decomp (B): %zu to %zu\n", src_size, decomp_size);

        return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif
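
/*
 * Rough layout of one decomp buffer as built above (illustration only):
 *
 *   +---------------+----------------------------------------------+
 *   | struct decomp | data[]: leftover bytes copied from the       |
 *   | bookkeeping   | previous buffer, then newly decompressed     |
 *   |               | records                                      |
 *   +---------------+----------------------------------------------+
 *                     head advances as records are consumed
 */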

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
        struct perf_data *data = session->data;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data__is_pipe(data))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!perf_evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->core.attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);

        return perf_session__deliver_event(session, event->event,
                                           session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data *data,
                                       bool repipe, struct perf_tool *tool)
{
        int ret = -ENOMEM;
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);

        perf_env__init(&session->header.env);
        if (data) {
                ret = perf_data__open(data);
                if (ret < 0)
                        goto out_delete;

                session->data = data;

                if (perf_data__is_read(data)) {
                        ret = perf_session__open(session);
                        if (ret < 0)
                                goto out_delete;

                        /*
                         * Set session attributes that are present in perf.data
                         * but not in pipe mode.
                         */
                        if (!data->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }

                        perf_evlist__init_trace_event_sample_raw(session->evlist);

                        /* Open the directory data. */
                        if (data->is_dir) {
                                ret = perf_data__open_dir(data);
                                if (ret)
                                        goto out_delete;
                        }

                        if (!symbol_conf.kallsyms_name &&
                            !symbol_conf.vmlinux_name)
                                symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        session->machines.host.single_address_space =
                perf_env__single_address_space(session->machines.host.env);

        if (!data || perf_data__is_write(data)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so evlist__sample_id_all is not meaningful here.
         */
        if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_delete:
        perf_session__delete(session);
 out:
        return ERR_PTR(ret);
}
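
/*
 * A minimal read-mode usage sketch (error handling elided; the tool and
 * its callbacks are whatever the caller wires up):
 *
 *	struct perf_data data = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&data, false, &tool);
 *
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 *	...
 *	perf_session__delete(session);
 */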

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
        struct decomp *next, *decomp;
        size_t mmap_len;
        next = session->decomp;
        do {
                decomp = next;
                if (decomp == NULL)
                        break;
                next = decomp->next;
                mmap_len = decomp->mmap_len;
                munmap(decomp, mmap_len);
        } while (1);
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_session__release_decomp_events(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->data)
                perf_data__close(session->data);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

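/*
 * Pipe input cannot be lseek()'d past, so unhandled auxtrace payloads
 * are drained from the fd in bounded chunks instead.
 */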
static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
                                       union perf_event *event)
{
        dump_printf(": unhandled!\n");
        if (perf_data__is_pipe(session->data))
                skipn(perf_data__fd(session->data), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
                             union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
                                   union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
                                                       union perf_event *event __maybe_unused,
                                                       u64 file_offset __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->cgroup == NULL)
                tool->cgroup = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->ksymbol == NULL)
                tool->ksymbol = perf_event__process_ksymbol;
        if (tool->bpf == NULL)
                tool->bpf = perf_event__process_bpf;
        if (tool->text_poke == NULL)
                tool->text_poke = perf_event__process_text_poke;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_op2_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
        if (tool->compressed == NULL)
                tool->compressed = perf_session__process_compressed_event;
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

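/*
 * Non-sample records carry the optional sample_id fields as a trailer;
 * each helper below byte-swaps its fixed fields and then hands the
 * variable-sized tail to swap_sample_id_all().
 */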
static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
        event->text_poke.addr    = bswap_64(event->text_poke.addr);
        event->text_poke.old_len = bswap_16(event->text_poke.old_len);
        event->text_poke.new_len = bswap_16(event->text_poke.new_len);

        if (sample_id_all) {
                size_t len = sizeof(event->text_poke.old_len) +
                             sizeof(event->text_poke.new_len) +
                             event->text_poke.old_len +
                             event->text_poke.new_len;
                void *data = &event->text_poke.old_len;

                data += PERF_ALIGN(len, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
                                        bool sample_id_all)
{
        u64 i;

        event->namespaces.pid           = bswap_32(event->namespaces.pid);
        event->namespaces.tid           = bswap_32(event->namespaces.tid);
        event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

        for (i = 0; i < event->namespaces.nr_namespaces; i++) {
                struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

                ns->dev = bswap_64(ns->dev);
                ns->ino = bswap_64(ns->ino);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}
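
/*
 * For example, revbyte() mirrors the bit order within a single byte:
 * revbyte(0x01) == 0x80 and revbyte(0xb1) == 0x8d.
 */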

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);
        bswap_field_32(aux_sample_size);

        /*
         * The fields after read_format are bitfields. Check against
         * read_format because offsetof() cannot be used on a bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
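
/*
 * Note: bswap_safe() above guards against touching fields beyond the
 * attr size actually written to the file, so attrs recorded by an older
 * perf (e.g. a 64-byte PERF_ATTR_SIZE_VER0 attr, which predates
 * aux_watermark) are never read past their end.
 */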

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
        if (event->auxtrace_error.fmt)
                event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct perf_record_cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct perf_record_record_cpu_map *mask;
        unsigned i;

        data->type = bswap_64(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct perf_record_record_cpu_map *)data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
                break;
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
        [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}
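
/*
 * A tool opts into this round-based ordering simply by setting
 * ordered_events; perf_tool__fill_defaults() then installs
 * process_finished_round() so that each PERF_RECORD_FINISHED_ROUND
 * flushes queued events up to the previous round's max timestamp.
 * A minimal sketch (the sample callback is hypothetical):
 *
 *	struct perf_tool tool = {
 *		.sample         = my_process_sample,
 *		.ordered_events = true,
 *	};
 */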

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * An LBR callstack can only capture the user call chain;
                 * i is the kernel call chain length, and the extra 1 is
                 * the PERF_CONTEXT_USER marker.
                 *
                 * The user call chain is stored in LBR registers, which
                 * come in pairs: the caller is stored in the "from"
                 * register, while the callee is stored in the "to"
                 * register.
                 * For example, for a call stack "A"->"B"->"C"->"D",
                 * the LBR registers will record
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to reconstruct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), entries[i].from);
        }
}

static void callchain__printf(struct evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        uint64_t i;

        printf("%s: nr:%" PRIu64 "\n",
                !callstack ? "... branch stack" : "... branch callstack",
                sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &entries[i];

                if (!callstack) {
                        printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                                i, e->from, e->to,
                                (unsigned short)e->flags.cycles,
                                e->flags.mispred ? "M" : " ",
                                e->flags.predicted ? "P" : " ",
                                e->flags.abort ? "A" : " ",
                                e->flags.in_tx ? "T" : " ",
                                (unsigned)e->flags.reserved);
                } else {
                        printf("..... %2"PRIu64": %016" PRIx64 "\n",
                                i, i > 0 ? e->from : e->to);
                }
        }
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%016" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
                                       union perf_event *event,
                                       struct perf_sample *sample)
{
        u64 sample_type = __evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);
        if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
                evlist->trace_event_sample_raw(evlist, event, sample);

        if (sample)
                perf_evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->core.attr.sample_type;

        if (evsel__has_callchain(evsel))
                callchain__printf(evsel, sample);

        if (evsel__has_br_stack(evsel))
                branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_PHYS_ADDR)
                printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
        struct perf_record_read *read_event = &event->read;
        u64 read_format;

        if (!dump_trace)
                return;

        printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
               evsel__name(evsel), event->read.value);

        if (!evsel)
                return;

        read_format = evsel->core.attr.read_format;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

        if (read_format & PERF_FORMAT_ID)
                printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
                                               union perf_event *event,
                                               struct perf_sample *sample)
{
        struct machine *machine;

        if (perf_guest &&
            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = machines__find(machines, pid);
                if (!machine)
                        machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &machines->host;
}

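/*
 * With PERF_SAMPLE_READ, each read value carries the counter's running
 * total; the per-sample period is reconstructed below as the delta
 * against the previous total cached in the matching sample id.
 */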
1346static int deliver_sample_value(struct evlist *evlist,
1347                                struct perf_tool *tool,
1348                                union perf_event *event,
1349                                struct perf_sample *sample,
1350                                struct sample_read_value *v,
1351                                struct machine *machine)
1352{
1353        struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
1354        struct evsel *evsel;
1355
1356        if (sid) {
1357                sample->id     = v->id;
1358                sample->period = v->value - sid->period;
1359                sid->period    = v->value;
1360        }
1361
1362        if (!sid || sid->evsel == NULL) {
1363                ++evlist->stats.nr_unknown_id;
1364                return 0;
1365        }
1366
1367        /*
1368         * There's no reason to deliver sample
1369         * for zero period, bail out.
1370         */
1371        if (!sample->period)
1372                return 0;
1373
1374        evsel = container_of(sid->evsel, struct evsel, core);
1375        return tool->sample(tool, event, sample, evsel, machine);
1376}
1377
1378static int deliver_sample_group(struct evlist *evlist,
1379                                struct perf_tool *tool,
1380                                union  perf_event *event,
1381                                struct perf_sample *sample,
1382                                struct machine *machine)
1383{
1384        int ret = -EINVAL;
1385        u64 i;
1386
1387        for (i = 0; i < sample->read.group.nr; i++) {
1388                ret = deliver_sample_value(evlist, tool, event, sample,
1389                                           &sample->read.group.values[i],
1390                                           machine);
1391                if (ret)
1392                        break;
1393        }
1394
1395        return ret;
1396}
1397
1398static int
1399 perf_evlist__deliver_sample(struct evlist *evlist,
1400                             struct perf_tool *tool,
1401                             union  perf_event *event,
1402                             struct perf_sample *sample,
1403                             struct evsel *evsel,
1404                             struct machine *machine)
1405{
1406        /* We know evsel != NULL. */
1407        u64 sample_type = evsel->core.attr.sample_type;
1408        u64 read_format = evsel->core.attr.read_format;
1409
1410        /* Standard sample delivery. */
1411        if (!(sample_type & PERF_SAMPLE_READ))
1412                return tool->sample(tool, event, sample, evsel, machine);
1413
1414        /* For PERF_SAMPLE_READ we have either single or group mode. */
1415        if (read_format & PERF_FORMAT_GROUP)
1416                return deliver_sample_group(evlist, tool, event, sample,
1417                                            machine);
1418        else
1419                return deliver_sample_value(evlist, tool, event, sample,
1420                                            &sample->read.one, machine);
1421}
1422
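/*
 * Route a kernel-generated event to the tool callback matching its type,
 * keeping the evlist statistics up to date along the way.
 */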
1423static int machines__deliver_event(struct machines *machines,
1424                                   struct evlist *evlist,
1425                                   union perf_event *event,
1426                                   struct perf_sample *sample,
1427                                   struct perf_tool *tool, u64 file_offset)
1428{
1429        struct evsel *evsel;
1430        struct machine *machine;
1431
1432        dump_event(evlist, event, file_offset, sample);
1433
1434        evsel = perf_evlist__id2evsel(evlist, sample->id);
1435
1436        machine = machines__find_for_cpumode(machines, event, sample);
1437
1438        switch (event->header.type) {
1439        case PERF_RECORD_SAMPLE:
1440                if (evsel == NULL) {
1441                        ++evlist->stats.nr_unknown_id;
1442                        return 0;
1443                }
1444                dump_sample(evsel, event, sample);
1445                if (machine == NULL) {
1446                        ++evlist->stats.nr_unprocessable_samples;
1447                        return 0;
1448                }
1449                return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1450        case PERF_RECORD_MMAP:
1451                return tool->mmap(tool, event, sample, machine);
1452        case PERF_RECORD_MMAP2:
1453                if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1454                        ++evlist->stats.nr_proc_map_timeout;
1455                return tool->mmap2(tool, event, sample, machine);
1456        case PERF_RECORD_COMM:
1457                return tool->comm(tool, event, sample, machine);
1458        case PERF_RECORD_NAMESPACES:
1459                return tool->namespaces(tool, event, sample, machine);
1460        case PERF_RECORD_CGROUP:
1461                return tool->cgroup(tool, event, sample, machine);
1462        case PERF_RECORD_FORK:
1463                return tool->fork(tool, event, sample, machine);
1464        case PERF_RECORD_EXIT:
1465                return tool->exit(tool, event, sample, machine);
1466        case PERF_RECORD_LOST:
1467                if (tool->lost == perf_event__process_lost)
1468                        evlist->stats.total_lost += event->lost.lost;
1469                return tool->lost(tool, event, sample, machine);
1470        case PERF_RECORD_LOST_SAMPLES:
1471                if (tool->lost_samples == perf_event__process_lost_samples)
1472                        evlist->stats.total_lost_samples += event->lost_samples.lost;
1473                return tool->lost_samples(tool, event, sample, machine);
1474        case PERF_RECORD_READ:
1475                dump_read(evsel, event);
1476                return tool->read(tool, event, sample, evsel, machine);
1477        case PERF_RECORD_THROTTLE:
1478                return tool->throttle(tool, event, sample, machine);
1479        case PERF_RECORD_UNTHROTTLE:
1480                return tool->unthrottle(tool, event, sample, machine);
1481        case PERF_RECORD_AUX:
1482                if (tool->aux == perf_event__process_aux) {
1483                        if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1484                                evlist->stats.total_aux_lost += 1;
1485                        if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1486                                evlist->stats.total_aux_partial += 1;
1487                }
1488                return tool->aux(tool, event, sample, machine);
1489        case PERF_RECORD_ITRACE_START:
1490                return tool->itrace_start(tool, event, sample, machine);
1491        case PERF_RECORD_SWITCH:
1492        case PERF_RECORD_SWITCH_CPU_WIDE:
1493                return tool->context_switch(tool, event, sample, machine);
1494        case PERF_RECORD_KSYMBOL:
1495                return tool->ksymbol(tool, event, sample, machine);
1496        case PERF_RECORD_BPF_EVENT:
1497                return tool->bpf(tool, event, sample, machine);
1498        case PERF_RECORD_TEXT_POKE:
1499                return tool->text_poke(tool, event, sample, machine);
1500        default:
1501                ++evlist->stats.nr_unknown_events;
1502                return -1;
1503        }
1504}
1505
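/*
 * Parse the sample and offer the event to the AUX area tracing code first;
 * a positive return there means it consumed the event. Otherwise hand the
 * event on to the per-machine delivery path.
 */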
1506static int perf_session__deliver_event(struct perf_session *session,
1507                                       union perf_event *event,
1508                                       struct perf_tool *tool,
1509                                       u64 file_offset)
1510{
1511        struct perf_sample sample;
1512        int ret;
1513
1514        ret = perf_evlist__parse_sample(session->evlist, event, &sample);
1515        if (ret) {
1516                pr_err("Can't parse sample, err = %d\n", ret);
1517                return ret;
1518        }
1519
1520        ret = auxtrace__process_event(session, event, &sample, tool);
1521        if (ret < 0)
1522                return ret;
1523        if (ret > 0)
1524                return 0;
1525
1526        ret = machines__deliver_event(&session->machines, session->evlist,
1527                                      event, &sample, tool, file_offset);
1528
1529        if (dump_trace && sample.aux_sample.size)
1530                auxtrace__dump_auxtrace_sample(session, &sample);
1531
1532        return ret;
1533}
1534
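/*
 * User events (PERF_RECORD_USER_TYPE_START and above) are synthesized by
 * tools rather than by the kernel and describe the session itself, so they
 * bypass the ordered-events queue.
 */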
1535static s64 perf_session__process_user_event(struct perf_session *session,
1536                                            union perf_event *event,
1537                                            u64 file_offset)
1538{
1539        struct ordered_events *oe = &session->ordered_events;
1540        struct perf_tool *tool = session->tool;
1541        struct perf_sample sample = { .time = 0, };
1542        int fd = perf_data__fd(session->data);
1543        int err;
1544
1545        if (event->header.type != PERF_RECORD_COMPRESSED ||
1546            tool->compressed == perf_session__process_compressed_event_stub)
1547                dump_event(session->evlist, event, file_offset, &sample);
1548
1549        /* These events are processed right away */
1550        switch (event->header.type) {
1551        case PERF_RECORD_HEADER_ATTR:
1552                err = tool->attr(tool, event, &session->evlist);
1553                if (err == 0) {
1554                        perf_session__set_id_hdr_size(session);
1555                        perf_session__set_comm_exec(session);
1556                }
1557                return err;
1558        case PERF_RECORD_EVENT_UPDATE:
1559                return tool->event_update(tool, event, &session->evlist);
1560        case PERF_RECORD_HEADER_EVENT_TYPE:
                /*
                 * Deprecated, but we need to handle it for the sake
                 * of old data files created in pipe mode.
                 */
1565                return 0;
1566        case PERF_RECORD_HEADER_TRACING_DATA:
                /*
                 * Set up for reading amidst the mmap'ed data, but only
                 * when we are in 'file' mode. The 'pipe' fd is already
                 * in the proper place.
                 */
1572                if (!perf_data__is_pipe(session->data))
1573                        lseek(fd, file_offset, SEEK_SET);
1574                return tool->tracing_data(session, event);
1575        case PERF_RECORD_HEADER_BUILD_ID:
1576                return tool->build_id(session, event);
1577        case PERF_RECORD_FINISHED_ROUND:
1578                return tool->finished_round(tool, event, oe);
1579        case PERF_RECORD_ID_INDEX:
1580                return tool->id_index(session, event);
1581        case PERF_RECORD_AUXTRACE_INFO:
1582                return tool->auxtrace_info(session, event);
1583        case PERF_RECORD_AUXTRACE:
                /* set up for reading amidst the mmap'ed data */
1585                lseek(fd, file_offset + event->header.size, SEEK_SET);
1586                return tool->auxtrace(session, event);
1587        case PERF_RECORD_AUXTRACE_ERROR:
1588                perf_session__auxtrace_error_inc(session, event);
1589                return tool->auxtrace_error(session, event);
1590        case PERF_RECORD_THREAD_MAP:
1591                return tool->thread_map(session, event);
1592        case PERF_RECORD_CPU_MAP:
1593                return tool->cpu_map(session, event);
1594        case PERF_RECORD_STAT_CONFIG:
1595                return tool->stat_config(session, event);
1596        case PERF_RECORD_STAT:
1597                return tool->stat(session, event);
1598        case PERF_RECORD_STAT_ROUND:
1599                return tool->stat_round(session, event);
1600        case PERF_RECORD_TIME_CONV:
1601                session->time_conv = event->time_conv;
1602                return tool->time_conv(session, event);
1603        case PERF_RECORD_HEADER_FEATURE:
1604                return tool->feature(session, event);
1605        case PERF_RECORD_COMPRESSED:
1606                err = tool->compressed(session, event, file_offset);
1607                if (err)
1608                        dump_event(session->evlist, event, file_offset, &sample);
1609                return err;
1610        default:
1611                return -EINVAL;
1612        }
1613}
1614
1615int perf_session__deliver_synth_event(struct perf_session *session,
1616                                      union perf_event *event,
1617                                      struct perf_sample *sample)
1618{
1619        struct evlist *evlist = session->evlist;
1620        struct perf_tool *tool = session->tool;
1621
1622        events_stats__inc(&evlist->stats, event->header.type);
1623
1624        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1625                return perf_session__process_user_event(session, event, 0);
1626
1627        return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1628}
1629
1630static void event_swap(union perf_event *event, bool sample_id_all)
1631{
1632        perf_event__swap_op swap;
1633
1634        swap = perf_event__swap_ops[event->header.type];
1635        if (swap)
1636                swap(event, sample_id_all);
1637}
1638
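/*
 * Read a single event at @file_offset without disturbing the normal event
 * stream: use the one_mmap fast path when the whole file is mapped and no
 * byte-swapping is needed, otherwise lseek() and read into the
 * caller-supplied buffer, swapping as required.
 */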
1639int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1640                             void *buf, size_t buf_sz,
1641                             union perf_event **event_ptr,
1642                             struct perf_sample *sample)
1643{
1644        union perf_event *event;
1645        size_t hdr_sz, rest;
1646        int fd;
1647
1648        if (session->one_mmap && !session->header.needs_swap) {
1649                event = file_offset - session->one_mmap_offset +
1650                        session->one_mmap_addr;
1651                goto out_parse_sample;
1652        }
1653
1654        if (perf_data__is_pipe(session->data))
1655                return -1;
1656
1657        fd = perf_data__fd(session->data);
1658        hdr_sz = sizeof(struct perf_event_header);
1659
1660        if (buf_sz < hdr_sz)
1661                return -1;
1662
1663        if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1664            readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1665                return -1;
1666
1667        event = (union perf_event *)buf;
1668
1669        if (session->header.needs_swap)
1670                perf_event_header__bswap(&event->header);
1671
1672        if (event->header.size < hdr_sz || event->header.size > buf_sz)
1673                return -1;
1674
        /* Step past the header so the payload read doesn't overwrite it. */
        buf += hdr_sz;
        rest = event->header.size - hdr_sz;
1676
1677        if (readn(fd, buf, rest) != (ssize_t)rest)
1678                return -1;
1679
1680        if (session->header.needs_swap)
1681                event_swap(event, evlist__sample_id_all(session->evlist));
1682
1683out_parse_sample:
1684
1685        if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1686            perf_evlist__parse_sample(session->evlist, event, sample))
1687                return -1;
1688
1689        *event_ptr = event;
1690
1691        return 0;
1692}
1693
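/*
 * Walk the events in [offset, offset + size), calling @cb on each one and
 * skipping over the trailing data of AUXTRACE events.
 */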
1694int perf_session__peek_events(struct perf_session *session, u64 offset,
1695                              u64 size, peek_events_cb_t cb, void *data)
1696{
1697        u64 max_offset = offset + size;
1698        char buf[PERF_SAMPLE_MAX_SIZE];
1699        union perf_event *event;
1700        int err;
1701
1702        do {
1703                err = perf_session__peek_event(session, offset, buf,
1704                                               PERF_SAMPLE_MAX_SIZE, &event,
1705                                               NULL);
1706                if (err)
1707                        return err;
1708
1709                err = cb(session, event, offset, data);
1710                if (err)
1711                        return err;
1712
1713                offset += event->header.size;
1714                if (event->header.type == PERF_RECORD_AUXTRACE)
1715                        offset += event->auxtrace.size;
1716
1717        } while (offset < max_offset);
1718
1719        return err;
1720}
1721
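/*
 * Top-level handler for one on-disk event: byte-swap if the file and host
 * disagree, account the event, process user events immediately, and queue
 * the rest by timestamp when ordered delivery is in effect.
 */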
1722static s64 perf_session__process_event(struct perf_session *session,
1723                                       union perf_event *event, u64 file_offset)
1724{
1725        struct evlist *evlist = session->evlist;
1726        struct perf_tool *tool = session->tool;
1727        int ret;
1728
1729        if (session->header.needs_swap)
1730                event_swap(event, evlist__sample_id_all(evlist));
1731
1732        if (event->header.type >= PERF_RECORD_HEADER_MAX)
1733                return -EINVAL;
1734
1735        events_stats__inc(&evlist->stats, event->header.type);
1736
1737        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1738                return perf_session__process_user_event(session, event, file_offset);
1739
1740        if (tool->ordered_events) {
1741                u64 timestamp = -1ULL;
1742
1743                ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
1744                if (ret && ret != -1)
1745                        return ret;
1746
1747                ret = perf_session__queue_event(session, event, timestamp, file_offset);
1748                if (ret != -ETIME)
1749                        return ret;
1750        }
1751
1752        return perf_session__deliver_event(session, event, tool, file_offset);
1753}
1754
1755void perf_event_header__bswap(struct perf_event_header *hdr)
1756{
1757        hdr->type = bswap_32(hdr->type);
1758        hdr->misc = bswap_16(hdr->misc);
1759        hdr->size = bswap_16(hdr->size);
1760}
1761
1762struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1763{
1764        return machine__findnew_thread(&session->machines.host, -1, pid);
1765}
1766
/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * A single thread is created here to represent it, but in reality there is a
 * separate idle task per cpu, so there should be one 'struct thread' per cpu.
 * Since there is only one, some tools need workarounds, for example
 * get_idle_thread() in builtin-sched.c and thread_stack__per_cpu().
 */
1774int perf_session__register_idle_thread(struct perf_session *session)
1775{
1776        struct thread *thread;
1777        int err = 0;
1778
1779        thread = machine__findnew_thread(&session->machines.host, 0, 0);
1780        if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1781                pr_err("problem inserting idle task.\n");
1782                err = -1;
1783        }
1784
1785        if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
                pr_err("problem setting namespaces for the idle task.\n");
1787                err = -1;
1788        }
1789
1790        /* machine__findnew_thread() got the thread, so put it */
1791        thread__put(thread);
1792        return err;
1793}
1794
1795static void
1796perf_session__warn_order(const struct perf_session *session)
1797{
1798        const struct ordered_events *oe = &session->ordered_events;
1799        struct evsel *evsel;
1800        bool should_warn = true;
1801
1802        evlist__for_each_entry(session->evlist, evsel) {
1803                if (evsel->core.attr.write_backward)
1804                        should_warn = false;
1805        }
1806
1807        if (!should_warn)
1808                return;
1809        if (oe->nr_unordered_events != 0)
1810                ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1811}
1812
1813static void perf_session__warn_about_errors(const struct perf_session *session)
1814{
1815        const struct events_stats *stats = &session->evlist->stats;
1816
1817        if (session->tool->lost == perf_event__process_lost &&
1818            stats->nr_events[PERF_RECORD_LOST] != 0) {
1819                ui__warning("Processed %d events and lost %d chunks!\n\n"
1820                            "Check IO/CPU overload!\n\n",
1821                            stats->nr_events[0],
1822                            stats->nr_events[PERF_RECORD_LOST]);
1823        }
1824
1825        if (session->tool->lost_samples == perf_event__process_lost_samples) {
1826                double drop_rate;
1827
1828                drop_rate = (double)stats->total_lost_samples /
1829                            (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1830                if (drop_rate > 0.05) {
1831                        ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1832                                    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1833                                    drop_rate * 100.0);
1834                }
1835        }
1836
1837        if (session->tool->aux == perf_event__process_aux &&
1838            stats->total_aux_lost != 0) {
1839                ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1840                            stats->total_aux_lost,
1841                            stats->nr_events[PERF_RECORD_AUX]);
1842        }
1843
1844        if (session->tool->aux == perf_event__process_aux &&
1845            stats->total_aux_partial != 0) {
1846                bool vmm_exclusive = false;
1847
1848                (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1849                                       &vmm_exclusive);
1850
1851                ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1852                            "Are you running a KVM guest in the background?%s\n\n",
1853                            stats->total_aux_partial,
1854                            stats->nr_events[PERF_RECORD_AUX],
1855                            vmm_exclusive ?
1856                            "\nReloading kvm_intel module with vmm_exclusive=0\n"
1857                            "will reduce the gaps to only guest's timeslices." :
1858                            "");
1859        }
1860
1861        if (stats->nr_unknown_events != 0) {
1862                ui__warning("Found %u unknown events!\n\n"
1863                            "Is this an older tool processing a perf.data "
1864                            "file generated by a more recent tool?\n\n"
1865                            "If that is not the case, consider "
1866                            "reporting to linux-kernel@vger.kernel.org.\n\n",
1867                            stats->nr_unknown_events);
1868        }
1869
1870        if (stats->nr_unknown_id != 0) {
1871                ui__warning("%u samples with id not present in the header\n",
1872                            stats->nr_unknown_id);
1873        }
1874
1875        if (stats->nr_invalid_chains != 0) {
1876                ui__warning("Found invalid callchains!\n\n"
1877                            "%u out of %u events were discarded for this reason.\n\n"
1878                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1879                            stats->nr_invalid_chains,
1880                            stats->nr_events[PERF_RECORD_SAMPLE]);
1881        }
1882
1883        if (stats->nr_unprocessable_samples != 0) {
1884                ui__warning("%u unprocessable samples recorded.\n"
1885                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
1886                            stats->nr_unprocessable_samples);
1887        }
1888
1889        perf_session__warn_order(session);
1890
1891        events_stats__auxtrace_error_warn(stats);
1892
1893        if (stats->nr_proc_map_timeout != 0) {
                ui__warning("%d map information files for pre-existing threads were\n"
                            "not processed; if there are samples for their addresses,\n"
                            "they will not be resolved. You can find out which threads\n"
                            "these are by running with -v and redirecting the output\n"
                            "to a file.\n"
                            "Is the time limit to process the proc map too short?\n"
                            "Increase it with --proc-map-timeout.\n",
                            stats->nr_proc_map_timeout);
1902        }
1903}
1904
1905static int perf_session__flush_thread_stack(struct thread *thread,
1906                                            void *p __maybe_unused)
1907{
1908        return thread_stack__flush(thread);
1909}
1910
1911static int perf_session__flush_thread_stacks(struct perf_session *session)
1912{
1913        return machines__for_each_thread(&session->machines,
1914                                         perf_session__flush_thread_stack,
1915                                         NULL);
1916}
1917
1918volatile int session_done;
1919
1920static int __perf_session__process_decomp_events(struct perf_session *session);
1921
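/*
 * Pipe mode can neither mmap nor seek, so read each event into a growable
 * heap buffer: the header first, then the payload once the size is known.
 */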
1922static int __perf_session__process_pipe_events(struct perf_session *session)
1923{
1924        struct ordered_events *oe = &session->ordered_events;
1925        struct perf_tool *tool = session->tool;
1926        int fd = perf_data__fd(session->data);
1927        union perf_event *event;
1928        uint32_t size, cur_size = 0;
1929        void *buf = NULL;
1930        s64 skip = 0;
1931        u64 head;
1932        ssize_t err;
1933        void *p;
1934
1935        perf_tool__fill_defaults(tool);
1936
1937        head = 0;
1938        cur_size = sizeof(union perf_event);
1939
1940        buf = malloc(cur_size);
1941        if (!buf)
1942                return -errno;
1943        ordered_events__set_copy_on_queue(oe, true);
1944more:
1945        event = buf;
1946        err = readn(fd, event, sizeof(struct perf_event_header));
1947        if (err <= 0) {
1948                if (err == 0)
1949                        goto done;
1950
1951                pr_err("failed to read event header\n");
1952                goto out_err;
1953        }
1954
1955        if (session->header.needs_swap)
1956                perf_event_header__bswap(&event->header);
1957
1958        size = event->header.size;
1959        if (size < sizeof(struct perf_event_header)) {
1960                pr_err("bad event header size\n");
1961                goto out_err;
1962        }
1963
1964        if (size > cur_size) {
1965                void *new = realloc(buf, size);
1966                if (!new) {
1967                        pr_err("failed to allocate memory to read event\n");
1968                        goto out_err;
1969                }
1970                buf = new;
1971                cur_size = size;
1972                event = buf;
1973        }
1974        p = event;
1975        p += sizeof(struct perf_event_header);
1976
1977        if (size - sizeof(struct perf_event_header)) {
1978                err = readn(fd, p, size - sizeof(struct perf_event_header));
1979                if (err <= 0) {
1980                        if (err == 0) {
1981                                pr_err("unexpected end of event stream\n");
1982                                goto done;
1983                        }
1984
1985                        pr_err("failed to read event data\n");
1986                        goto out_err;
1987                }
1988        }
1989
1990        if ((skip = perf_session__process_event(session, event, head)) < 0) {
1991                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1992                       head, event->header.size, event->header.type);
1993                err = -EINVAL;
1994                goto out_err;
1995        }
1996
1997        head += size;
1998
1999        if (skip > 0)
2000                head += skip;
2001
2002        err = __perf_session__process_decomp_events(session);
2003        if (err)
2004                goto out_err;
2005
2006        if (!session_done())
2007                goto more;
2008done:
2009        /* do the final flush for ordered samples */
2010        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2011        if (err)
2012                goto out_err;
2013        err = auxtrace__flush_events(session, tool);
2014        if (err)
2015                goto out_err;
2016        err = perf_session__flush_thread_stacks(session);
2017out_err:
2018        free(buf);
2019        if (!tool->no_warn)
2020                perf_session__warn_about_errors(session);
2021        ordered_events__free(&session->ordered_events);
2022        auxtrace__free_events(session);
2023        return err;
2024}
2025
2026static union perf_event *
2027prefetch_event(char *buf, u64 head, size_t mmap_size,
2028               bool needs_swap, union perf_event *error)
2029{
2030        union perf_event *event;
2031
        /*
         * Ensure we have enough space remaining to read
         * the event header, which carries the event size.
         */
2036        if (head + sizeof(event->header) > mmap_size)
2037                return NULL;
2038
2039        event = (union perf_event *)(buf + head);
2040        if (needs_swap)
2041                perf_event_header__bswap(&event->header);
2042
2043        if (head + event->header.size <= mmap_size)
2044                return event;
2045
2046        /* We're not fetching the event so swap back again */
2047        if (needs_swap)
2048                perf_event_header__bswap(&event->header);
2049
        pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
                 " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);
2052
2053        return error;
2054}
2055
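/*
 * An event that would extend past the end of the mmap'ed file data indicates
 * a malformed file, so the reader gets ERR_PTR(-EINVAL). A decompression
 * buffer, by contrast, may legitimately end in the middle of an event, so
 * there a clipped event maps to NULL and processing of the buffer stops.
 */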
2056static union perf_event *
2057fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2058{
2059        return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
2060}
2061
2062static union perf_event *
2063fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2064{
2065        return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
2066}
2067
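/*
 * Feed the events sitting in the most recent decompression buffer through
 * the regular processing path.
 */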
2068static int __perf_session__process_decomp_events(struct perf_session *session)
2069{
2070        s64 skip;
2071        u64 size, file_pos = 0;
2072        struct decomp *decomp = session->decomp_last;
2073
2074        if (!decomp)
2075                return 0;
2076
2077        while (decomp->head < decomp->size && !session_done()) {
2078                union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2079                                                             session->header.needs_swap);
2080
2081                if (!event)
2082                        break;
2083
2084                size = event->header.size;
2085
2086                if (size < sizeof(struct perf_event_header) ||
2087                    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
2088                        pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2089                                decomp->file_pos + decomp->head, event->header.size, event->header.type);
2090                        return -EINVAL;
2091                }
2092
2093                if (skip)
2094                        size += skip;
2095
2096                decomp->head += size;
2097        }
2098
2099        return 0;
2100}
2101
2102/*
2103 * On 64bit we can mmap the data file in one go. No need for tiny mmap
2104 * slices. On 32bit we use 32MB.
2105 */
2106#if BITS_PER_LONG == 64
2107#define MMAP_SIZE ULLONG_MAX
2108#define NUM_MMAPS 1
2109#else
2110#define MMAP_SIZE (32 * 1024 * 1024ULL)
2111#define NUM_MMAPS 128
2112#endif
2113
2114struct reader;
2115
2116typedef s64 (*reader_cb_t)(struct perf_session *session,
2117                           union perf_event *event,
2118                           u64 file_offset);
2119
2120struct reader {
2121        int              fd;
2122        u64              data_size;
2123        u64              data_offset;
2124        reader_cb_t      process;
2125};
2126
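/*
 * Walk the data section in mmap-sized windows: map a slice, process the
 * complete events inside it, and slide the window forward (page aligned)
 * when the next event header would cross the end of the mapping.
 */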
2127static int
2128reader__process_events(struct reader *rd, struct perf_session *session,
2129                       struct ui_progress *prog)
2130{
2131        u64 data_size = rd->data_size;
2132        u64 head, page_offset, file_offset, file_pos, size;
2133        int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2134        size_t  mmap_size;
2135        char *buf, *mmaps[NUM_MMAPS];
2136        union perf_event *event;
2137        s64 skip;
2138
2139        page_offset = page_size * (rd->data_offset / page_size);
2140        file_offset = page_offset;
2141        head = rd->data_offset - page_offset;
2142
2143        ui_progress__init_size(prog, data_size, "Processing events...");
2144
2145        data_size += rd->data_offset;
2146
2147        mmap_size = MMAP_SIZE;
2148        if (mmap_size > data_size) {
2149                mmap_size = data_size;
2150                session->one_mmap = true;
2151        }
2152
2153        memset(mmaps, 0, sizeof(mmaps));
2154
2155        mmap_prot  = PROT_READ;
2156        mmap_flags = MAP_SHARED;
2157
2158        if (session->header.needs_swap) {
2159                mmap_prot  |= PROT_WRITE;
2160                mmap_flags = MAP_PRIVATE;
2161        }
2162remap:
2163        buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2164                   file_offset);
2165        if (buf == MAP_FAILED) {
2166                pr_err("failed to mmap file\n");
2167                err = -errno;
2168                goto out;
2169        }
2170        mmaps[map_idx] = buf;
2171        map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2172        file_pos = file_offset + head;
2173        if (session->one_mmap) {
2174                session->one_mmap_addr = buf;
2175                session->one_mmap_offset = file_offset;
2176        }
2177
2178more:
2179        event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
2180        if (IS_ERR(event))
2181                return PTR_ERR(event);
2182
2183        if (!event) {
2184                if (mmaps[map_idx]) {
2185                        munmap(mmaps[map_idx], mmap_size);
2186                        mmaps[map_idx] = NULL;
2187                }
2188
2189                page_offset = page_size * (head / page_size);
2190                file_offset += page_offset;
2191                head -= page_offset;
2192                goto remap;
2193        }
2194
2195        size = event->header.size;
2196
2197        skip = -EINVAL;
2198
2199        if (size < sizeof(struct perf_event_header) ||
2200            (skip = rd->process(session, event, file_pos)) < 0) {
2201                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2202                       file_offset + head, event->header.size,
2203                       event->header.type, strerror(-skip));
2204                err = skip;
2205                goto out;
2206        }
2207
2208        if (skip)
2209                size += skip;
2210
2211        head += size;
2212        file_pos += size;
2213
2214        err = __perf_session__process_decomp_events(session);
2215        if (err)
2216                goto out;
2217
2218        ui_progress__update(prog, size);
2219
2220        if (session_done())
2221                goto out;
2222
2223        if (file_pos < data_size)
2224                goto more;
2225
2226out:
2227        return err;
2228}
2229
2230static s64 process_simple(struct perf_session *session,
2231                          union perf_event *event,
2232                          u64 file_offset)
2233{
2234        return perf_session__process_event(session, event, file_offset);
2235}
2236
2237static int __perf_session__process_events(struct perf_session *session)
2238{
2239        struct reader rd = {
2240                .fd             = perf_data__fd(session->data),
2241                .data_size      = session->header.data_size,
2242                .data_offset    = session->header.data_offset,
2243                .process        = process_simple,
2244        };
2245        struct ordered_events *oe = &session->ordered_events;
2246        struct perf_tool *tool = session->tool;
2247        struct ui_progress prog;
2248        int err;
2249
2250        perf_tool__fill_defaults(tool);
2251
2252        if (rd.data_size == 0)
2253                return -1;
2254
2255        ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2256
2257        err = reader__process_events(&rd, session, &prog);
2258        if (err)
2259                goto out_err;
2260        /* do the final flush for ordered samples */
2261        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2262        if (err)
2263                goto out_err;
2264        err = auxtrace__flush_events(session, tool);
2265        if (err)
2266                goto out_err;
2267        err = perf_session__flush_thread_stacks(session);
2268out_err:
2269        ui_progress__finish();
2270        if (!tool->no_warn)
2271                perf_session__warn_about_errors(session);
        /*
         * We may be switching perf.data output; make ordered_events
         * reusable.
         */
2276        ordered_events__reinit(&session->ordered_events);
2277        auxtrace__free_events(session);
2278        session->one_mmap = false;
2279        return err;
2280}
2281
2282int perf_session__process_events(struct perf_session *session)
2283{
2284        if (perf_session__register_idle_thread(session) < 0)
2285                return -ENOMEM;
2286
2287        if (perf_data__is_pipe(session->data))
2288                return __perf_session__process_pipe_events(session);
2289
2290        return __perf_session__process_events(session);
2291}
2292
2293bool perf_session__has_traces(struct perf_session *session, const char *msg)
2294{
2295        struct evsel *evsel;
2296
2297        evlist__for_each_entry(session->evlist, evsel) {
2298                if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2299                        return true;
2300        }
2301
2302        pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2303        return false;
2304}
2305
2306int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2307{
2308        char *bracket;
2309        struct ref_reloc_sym *ref;
2310        struct kmap *kmap;
2311
2312        ref = zalloc(sizeof(struct ref_reloc_sym));
2313        if (ref == NULL)
2314                return -ENOMEM;
2315
2316        ref->name = strdup(symbol_name);
2317        if (ref->name == NULL) {
2318                free(ref);
2319                return -ENOMEM;
2320        }
2321
2322        bracket = strchr(ref->name, ']');
2323        if (bracket)
2324                *bracket = '\0';
2325
2326        ref->addr = addr;
2327
2328        kmap = map__kmap(map);
2329        if (kmap)
2330                kmap->ref_reloc_sym = ref;
2331
2332        return 0;
2333}
2334
2335size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2336{
2337        return machines__fprintf_dsos(&session->machines, fp);
2338}
2339
2340size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2341                                          bool (skip)(struct dso *dso, int parm), int parm)
2342{
2343        return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2344}
2345
2346size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2347{
2348        size_t ret;
2349        const char *msg = "";
2350
2351        if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2352                msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2353
2354        ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2355
2356        ret += events_stats__fprintf(&session->evlist->stats, fp);
2357        return ret;
2358}
2359
2360size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2361{
2362        /*
2363         * FIXME: Here we have to actually print all the machines in this
2364         * session, not just the host...
2365         */
2366        return machine__fprintf(&session->machines.host, fp);
2367}
2368
2369struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2370                                              unsigned int type)
2371{
2372        struct evsel *pos;
2373
2374        evlist__for_each_entry(session->evlist, pos) {
2375                if (pos->core.attr.type == type)
2376                        return pos;
2377        }
2378        return NULL;
2379}
2380
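/*
 * Turn @cpu_list into bits in @cpu_bitmap, after checking that every event
 * type in the session recorded PERF_SAMPLE_CPU and that each requested CPU
 * number is within the range seen at record time.
 */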
2381int perf_session__cpu_bitmap(struct perf_session *session,
2382                             const char *cpu_list, unsigned long *cpu_bitmap)
2383{
2384        int i, err = -1;
2385        struct perf_cpu_map *map;
2386        int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);
2387
2388        for (i = 0; i < PERF_TYPE_MAX; ++i) {
2389                struct evsel *evsel;
2390
2391                evsel = perf_session__find_first_evtype(session, i);
2392                if (!evsel)
2393                        continue;
2394
2395                if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2396                        pr_err("File does not contain CPU events. "
2397                               "Remove -C option to proceed.\n");
2398                        return -1;
2399                }
2400        }
2401
2402        map = perf_cpu_map__new(cpu_list);
2403        if (map == NULL) {
2404                pr_err("Invalid cpu_list\n");
2405                return -1;
2406        }
2407
2408        for (i = 0; i < map->nr; i++) {
2409                int cpu = map->map[i];
2410
2411                if (cpu >= nr_cpus) {
2412                        pr_err("Requested CPU %d too large. "
2413                               "Consider raising MAX_NR_CPUS\n", cpu);
2414                        goto out_delete_map;
2415                }
2416
2417                set_bit(cpu, cpu_bitmap);
2418        }
2419
2420        err = 0;
2421
2422out_delete_map:
2423        perf_cpu_map__put(map);
2424        return err;
2425}
2426
2427void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2428                                bool full)
2429{
2430        if (session == NULL || fp == NULL)
2431                return;
2432
2433        fprintf(fp, "# ========\n");
2434        perf_header__fprintf_info(session, fp, full);
2435        fprintf(fp, "# ========\n#\n");
2436}
2437
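/*
 * Copy the mmap index, cpu and tid of each ID index entry into the evlist's
 * matching sample id, so that a sample id alone is enough to recover them.
 */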
2438int perf_event__process_id_index(struct perf_session *session,
2439                                 union perf_event *event)
2440{
2441        struct evlist *evlist = session->evlist;
2442        struct perf_record_id_index *ie = &event->id_index;
2443        size_t i, nr, max_nr;
2444
2445        max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
2446                 sizeof(struct id_index_entry);
2447        nr = ie->nr;
2448        if (nr > max_nr)
2449                return -EINVAL;
2450
2451        if (dump_trace)
2452                fprintf(stdout, " nr: %zu\n", nr);
2453
2454        for (i = 0; i < nr; i++) {
2455                struct id_index_entry *e = &ie->entries[i];
2456                struct perf_sample_id *sid;
2457
2458                if (dump_trace) {
2459                        fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2460                        fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
2461                        fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
2462                        fprintf(stdout, "  tid: %"PRI_ld64"\n", e->tid);
2463                }
2464
2465                sid = perf_evlist__id2sid(evlist, e->id);
2466                if (!sid)
2467                        return -ENOENT;
2468                sid->idx = e->idx;
2469                sid->cpu = e->cpu;
2470                sid->tid = e->tid;
2471        }
2472        return 0;
2473}
2474