linux/tools/perf/util/session.c
#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread-stack.h"

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_tool *tool,
                                       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
        struct perf_data_file *file = session->file;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data_file__is_pipe(file))
                return 0;

        if (!perf_evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!perf_evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!perf_evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct perf_evsel *evsel;

        evlist__for_each(session->evlist, evsel) {
                if (evsel->attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_sample sample;
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);
        int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);

        if (ret) {
                pr_err("Can't parse sample, err = %d\n", ret);
                return ret;
        }

        return perf_session__deliver_event(session, event->event, &sample,
                                           session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data_file *file,
                                       bool repipe, struct perf_tool *tool)
{
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events, ordered_events__deliver_event);

        if (file) {
                if (perf_data_file__open(file))
                        goto out_delete;

                session->file = file;

                if (perf_data_file__is_read(file)) {
                        if (perf_session__open(session) < 0)
                                goto out_close;

                        perf_session__set_id_hdr_size(session);
                        perf_session__set_comm_exec(session);
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        if (!file || perf_data_file__is_write(file)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        if (tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_close:
        perf_data_file__close(file);
 out_delete:
        perf_session__delete(session);
 out:
        return NULL;
}
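
/*
 * Typical usage (a sketch, simplified from callers such as
 * builtin-report.c; error handling trimmed):
 *
 *	struct perf_data_file file = {
 *		.path = input_name,
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&file, false, &tool);
 *
 *	if (session == NULL)
 *		return -1;
 *	...
 *	perf_session__delete(session);
 */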

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->file)
                perf_data_file__close(session->file);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused,
                                                 struct perf_session *session
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct perf_evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct perf_evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_build_id_stub(struct perf_tool *tool __maybe_unused,
                                 union perf_event *event __maybe_unused,
                                 struct perf_session *session __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
                                 union perf_event *event __maybe_unused,
                                 struct perf_session *perf_session
                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_auxtrace_info_stub(struct perf_tool *tool __maybe_unused,
                                union perf_event *event __maybe_unused,
                                struct perf_session *session __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

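/*
 * Read and discard n bytes from fd. Needed in the pipe case, where
 * unhandled auxtrace data cannot simply be lseek()ed over.
 */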
static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event,
                                       struct perf_session *session)
{
        dump_printf(": unhandled!\n");
        if (perf_data_file__is_pipe(session->file))
                skipn(perf_data_file__fd(session->file), event->auxtrace.size);
        return event->auxtrace.size;
}

static
int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused,
                                      union perf_event *event __maybe_unused,
                                      struct perf_session *session __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_build_id_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_id_index_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_auxtrace_info_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_auxtrace_error_stub;
}
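
/*
 * Example (a sketch; the handler name is hypothetical): a tool fills in
 * only the callbacks it cares about and lets perf_tool__fill_defaults()
 * stub out the rest:
 *
 *	struct perf_tool tool = {
 *		.sample         = my_process_sample,
 *		.ordered_events = true,
 *	};
 *
 *	perf_tool__fill_defaults(&tool);
 */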

static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}
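
/*
 * Worked example: revbyte() mirrors the bit order of a byte by swapping
 * nibbles, then bit pairs, then adjacent bits. For b = 0xb4:
 *	0b10110100 -> 0b01001011 (nibbles)
 *	           -> 0b00011110 (bit pairs)
 *	           -> 0b00101101 (bits) = 0x2d
 */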

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the
 * perf_event_attr bitfield flags in a separate data file FEAT_ section.
 * Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
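/*
 * bswap_safe(f, n) is true only if the attr read from the file is large
 * enough to actually contain field f (plus n following same-sized
 * elements), so fields absent from older, shorter perf_event_attr
 * layouts are left untouched.
 */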
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while(0)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);

        /*
         * After read_format are bitfields. Check read_format because
         * we are unable to use offsetof on a bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};
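
/*
 * A NULL entry means the payload of that record type needs no swapping
 * beyond the header (which is swapped separately by
 * perf_event_header__bswap()); event_swap() simply skips such entries.
 */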

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              struct perf_sample *sample, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * The LBR callstack can only capture the user call chain;
                 * i is the number of kernel call chain entries, and
                 * 1 accounts for PERF_CONTEXT_USER.
                 *
                 * The user call chain is stored in LBR registers.
                 * LBRs are register pairs: the caller is stored
                 * in the "from" register, while the callee is stored
                 * in the "to" register.
                 * For example, for a call stack
                 * "A"->"B"->"C"->"D",
                 * the LBR registers will record
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to construct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
        }
}

static void callchain__printf(struct perf_evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
        uint64_t i;

        printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &sample->branch_stack->entries[i];

                printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                        i, e->from, e->to,
                        e->flags.cycles,
                        e->flags.mispred ? "M" : " ",
                        e->flags.predicted ? "P" : " ",
                        e->flags.abort ? "A" : " ",
                        e->flags.in_tx ? "T" : " ",
                        (unsigned)e->flags.reserved);
        }
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
                                       union perf_event *event,
                                       struct perf_sample *sample)
{
        u64 sample_type = __perf_evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !perf_evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);

        if (sample)
                perf_evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->attr.sample_type;

        if (sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(evsel, sample);

        if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !has_branch_callstack(evsel))
                branch_stack__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf("... data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
                                               union perf_event *event,
                                               struct perf_sample *sample)
{
        const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct machine *machine;

        if (perf_guest &&
            ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = machines__find(machines, pid);
                if (!machine)
                        machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &machines->host;
}

static int deliver_sample_value(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
        struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

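        /*
         * PERF_SAMPLE_READ values are cumulative counter readings, so
         * derive this sample's period from the delta against the value
         * previously seen for this id.
         */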
        if (sid) {
                sample->id     = v->id;
                sample->period = v->value - sid->period;
                sid->period    = v->value;
        }

        if (!sid || sid->evsel == NULL) {
                ++evlist->stats.nr_unknown_id;
                return 0;
        }

        return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        int ret = -EINVAL;
        u64 i;

        for (i = 0; i < sample->read.group.nr; i++) {
                ret = deliver_sample_value(evlist, tool, event, sample,
                                           &sample->read.group.values[i],
                                           machine);
                if (ret)
                        break;
        }

        return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
                            struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct perf_evsel *evsel,
                            struct machine *machine)
{
        /* We know evsel != NULL. */
        u64 sample_type = evsel->attr.sample_type;
        u64 read_format = evsel->attr.read_format;

        /* Standard sample delivery. */
        if (!(sample_type & PERF_SAMPLE_READ))
                return tool->sample(tool, event, sample, evsel, machine);

        /* For PERF_SAMPLE_READ we have either single or group mode. */
        if (read_format & PERF_FORMAT_GROUP)
                return deliver_sample_group(evlist, tool, event, sample,
                                            machine);
        else
                return deliver_sample_value(evlist, tool, event, sample,
                                            &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
                                   struct perf_evlist *evlist,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct perf_tool *tool, u64 file_offset)
{
        struct perf_evsel *evsel;
        struct machine *machine;

        dump_event(evlist, event, file_offset, sample);

        evsel = perf_evlist__id2evsel(evlist, sample->id);

        machine = machines__find_for_cpumode(machines, event, sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                if (evsel == NULL) {
                        ++evlist->stats.nr_unknown_id;
                        return 0;
                }
                dump_sample(evsel, event, sample);
                if (machine == NULL) {
                        ++evlist->stats.nr_unprocessable_samples;
                        return 0;
                }
                return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_MMAP2:
                if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
                        ++evlist->stats.nr_proc_map_timeout;
                return tool->mmap2(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
        case PERF_RECORD_FORK:
                return tool->fork(tool, event, sample, machine);
        case PERF_RECORD_EXIT:
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
                        evlist->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_LOST_SAMPLES:
                if (tool->lost_samples == perf_event__process_lost_samples)
                        evlist->stats.total_lost_samples += event->lost_samples.lost;
                return tool->lost_samples(tool, event, sample, machine);
        case PERF_RECORD_READ:
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        case PERF_RECORD_AUX:
                if (tool->aux == perf_event__process_aux &&
                    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED))
                        evlist->stats.total_aux_lost += 1;
                return tool->aux(tool, event, sample, machine);
        case PERF_RECORD_ITRACE_START:
                return tool->itrace_start(tool, event, sample, machine);
        case PERF_RECORD_SWITCH:
        case PERF_RECORD_SWITCH_CPU_WIDE:
                return tool->context_switch(tool, event, sample, machine);
        default:
                ++evlist->stats.nr_unknown_events;
                return -1;
        }
}

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
        int ret;

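        /*
         * Give the auxtrace layer first refusal: a positive return means
         * it consumed the event (e.g. queued it for later decoding), so
         * it must not also be delivered to the machines layer.
         */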
        ret = auxtrace__process_event(session, event, sample, tool);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return 0;

        return machines__deliver_event(&session->machines, session->evlist,
                                       event, sample, tool, file_offset);
}

static s64 perf_session__process_user_event(struct perf_session *session,
                                            union perf_event *event,
                                            u64 file_offset)
{
        struct ordered_events *oe = &session->ordered_events;
        struct perf_tool *tool = session->tool;
        int fd = perf_data_file__fd(session->file);
        int err;

        dump_event(session->evlist, event, file_offset, NULL);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                err = tool->attr(tool, event, &session->evlist);
                if (err == 0) {
                        perf_session__set_id_hdr_size(session);
                        perf_session__set_comm_exec(session);
                }
                return err;
        case PERF_RECORD_HEADER_EVENT_TYPE:
                /*
                 * Deprecated, but we need to handle it for the sake
                 * of old data files created in pipe mode.
                 */
                return 0;
        case PERF_RECORD_HEADER_TRACING_DATA:
                /* setup for reading amidst mmap */
                lseek(fd, file_offset, SEEK_SET);
                return tool->tracing_data(tool, event, session);
        case PERF_RECORD_HEADER_BUILD_ID:
                return tool->build_id(tool, event, session);
        case PERF_RECORD_FINISHED_ROUND:
                return tool->finished_round(tool, event, oe);
        case PERF_RECORD_ID_INDEX:
                return tool->id_index(tool, event, session);
        case PERF_RECORD_AUXTRACE_INFO:
                return tool->auxtrace_info(tool, event, session);
        case PERF_RECORD_AUXTRACE:
                /* setup for reading amidst mmap */
                lseek(fd, file_offset + event->header.size, SEEK_SET);
                return tool->auxtrace(tool, event, session);
        case PERF_RECORD_AUXTRACE_ERROR:
                perf_session__auxtrace_error_inc(session, event);
                return tool->auxtrace_error(tool, event, session);
        default:
                return -EINVAL;
        }
}

int perf_session__deliver_synth_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample)
{
        struct perf_evlist *evlist = session->evlist;
        struct perf_tool *tool = session->tool;

        events_stats__inc(&evlist->stats, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, 0);

        return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
        perf_event__swap_op swap;

        swap = perf_event__swap_ops[event->header.type];
        if (swap)
                swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
                             void *buf, size_t buf_sz,
                             union perf_event **event_ptr,
                             struct perf_sample *sample)
{
        union perf_event *event;
        size_t hdr_sz, rest;
        int fd;

        if (session->one_mmap && !session->header.needs_swap) {
                event = file_offset - session->one_mmap_offset +
                        session->one_mmap_addr;
                goto out_parse_sample;
        }

        if (perf_data_file__is_pipe(session->file))
                return -1;

        fd = perf_data_file__fd(session->file);
        hdr_sz = sizeof(struct perf_event_header);

        if (buf_sz < hdr_sz)
                return -1;

        if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
            readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
                return -1;

        event = (union perf_event *)buf;

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        if (event->header.size < hdr_sz || event->header.size > buf_sz)
                return -1;

        /* Read the payload after the header, not over it. */
        buf += hdr_sz;
        rest = event->header.size - hdr_sz;

        if (readn(fd, buf, rest) != (ssize_t)rest)
                return -1;

        if (session->header.needs_swap)
                event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

        if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
            perf_evlist__parse_sample(session->evlist, event, sample))
                return -1;

        *event_ptr = event;

        return 0;
}

static s64 perf_session__process_event(struct perf_session *session,
                                       union perf_event *event, u64 file_offset)
{
        struct perf_evlist *evlist = session->evlist;
        struct perf_tool *tool = session->tool;
        struct perf_sample sample;
        int ret;

        if (session->header.needs_swap)
                event_swap(event, perf_evlist__sample_id_all(evlist));

        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;

        events_stats__inc(&evlist->stats, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, file_offset);

        /*
         * For all kernel events we get the sample data
         */
        ret = perf_evlist__parse_sample(evlist, event, &sample);
        if (ret)
                return ret;

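        /*
         * Queue for timestamp ordering when requested; -ETIME means the
         * event's timestamp is already below the last flush point, so
         * fall through and deliver it directly.
         */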
        if (tool->ordered_events) {
                ret = perf_session__queue_event(session, event, &sample, file_offset);
                if (ret != -ETIME)
                        return ret;
        }

        return perf_session__deliver_event(session, event, &sample, tool,
                                           file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
        hdr->type = bswap_32(hdr->type);
        hdr->misc = bswap_16(hdr->misc);
        hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
        return machine__findnew_thread(&session->machines.host, -1, pid);
}

struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
        struct thread *thread;

        thread = machine__findnew_thread(&session->machines.host, 0, 0);
        if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
                pr_err("problem inserting idle task.\n");
                thread = NULL;
        }

        return thread;
}
1326
1327static void perf_session__warn_about_errors(const struct perf_session *session)
1328{
1329        const struct events_stats *stats = &session->evlist->stats;
1330        const struct ordered_events *oe = &session->ordered_events;
1331
1332        if (session->tool->lost == perf_event__process_lost &&
1333            stats->nr_events[PERF_RECORD_LOST] != 0) {
1334                ui__warning("Processed %d events and lost %d chunks!\n\n"
1335                            "Check IO/CPU overload!\n\n",
1336                            stats->nr_events[0],
1337                            stats->nr_events[PERF_RECORD_LOST]);
1338        }
1339
1340        if (session->tool->lost_samples == perf_event__process_lost_samples) {
1341                double drop_rate;
1342
1343                drop_rate = (double)stats->total_lost_samples /
1344                            (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1345                if (drop_rate > 0.05) {
1346                        ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
1347                                    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1348                                    drop_rate * 100.0);
1349                }
1350        }
1351
1352        if (session->tool->aux == perf_event__process_aux &&
1353            stats->total_aux_lost != 0) {
1354                ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1355                            stats->total_aux_lost,
1356                            stats->nr_events[PERF_RECORD_AUX]);
1357        }
1358
1359        if (stats->nr_unknown_events != 0) {
1360                ui__warning("Found %u unknown events!\n\n"
1361                            "Is this an older tool processing a perf.data "
1362                            "file generated by a more recent tool?\n\n"
1363                            "If that is not the case, consider "
1364                            "reporting to linux-kernel@vger.kernel.org.\n\n",
1365                            stats->nr_unknown_events);
1366        }
1367
1368        if (stats->nr_unknown_id != 0) {
1369                ui__warning("%u samples with id not present in the header\n",
1370                            stats->nr_unknown_id);
1371        }
1372
1373        if (stats->nr_invalid_chains != 0) {
1374                ui__warning("Found invalid callchains!\n\n"
1375                            "%u out of %u events were discarded for this reason.\n\n"
1376                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1377                            stats->nr_invalid_chains,
1378                            stats->nr_events[PERF_RECORD_SAMPLE]);
1379        }
1380
1381        if (stats->nr_unprocessable_samples != 0) {
1382                ui__warning("%u unprocessable samples recorded.\n"
1383                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
1384                            stats->nr_unprocessable_samples);
1385        }
1386
1387        if (oe->nr_unordered_events != 0)
1388                ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1389
1390        events_stats__auxtrace_error_warn(stats);
1391
1392        if (stats->nr_proc_map_timeout != 0) {
1393                ui__warning("%d map information files for pre-existing threads were\n"
1394                            "not processed; if there are samples for those addresses,\n"
1395                            "they will not be resolved. You can find out which threads\n"
1396                            "these are by running with -v and redirecting the output\n"
1397                            "to a file.\n"
1398                            "The time limit for processing a proc map may be too short;\n"
1399                            "increase it with --proc-map-timeout.\n",
1400                            stats->nr_proc_map_timeout);
1401        }
1402}
1403
1404static int perf_session__flush_thread_stack(struct thread *thread,
1405                                            void *p __maybe_unused)
1406{
1407        return thread_stack__flush(thread);
1408}
1409
1410static int perf_session__flush_thread_stacks(struct perf_session *session)
1411{
1412        return machines__for_each_thread(&session->machines,
1413                                         perf_session__flush_thread_stack,
1414                                         NULL);
1415}
1416
1417volatile int session_done;
1418
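/*
 * A pipe cannot be mmaped, so events are consumed sequentially: read a
 * perf_event_header, grow the buffer with realloc() if this event is
 * bigger than any seen so far, read the payload and process the event.
 * The loop ends at EOF or once session_done is set (elsewhere, e.g. from
 * a signal handler).
 */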
1419static int __perf_session__process_pipe_events(struct perf_session *session)
1420{
1421        struct ordered_events *oe = &session->ordered_events;
1422        struct perf_tool *tool = session->tool;
1423        int fd = perf_data_file__fd(session->file);
1424        union perf_event *event;
1425        uint32_t size, cur_size = 0;
1426        void *buf = NULL;
1427        s64 skip = 0;
1428        u64 head;
1429        ssize_t err;
1430        void *p;
1431
1432        perf_tool__fill_defaults(tool);
1433
1434        head = 0;
1435        cur_size = sizeof(union perf_event);
1436
1437        buf = malloc(cur_size);
1438        if (!buf)
1439                return -errno;
1440more:
1441        event = buf;
1442        err = readn(fd, event, sizeof(struct perf_event_header));
1443        if (err <= 0) {
1444                if (err == 0)
1445                        goto done;
1446
1447                pr_err("failed to read event header\n");
1448                goto out_err;
1449        }
1450
1451        if (session->header.needs_swap)
1452                perf_event_header__bswap(&event->header);
1453
1454        size = event->header.size;
1455        if (size < sizeof(struct perf_event_header)) {
1456                pr_err("bad event header size\n");
1457                goto out_err;
1458        }
1459
1460        if (size > cur_size) {
1461                void *new = realloc(buf, size);
1462                if (!new) {
1463                        pr_err("failed to allocate memory to read event\n");
1464                        goto out_err;
1465                }
1466                buf = new;
1467                cur_size = size;
1468                event = buf;
1469        }
1470        p = event;
1471        p += sizeof(struct perf_event_header);
1472
1473        if (size - sizeof(struct perf_event_header)) {
1474                err = readn(fd, p, size - sizeof(struct perf_event_header));
1475                if (err <= 0) {
1476                        if (err == 0) {
1477                                pr_err("unexpected end of event stream\n");
1478                                goto done;
1479                        }
1480
1481                        pr_err("failed to read event data\n");
1482                        goto out_err;
1483                }
1484        }
1485
1486        if ((skip = perf_session__process_event(session, event, head)) < 0) {
1487                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1488                       head, event->header.size, event->header.type);
1489                err = -EINVAL;
1490                goto out_err;
1491        }
1492
1493        head += size;
1494
1495        if (skip > 0)
1496                head += skip;
1497
1498        if (!session_done())
1499                goto more;
1500done:
1501        /* do the final flush for ordered samples */
1502        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1503        if (err)
1504                goto out_err;
1505        err = auxtrace__flush_events(session, tool);
1506        if (err)
1507                goto out_err;
1508        err = perf_session__flush_thread_stacks(session);
1509out_err:
1510        free(buf);
1511        perf_session__warn_about_errors(session);
1512        ordered_events__free(&session->ordered_events);
1513        auxtrace__free_events(session);
1514        return err;
1515}
1516
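/*
 * Return a pointer to the event at offset 'head' in the current mmap
 * window, or NULL when the event header, or the whole event, would cross
 * the end of the window, in which case the caller remaps further into the
 * file. The header is byte-swapped in place when needed and swapped back
 * if the event is not returned, so the next window sees pristine bytes.
 */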
1517static union perf_event *
1518fetch_mmaped_event(struct perf_session *session,
1519                   u64 head, size_t mmap_size, char *buf)
1520{
1521        union perf_event *event;
1522
1523        /*
1524         * Ensure we have enough space remaining to read
1525         * the size of the event from its header.
1526         */
1527        if (head + sizeof(event->header) > mmap_size)
1528                return NULL;
1529
1530        event = (union perf_event *)(buf + head);
1531
1532        if (session->header.needs_swap)
1533                perf_event_header__bswap(&event->header);
1534
1535        if (head + event->header.size > mmap_size) {
1536                /* We're not fetching the event so swap back again */
1537                if (session->header.needs_swap)
1538                        perf_event_header__bswap(&event->header);
1539                return NULL;
1540        }
1541
1542        return event;
1543}
1544
1545/*
1546 * On 64bit we can mmap the data file in one go. No need for tiny mmap
1547 * slices. On 32bit we use 32MB.
1548 */
1549#if BITS_PER_LONG == 64
1550#define MMAP_SIZE ULLONG_MAX
1551#define NUM_MMAPS 1
1552#else
1553#define MMAP_SIZE (32 * 1024 * 1024ULL)
1554#define NUM_MMAPS 128
1555#endif
1556
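/*
 * Process an on-disk perf.data file by mmaping it in MMAP_SIZE windows.
 * Windows must start page aligned, so 'head' is the offset of the next
 * event relative to the start of the current window. When an event no
 * longer fits, fetch_mmaped_event() returns NULL and the window is moved
 * forward to the page containing 'head'.
 *
 * A worked example of the remap arithmetic below, with hypothetical
 * numbers and 4KiB pages: if head == 0x8123 when the window runs out,
 * page_offset becomes 4096 * (0x8123 / 4096) == 0x8000, so the next
 * window is mapped 0x8000 bytes further into the file and head becomes
 * 0x8123 - 0x8000 == 0x123.
 */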
1557static int __perf_session__process_events(struct perf_session *session,
1558                                          u64 data_offset, u64 data_size,
1559                                          u64 file_size)
1560{
1561        struct ordered_events *oe = &session->ordered_events;
1562        struct perf_tool *tool = session->tool;
1563        int fd = perf_data_file__fd(session->file);
1564        u64 head, page_offset, file_offset, file_pos, size;
1565        int err, mmap_prot, mmap_flags, map_idx = 0;
1566        size_t  mmap_size;
1567        char *buf, *mmaps[NUM_MMAPS];
1568        union perf_event *event;
1569        struct ui_progress prog;
1570        s64 skip;
1571
1572        perf_tool__fill_defaults(tool);
1573
1574        page_offset = page_size * (data_offset / page_size);
1575        file_offset = page_offset;
1576        head = data_offset - page_offset;
1577
1578        if (data_size == 0)
1579                goto out;
1580
1581        if (data_offset + data_size < file_size)
1582                file_size = data_offset + data_size;
1583
1584        ui_progress__init(&prog, file_size, "Processing events...");
1585
1586        mmap_size = MMAP_SIZE;
1587        if (mmap_size > file_size) {
1588                mmap_size = file_size;
1589                session->one_mmap = true;
1590        }
1591
1592        memset(mmaps, 0, sizeof(mmaps));
1593
1594        mmap_prot  = PROT_READ;
1595        mmap_flags = MAP_SHARED;
1596
1597        if (session->header.needs_swap) {
1598                mmap_prot  |= PROT_WRITE;
1599                mmap_flags = MAP_PRIVATE;
1600        }
1601remap:
1602        buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
1603                   file_offset);
1604        if (buf == MAP_FAILED) {
1605                pr_err("failed to mmap file\n");
1606                err = -errno;
1607                goto out_err;
1608        }
1609        mmaps[map_idx] = buf;
1610        map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1611        file_pos = file_offset + head;
1612        if (session->one_mmap) {
1613                session->one_mmap_addr = buf;
1614                session->one_mmap_offset = file_offset;
1615        }
1616
1617more:
1618        event = fetch_mmaped_event(session, head, mmap_size, buf);
1619        if (!event) {
1620                if (mmaps[map_idx]) {
1621                        munmap(mmaps[map_idx], mmap_size);
1622                        mmaps[map_idx] = NULL;
1623                }
1624
1625                page_offset = page_size * (head / page_size);
1626                file_offset += page_offset;
1627                head -= page_offset;
1628                goto remap;
1629        }
1630
1631        size = event->header.size;
1632
1633        if (size < sizeof(struct perf_event_header) ||
1634            (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1635                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1636                       file_offset + head, event->header.size,
1637                       event->header.type);
1638                err = -EINVAL;
1639                goto out_err;
1640        }
1641
1642        if (skip)
1643                size += skip;
1644
1645        head += size;
1646        file_pos += size;
1647
1648        ui_progress__update(&prog, size);
1649
1650        if (session_done())
1651                goto out;
1652
1653        if (file_pos < file_size)
1654                goto more;
1655
1656out:
1657        /* do the final flush for ordered samples */
1658        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1659        if (err)
1660                goto out_err;
1661        err = auxtrace__flush_events(session, tool);
1662        if (err)
1663                goto out_err;
1664        err = perf_session__flush_thread_stacks(session);
1665out_err:
1666        ui_progress__finish();
1667        perf_session__warn_about_errors(session);
1668        ordered_events__free(&session->ordered_events);
1669        auxtrace__free_events(session);
1670        session->one_mmap = false;
1671        return err;
1672}
1673
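/*
 * Entry point for event processing: register the idle thread, then use
 * the mmap based reader for regular files or the sequential reader for
 * pipes.
 */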
1674int perf_session__process_events(struct perf_session *session)
1675{
1676        u64 size = perf_data_file__size(session->file);
1677        int err;
1678
1679        if (perf_session__register_idle_thread(session) == NULL)
1680                return -ENOMEM;
1681
1682        if (!perf_data_file__is_pipe(session->file))
1683                err = __perf_session__process_events(session,
1684                                                     session->header.data_offset,
1685                                                     session->header.data_size, size);
1686        else
1687                err = __perf_session__process_pipe_events(session);
1688
1689        return err;
1690}
1691
1692bool perf_session__has_traces(struct perf_session *session, const char *msg)
1693{
1694        struct perf_evsel *evsel;
1695
1696        evlist__for_each(session->evlist, evsel) {
1697                if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1698                        return true;
1699        }
1700
1701        pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
1702        return false;
1703}
1704
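/*
 * Record a reference relocation symbol (its name, truncated at a ']' if
 * one is present, and its address) shared by all the kernel map types, so
 * that symbol resolution can later adjust for a kernel that was relocated
 * relative to the addresses captured in kallsyms.
 */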
1705int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1706                                     const char *symbol_name, u64 addr)
1707{
1708        char *bracket;
1709        enum map_type i;
1710        struct ref_reloc_sym *ref;
1711
1712        ref = zalloc(sizeof(struct ref_reloc_sym));
1713        if (ref == NULL)
1714                return -ENOMEM;
1715
1716        ref->name = strdup(symbol_name);
1717        if (ref->name == NULL) {
1718                free(ref);
1719                return -ENOMEM;
1720        }
1721
1722        bracket = strchr(ref->name, ']');
1723        if (bracket)
1724                *bracket = '\0';
1725
1726        ref->addr = addr;
1727
1728        for (i = 0; i < MAP__NR_TYPES; ++i) {
1729                struct kmap *kmap = map__kmap(maps[i]);
1730
1731                if (!kmap)
1732                        continue;
1733                kmap->ref_reloc_sym = ref;
1734        }
1735
1736        return 0;
1737}
1738
1739size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
1740{
1741        return machines__fprintf_dsos(&session->machines, fp);
1742}
1743
1744size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
1745                                          bool (skip)(struct dso *dso, int parm), int parm)
1746{
1747        return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
1748}
1749
1750size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1751{
1752        size_t ret;
1753        const char *msg = "";
1754
1755        if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
1756                msg = " (excludes events decoded/synthesized from AUX area data, e.g. instruction traces)";
1757
1758        ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
1759
1760        ret += events_stats__fprintf(&session->evlist->stats, fp);
1761        return ret;
1762}
1763
1764size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
1765{
1766        /*
1767         * FIXME: Here we have to actually print all the machines in this
1768         * session, not just the host...
1769         */
1770        return machine__fprintf(&session->machines.host, fp);
1771}
1772
1773struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1774                                              unsigned int type)
1775{
1776        struct perf_evsel *pos;
1777
1778        evlist__for_each(session->evlist, pos) {
1779                if (pos->attr.type == type)
1780                        return pos;
1781        }
1782        return NULL;
1783}
1784
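/*
 * Print a resolved sample, e.g. for 'perf script': print_opts is a bitmask
 * of PRINT_IP_OPT_* flags selecting the ip, the symbol (optionally with
 * offset), the dso, the source line and a one line layout. When callchains
 * are used, up to stack_depth frames are resolved and printed, skipping
 * entries whose symbol is marked ->ignore; otherwise only the sample ip
 * itself is printed.
 */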
1785void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
1786                          struct addr_location *al,
1787                          unsigned int print_opts, unsigned int stack_depth)
1788{
1789        struct callchain_cursor_node *node;
1790        int print_ip = print_opts & PRINT_IP_OPT_IP;
1791        int print_sym = print_opts & PRINT_IP_OPT_SYM;
1792        int print_dso = print_opts & PRINT_IP_OPT_DSO;
1793        int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
1794        int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
1795        int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
1796        char s = print_oneline ? ' ' : '\t';
1797
1798        if (symbol_conf.use_callchain && sample->callchain) {
1799                struct addr_location node_al;
1800
1801                if (thread__resolve_callchain(al->thread, evsel,
1802                                              sample, NULL, NULL,
1803                                              stack_depth) != 0) {
1804                        if (verbose)
1805                                error("Failed to resolve callchain. Skipping\n");
1806                        return;
1807                }
1808                callchain_cursor_commit(&callchain_cursor);
1809
1810                if (print_symoffset)
1811                        node_al = *al;
1812
1813                while (stack_depth) {
1814                        u64 addr = 0;
1815
1816                        node = callchain_cursor_current(&callchain_cursor);
1817                        if (!node)
1818                                break;
1819
1820                        if (node->sym && node->sym->ignore)
1821                                goto next;
1822
1823                        if (print_ip)
1824                                printf("%c%16" PRIx64, s, node->ip);
1825
1826                        if (node->map)
1827                                addr = node->map->map_ip(node->map, node->ip);
1828
1829                        if (print_sym) {
1830                                printf(" ");
1831                                if (print_symoffset) {
1832                                        node_al.addr = addr;
1833                                        node_al.map  = node->map;
1834                                        symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
1835                                } else
1836                                        symbol__fprintf_symname(node->sym, stdout);
1837                        }
1838
1839                        if (print_dso) {
1840                                printf(" (");
1841                                map__fprintf_dsoname(node->map, stdout);
1842                                printf(")");
1843                        }
1844
1845                        if (print_srcline)
1846                                map__fprintf_srcline(node->map, addr, "\n  ",
1847                                                     stdout);
1848
1849                        if (!print_oneline)
1850                                printf("\n");
1851
1852                        stack_depth--;
1853next:
1854                        callchain_cursor_advance(&callchain_cursor);
1855                }
1856
1857        } else {
1858                if (al->sym && al->sym->ignore)
1859                        return;
1860
1861                if (print_ip)
1862                        printf("%16" PRIx64, sample->ip);
1863
1864                if (print_sym) {
1865                        printf(" ");
1866                        if (print_symoffset)
1867                                symbol__fprintf_symname_offs(al->sym, al,
1868                                                             stdout);
1869                        else
1870                                symbol__fprintf_symname(al->sym, stdout);
1871                }
1872
1873                if (print_dso) {
1874                        printf(" (");
1875                        map__fprintf_dsoname(al->map, stdout);
1876                        printf(")");
1877                }
1878
1879                if (print_srcline)
1880                        map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
1881        }
1882}
1883
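/*
 * Fill in a bitmap of the CPUs named in cpu_list (cpu_map__new() syntax,
 * e.g. "0-3,6"), after checking that every event type in the session
 * sampled PERF_SAMPLE_CPU, without which filtering by CPU is meaningless.
 * A sketch of the intended use, from a hypothetical caller:
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) < 0)
 *		return -1;
 *	... then, per sample: test_bit(sample->cpu, cpu_bitmap) ...
 */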
1884int perf_session__cpu_bitmap(struct perf_session *session,
1885                             const char *cpu_list, unsigned long *cpu_bitmap)
1886{
1887        int i, err = -1;
1888        struct cpu_map *map;
1889
1890        for (i = 0; i < PERF_TYPE_MAX; ++i) {
1891                struct perf_evsel *evsel;
1892
1893                evsel = perf_session__find_first_evtype(session, i);
1894                if (!evsel)
1895                        continue;
1896
1897                if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
1898                        pr_err("File does not contain CPU events. "
1899                               "Remove -C option to proceed.\n");
1900                        return -1;
1901                }
1902        }
1903
1904        map = cpu_map__new(cpu_list);
1905        if (map == NULL) {
1906                pr_err("Invalid cpu_list\n");
1907                return -1;
1908        }
1909
1910        for (i = 0; i < map->nr; i++) {
1911                int cpu = map->map[i];
1912
1913                if (cpu >= MAX_NR_CPUS) {
1914                        pr_err("Requested CPU %d too large. "
1915                               "Consider raising MAX_NR_CPUS\n", cpu);
1916                        goto out_delete_map;
1917                }
1918
1919                set_bit(cpu, cpu_bitmap);
1920        }
1921
1922        err = 0;
1923
1924out_delete_map:
1925        cpu_map__put(map);
1926        return err;
1927}
1928
1929void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
1930                                bool full)
1931{
1932        struct stat st;
1933        int fd, ret;
1934
1935        if (session == NULL || fp == NULL)
1936                return;
1937
1938        fd = perf_data_file__fd(session->file);
1939
1940        ret = fstat(fd, &st);
1941        if (ret == -1)
1942                return;
1943
1944        fprintf(fp, "# ========\n");
1945        fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
1946        perf_header__fprintf_info(session, fp, full);
1947        fprintf(fp, "# ========\n#\n");
1948}
1949
1950
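/*
 * Attach handlers to the session's tracepoint evsels by name; names with
 * no matching evsel are silently skipped, and -EEXIST is returned if an
 * evsel already has a handler. A sketch of typical usage through the
 * perf_session__set_tracepoints_handlers() wrapper from session.h, with
 * process_sched_switch being a hypothetical tool-local callback:
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch, },
 *	};
 *
 *	if (perf_session__set_tracepoints_handlers(session, handlers))
 *		goto out_error;
 */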
1951int __perf_session__set_tracepoints_handlers(struct perf_session *session,
1952                                             const struct perf_evsel_str_handler *assocs,
1953                                             size_t nr_assocs)
1954{
1955        struct perf_evsel *evsel;
1956        size_t i;
1957        int err;
1958
1959        for (i = 0; i < nr_assocs; i++) {
1960                /*
1961                 * Adding a handler for an event that is not in this
1962                 * session: just ignore it.
1963                 */
1964                evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
1965                if (evsel == NULL)
1966                        continue;
1967
1968                err = -EEXIST;
1969                if (evsel->handler != NULL)
1970                        goto out;
1971                evsel->handler = assocs[i].handler;
1972        }
1973
1974        err = 0;
1975out:
1976        return err;
1977}
1978
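/*
 * Process a PERF_RECORD_ID_INDEX event: check that the advertised number
 * of entries fits in the event as sized by its header, then copy each
 * entry's idx/cpu/tid into the perf_sample_id found by id, so samples can
 * later be related back to the mmap buffer they were read from.
 */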
1979int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
1980                                 union perf_event *event,
1981                                 struct perf_session *session)
1982{
1983        struct perf_evlist *evlist = session->evlist;
1984        struct id_index_event *ie = &event->id_index;
1985        size_t i, nr, max_nr;
1986
1987        max_nr = (ie->header.size - sizeof(struct id_index_event)) /
1988                 sizeof(struct id_index_entry);
1989        nr = ie->nr;
1990        if (nr > max_nr)
1991                return -EINVAL;
1992
1993        if (dump_trace)
1994                fprintf(stdout, " nr: %zu\n", nr);
1995
1996        for (i = 0; i < nr; i++) {
1997                struct id_index_entry *e = &ie->entries[i];
1998                struct perf_sample_id *sid;
1999
2000                if (dump_trace) {
2001                        fprintf(stdout, " ... id: %"PRIu64, e->id);
2002                        fprintf(stdout, "  idx: %"PRIu64, e->idx);
2003                        fprintf(stdout, "  cpu: %"PRId64, e->cpu);
2004                        fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
2005                }
2006
2007                sid = perf_evlist__id2sid(evlist, e->id);
2008                if (!sid)
2009                        return -ENOENT;
2010                sid->idx = e->idx;
2011                sid->cpu = e->cpu;
2012                sid->tid = e->tid;
2013        }
2014        return 0;
2015}
2016
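/*
 * Synthesize PERF_RECORD_ID_INDEX events covering every sample id in the
 * evlist. header.size is a u16, so at most max_nr entries fit in a single
 * event: a full buffer is flushed through process() and refilled, and the
 * final event is shrunk to the number of entries actually remaining.
 */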
2017int perf_event__synthesize_id_index(struct perf_tool *tool,
2018                                    perf_event__handler_t process,
2019                                    struct perf_evlist *evlist,
2020                                    struct machine *machine)
2021{
2022        union perf_event *ev;
2023        struct perf_evsel *evsel;
2024        size_t nr = 0, i = 0, sz, max_nr, n;
2025        int err;
2026
2027        pr_debug2("Synthesizing id index\n");
2028
2029        max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2030                 sizeof(struct id_index_entry);
2031
2032        evlist__for_each(evlist, evsel)
2033                nr += evsel->ids;
2034
2035        n = nr > max_nr ? max_nr : nr;
2036        sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2037        ev = zalloc(sz);
2038        if (!ev)
2039                return -ENOMEM;
2040
2041        ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2042        ev->id_index.header.size = sz;
2043        ev->id_index.nr = n;
2044
2045        evlist__for_each(evlist, evsel) {
2046                u32 j;
2047
2048                for (j = 0; j < evsel->ids; j++) {
2049                        struct id_index_entry *e;
2050                        struct perf_sample_id *sid;
2051
2052                        if (i >= n) {
2053                                err = process(tool, ev, NULL, machine);
2054                                if (err)
2055                                        goto out_err;
2056                                nr -= n;
2057                                i = 0;
2058                        }
2059
2060                        e = &ev->id_index.entries[i++];
2061
2062                        e->id = evsel->id[j];
2063
2064                        sid = perf_evlist__id2sid(evlist, e->id);
2065                        if (!sid) {
2066                                free(ev);
2067                                return -ENOENT;
2068                        }
2069
2070                        e->idx = sid->idx;
2071                        e->cpu = sid->cpu;
2072                        e->tid = sid->tid;
2073                }
2074        }
2075
2076        sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2077        ev->id_index.header.size = sz;
2078        ev->id_index.nr = nr;
2079
2080        err = process(tool, ev, NULL, machine);
2081out_err:
2082        free(ev);
2083
2084        return err;
2085}
2086