linux/tools/perf/util/event.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>
#include <linux/kernel.h>

#include "../perf.h"
#include "build-id.h"
#include "perf_regs.h"

struct mmap_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        char filename[PATH_MAX];
};

struct mmap2_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        u32 maj;
        u32 min;
        u64 ino;
        u64 ino_generation;
        u32 prot;
        u32 flags;
        char filename[PATH_MAX];
};

struct comm_event {
        struct perf_event_header header;
        u32 pid, tid;
        char comm[16];
};

struct namespaces_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 nr_namespaces;
        struct perf_ns_link_info link_info[];
};

struct fork_event {
        struct perf_event_header header;
        u32 pid, ppid;
        u32 tid, ptid;
        u64 time;
};

struct lost_event {
        struct perf_event_header header;
        u64 id;
        u64 lost;
};

struct lost_samples_event {
        struct perf_event_header header;
        u64 lost;
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
 */
struct read_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 value;
        u64 time_enabled;
        u64 time_running;
        u64 id;
};

struct throttle_event {
        struct perf_event_header header;
        u64 time;
        u64 id;
        u64 stream_id;
};

#define PERF_SAMPLE_MASK                                \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID |             \
         PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |          \
         PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |       \
         PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |         \
         PERF_SAMPLE_IDENTIFIER)

/* perf sample has a 16-bit size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)

struct sample_event {
        struct perf_event_header        header;
        u64 array[];
};

struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;

        /* Cached values/mask filled by first register access. */
        u64 cache_regs[PERF_REGS_MAX];
        u64 cache_mask;
};

struct stack_dump {
        u16 offset;
        u64 size;
        char *data;
};

struct sample_read_value {
        u64 value;
        u64 id;
};

struct sample_read {
        u64 time_enabled;
        u64 time_running;
        union {
                struct {
                        u64 nr;
                        struct sample_read_value *values;
                } group;
                struct sample_read_value one;
        };
};
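
/*
 * Illustrative sketch (not part of the original header): which arm of the
 * union above is valid depends on whether PERF_FORMAT_GROUP was set in the
 * event's read_format.  A consumer might do something like the following,
 * where use() stands for whatever the caller does with each value:
 *
 *	if (read_format & PERF_FORMAT_GROUP) {
 *		u64 i;
 *
 *		for (i = 0; i < sample->read.group.nr; i++)
 *			use(sample->read.group.values[i].id,
 *			    sample->read.group.values[i].value);
 *	} else {
 *		use(sample->read.one.id, sample->read.one.value);
 *	}
 */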

struct ip_callchain {
        u64 nr;
        u64 ips[0];
};

struct branch_flags {
        u64 mispred:1;
        u64 predicted:1;
        u64 in_tx:1;
        u64 abort:1;
        u64 cycles:16;
        u64 type:4;
        u64 reserved:40;
};

struct branch_entry {
        u64                     from;
        u64                     to;
        struct branch_flags     flags;
};

struct branch_stack {
        u64                     nr;
        struct branch_entry     entries[0];
};

enum {
        PERF_IP_FLAG_BRANCH             = 1ULL << 0,
        PERF_IP_FLAG_CALL               = 1ULL << 1,
        PERF_IP_FLAG_RETURN             = 1ULL << 2,
        PERF_IP_FLAG_CONDITIONAL        = 1ULL << 3,
        PERF_IP_FLAG_SYSCALLRET         = 1ULL << 4,
        PERF_IP_FLAG_ASYNC              = 1ULL << 5,
        PERF_IP_FLAG_INTERRUPT          = 1ULL << 6,
        PERF_IP_FLAG_TX_ABORT           = 1ULL << 7,
        PERF_IP_FLAG_TRACE_BEGIN        = 1ULL << 8,
        PERF_IP_FLAG_TRACE_END          = 1ULL << 9,
        PERF_IP_FLAG_IN_TX              = 1ULL << 10,
};

#define PERF_IP_FLAG_CHARS "bcrosyiABEx"

#define PERF_BRANCH_MASK                (\
        PERF_IP_FLAG_BRANCH             |\
        PERF_IP_FLAG_CALL               |\
        PERF_IP_FLAG_RETURN             |\
        PERF_IP_FLAG_CONDITIONAL        |\
        PERF_IP_FLAG_SYSCALLRET         |\
        PERF_IP_FLAG_ASYNC              |\
        PERF_IP_FLAG_INTERRUPT          |\
        PERF_IP_FLAG_TX_ABORT           |\
        PERF_IP_FLAG_TRACE_BEGIN        |\
        PERF_IP_FLAG_TRACE_END)

#define MAX_INSN 16

struct perf_sample {
        u64 ip;
        u32 pid, tid;
        u64 time;
        u64 addr;
        u64 id;
        u64 stream_id;
        u64 period;
        u64 weight;
        u64 transaction;
        u32 cpu;
        u32 raw_size;
        u64 data_src;
        u64 phys_addr;
        u32 flags;
        u16 insn_len;
        u8  cpumode;
        u16 misc;
        char insn[MAX_INSN];
        void *raw_data;
        struct ip_callchain *callchain;
        struct branch_stack *branch_stack;
        struct regs_dump  user_regs;
        struct regs_dump  intr_regs;
        struct stack_dump user_stack;
        struct sample_read read;
};

#define PERF_MEM_DATA_SRC_NONE \
        (PERF_MEM_S(OP, NA) |\
         PERF_MEM_S(LVL, NA) |\
         PERF_MEM_S(SNOOP, NA) |\
         PERF_MEM_S(LOCK, NA) |\
         PERF_MEM_S(TLB, NA))

struct build_id_event {
        struct perf_event_header header;
        pid_t                    pid;
        u8                       build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
        char                     filename[];
};

enum perf_user_event_type { /* above any possible kernel type */
        PERF_RECORD_USER_TYPE_START             = 64,
        PERF_RECORD_HEADER_ATTR                 = 64,
        PERF_RECORD_HEADER_EVENT_TYPE           = 65, /* deprecated */
        PERF_RECORD_HEADER_TRACING_DATA         = 66,
        PERF_RECORD_HEADER_BUILD_ID             = 67,
        PERF_RECORD_FINISHED_ROUND              = 68,
        PERF_RECORD_ID_INDEX                    = 69,
        PERF_RECORD_AUXTRACE_INFO               = 70,
        PERF_RECORD_AUXTRACE                    = 71,
        PERF_RECORD_AUXTRACE_ERROR              = 72,
        PERF_RECORD_THREAD_MAP                  = 73,
        PERF_RECORD_CPU_MAP                     = 74,
        PERF_RECORD_STAT_CONFIG                 = 75,
        PERF_RECORD_STAT                        = 76,
        PERF_RECORD_STAT_ROUND                  = 77,
        PERF_RECORD_EVENT_UPDATE                = 78,
        PERF_RECORD_TIME_CONV                   = 79,
        PERF_RECORD_HEADER_FEATURE              = 80,
        PERF_RECORD_HEADER_MAX
};

enum auxtrace_error_type {
        PERF_AUXTRACE_ERROR_ITRACE  = 1,
        PERF_AUXTRACE_ERROR_MAX
};

/* Attribute type for custom synthesized events */
#define PERF_TYPE_SYNTH         (INT_MAX + 1U)

/* Attribute config for custom synthesized events */
enum perf_synth_id {
        PERF_SYNTH_INTEL_PTWRITE,
        PERF_SYNTH_INTEL_MWAIT,
        PERF_SYNTH_INTEL_PWRE,
        PERF_SYNTH_INTEL_EXSTOP,
        PERF_SYNTH_INTEL_PWRX,
        PERF_SYNTH_INTEL_CBR,
};

/*
 * Raw data formats for synthesized events. Note that 4 bytes of padding are
 * present to match the 'size' member of PERF_SAMPLE_RAW data, which is always
 * 8-byte aligned. That means we must dereference raw_data with an offset of 4.
 * Refer to perf_sample__synth_ptr() and perf_synth__raw_data(). It also means
 * the structure sizes are 4 bytes bigger than raw_size; refer to
 * perf_synth__raw_size().
 */

struct perf_synth_intel_ptwrite {
        u32 padding;
        union {
                struct {
                        u32     ip              :  1,
                                reserved        : 31;
                };
                u32     flags;
        };
        u64     payload;
};

struct perf_synth_intel_mwait {
        u32 padding;
        u32 reserved;
        union {
                struct {
                        u64     hints           :  8,
                                reserved1       : 24,
                                extensions      :  2,
                                reserved2       : 30;
                };
                u64     payload;
        };
};

struct perf_synth_intel_pwre {
        u32 padding;
        u32 reserved;
        union {
                struct {
                        u64     reserved1       :  7,
                                hw              :  1,
                                subcstate       :  4,
                                cstate          :  4,
                                reserved2       : 48;
                };
                u64     payload;
        };
};

struct perf_synth_intel_exstop {
        u32 padding;
        union {
                struct {
                        u32     ip              :  1,
                                reserved        : 31;
                };
                u32     flags;
        };
};

struct perf_synth_intel_pwrx {
        u32 padding;
        u32 reserved;
        union {
                struct {
                        u64     deepest_cstate  :  4,
                                last_cstate     :  4,
                                wake_reason     :  4,
                                reserved1       : 52;
                };
                u64     payload;
        };
};

struct perf_synth_intel_cbr {
        u32 padding;
        union {
                struct {
                        u32     cbr             :  8,
                                reserved1       :  8,
                                max_nonturbo    :  8,
                                reserved2       :  8;
                };
                u32     flags;
        };
        u32 freq;
        u32 reserved3;
};

/*
 * raw_data is always 4 bytes from an 8-byte boundary, so subtract 4 to get
 * 8-byte alignment.
 */
static inline void *perf_sample__synth_ptr(struct perf_sample *sample)
{
        return sample->raw_data - 4;
}

static inline void *perf_synth__raw_data(void *p)
{
        return p + 4;
}

#define perf_synth__raw_size(d) (sizeof(d) - 4)

#define perf_sample__bad_synth_size(s, d) ((s)->raw_size < sizeof(d) - 4)
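
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * consumer of a synthesized event converts raw_data back to an 8-byte-aligned
 * structure pointer and checks that the raw payload is large enough before
 * dereferencing it:
 *
 *	struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
 *
 *	if (perf_sample__bad_synth_size(sample, *data))
 *		return 0;
 *	// data->ip and data->payload are now usable
 */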

/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The kernel discards mixed up samples and sends the number in a
 * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
 * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
 * exactly how many samples the kernel in fact dropped, i.e. it is the sum of
 * all struct lost_samples_event.lost fields reported.
 *
 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_RECORD_SAMPLE] by a frequency does not yield the
 * total number of low level events; it is necessary to sum all struct
 * sample_event.period fields and stash the result in total_period.
 */
struct events_stats {
        u64 total_period;
        u64 total_non_filtered_period;
        u64 total_lost;
        u64 total_lost_samples;
        u64 total_aux_lost;
        u64 total_aux_partial;
        u64 total_invalid_chains;
        u32 nr_events[PERF_RECORD_HEADER_MAX];
        u32 nr_non_filtered_samples;
        u32 nr_lost_warned;
        u32 nr_unknown_events;
        u32 nr_invalid_chains;
        u32 nr_unknown_id;
        u32 nr_unprocessable_samples;
        u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
        u32 nr_proc_map_timeout;
};
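
/*
 * Illustrative sketch (an assumption about typical use, not upstream code):
 * the bookkeeping described in the comment above amounts to something like
 * the following when a PERF_RECORD_LOST event is processed:
 *
 *	stats->nr_events[PERF_RECORD_LOST]++;
 *	stats->total_lost += event->lost.lost;
 *
 * and, for every sample, total_period is accumulated from the sample's own
 * period rather than being derived from a sampling frequency:
 *
 *	stats->nr_events[PERF_RECORD_SAMPLE]++;
 *	stats->total_period += sample->period;
 */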

enum {
        PERF_CPU_MAP__CPUS = 0,
        PERF_CPU_MAP__MASK = 1,
};

struct cpu_map_entries {
        u16     nr;
        u16     cpu[];
};

struct cpu_map_mask {
        u16     nr;
        u16     long_size;
        unsigned long mask[];
};

struct cpu_map_data {
        u16     type;
        char    data[];
};

struct cpu_map_event {
        struct perf_event_header        header;
        struct cpu_map_data             data;
};
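
/*
 * Sketch (illustrative): data[] in struct cpu_map_data is interpreted
 * according to 'type': PERF_CPU_MAP__CPUS means it holds a struct
 * cpu_map_entries (an explicit list of CPU numbers), PERF_CPU_MAP__MASK means
 * it holds a struct cpu_map_mask (a bitmap of CPUs, nr longs of long_size
 * bytes each).  A reader might dispatch roughly like this:
 *
 *	switch (data->type) {
 *	case PERF_CPU_MAP__CPUS: {
 *		struct cpu_map_entries *cpus = (void *)data->data;
 *		// cpus->cpu[0] .. cpus->cpu[cpus->nr - 1] are the CPU numbers
 *		break;
 *	}
 *	case PERF_CPU_MAP__MASK: {
 *		struct cpu_map_mask *mask = (void *)data->data;
 *		// bit 'cpu' set in mask->mask[] means that CPU is in the map
 *		break;
 *	}
 *	}
 */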

struct attr_event {
        struct perf_event_header header;
        struct perf_event_attr attr;
        u64 id[];
};

enum {
        PERF_EVENT_UPDATE__UNIT  = 0,
        PERF_EVENT_UPDATE__SCALE = 1,
        PERF_EVENT_UPDATE__NAME  = 2,
        PERF_EVENT_UPDATE__CPUS  = 3,
};

struct event_update_event_cpus {
        struct cpu_map_data cpus;
};

struct event_update_event_scale {
        double scale;
};

struct event_update_event {
        struct perf_event_header header;
        u64 type;
        u64 id;

        char data[];
};

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
        u64     event_id;
        char    name[MAX_EVENT_NAME];
};

struct event_type_event {
        struct perf_event_header header;
        struct perf_trace_event_type event_type;
};

struct tracing_data_event {
        struct perf_event_header header;
        u32 size;
};

struct id_index_entry {
        u64 id;
        u64 idx;
        u64 cpu;
        u64 tid;
};

struct id_index_event {
        struct perf_event_header header;
        u64 nr;
        struct id_index_entry entries[0];
};

struct auxtrace_info_event {
        struct perf_event_header header;
        u32 type;
        u32 reserved__; /* For alignment */
        u64 priv[];
};

struct auxtrace_event {
        struct perf_event_header header;
        u64 size;
        u64 offset;
        u64 reference;
        u32 idx;
        u32 tid;
        u32 cpu;
        u32 reserved__; /* For alignment */
};

#define MAX_AUXTRACE_ERROR_MSG 64

struct auxtrace_error_event {
        struct perf_event_header header;
        u32 type;
        u32 code;
        u32 cpu;
        u32 pid;
        u32 tid;
        u32 reserved__; /* For alignment */
        u64 ip;
        char msg[MAX_AUXTRACE_ERROR_MSG];
};

struct aux_event {
        struct perf_event_header header;
        u64     aux_offset;
        u64     aux_size;
        u64     flags;
};

struct itrace_start_event {
        struct perf_event_header header;
        u32 pid, tid;
};

struct context_switch_event {
        struct perf_event_header header;
        u32 next_prev_pid;
        u32 next_prev_tid;
};

struct thread_map_event_entry {
        u64     pid;
        char    comm[16];
};

struct thread_map_event {
        struct perf_event_header        header;
        u64                             nr;
        struct thread_map_event_entry   entries[];
};

enum {
        PERF_STAT_CONFIG_TERM__AGGR_MODE        = 0,
        PERF_STAT_CONFIG_TERM__INTERVAL         = 1,
        PERF_STAT_CONFIG_TERM__SCALE            = 2,
        PERF_STAT_CONFIG_TERM__MAX              = 3,
};

struct stat_config_event_entry {
        u64     tag;
        u64     val;
};

struct stat_config_event {
        struct perf_event_header        header;
        u64                             nr;
        struct stat_config_event_entry  data[];
};
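
/*
 * Sketch (illustrative): each entry in data[] above pairs one of the
 * PERF_STAT_CONFIG_TERM__* tags with its value, e.g. a synthesized config
 * might carry entries such as:
 *
 *	{ .tag = PERF_STAT_CONFIG_TERM__AGGR_MODE, .val = aggr_mode },
 *	{ .tag = PERF_STAT_CONFIG_TERM__INTERVAL,  .val = interval  },
 *	{ .tag = PERF_STAT_CONFIG_TERM__SCALE,     .val = scale     },
 *
 * with nr set to the number of entries.  perf_event__read_stat_config()
 * (declared below) walks these tag/val pairs back into a
 * struct perf_stat_config.
 */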

struct stat_event {
        struct perf_event_header        header;

        u64     id;
        u32     cpu;
        u32     thread;

        union {
                struct {
                        u64 val;
                        u64 ena;
                        u64 run;
                };
                u64 values[3];
        };
};

enum {
        PERF_STAT_ROUND_TYPE__INTERVAL  = 0,
        PERF_STAT_ROUND_TYPE__FINAL     = 1,
};

struct stat_round_event {
        struct perf_event_header        header;
        u64                             type;
        u64                             time;
};

struct time_conv_event {
        struct perf_event_header header;
        u64 time_shift;
        u64 time_mult;
        u64 time_zero;
};
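
/*
 * Sketch (an assumption, mirroring the conversion done by tsc_to_perf_time()
 * in tools/perf/util/tsc.c): the three fields above describe how to turn a
 * hardware timestamp counter value into perf time:
 *
 *	quot = tsc >> time_shift;
 *	rem  = tsc & (((u64)1 << time_shift) - 1);
 *	time = time_zero + quot * time_mult +
 *	       ((rem * time_mult) >> time_shift);
 */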

struct feature_event {
        struct perf_event_header        header;
        u64                             feat_id;
        char                            data[];
};

union perf_event {
        struct perf_event_header        header;
        struct mmap_event               mmap;
        struct mmap2_event              mmap2;
        struct comm_event               comm;
        struct namespaces_event         namespaces;
        struct fork_event               fork;
        struct lost_event               lost;
        struct lost_samples_event       lost_samples;
        struct read_event               read;
        struct throttle_event           throttle;
        struct sample_event             sample;
        struct attr_event               attr;
        struct event_update_event       event_update;
        struct event_type_event         event_type;
        struct tracing_data_event       tracing_data;
        struct build_id_event           build_id;
        struct id_index_event           id_index;
        struct auxtrace_info_event      auxtrace_info;
        struct auxtrace_event           auxtrace;
        struct auxtrace_error_event     auxtrace_error;
        struct aux_event                aux;
        struct itrace_start_event       itrace_start;
        struct context_switch_event     context_switch;
        struct thread_map_event         thread_map;
        struct cpu_map_event            cpu_map;
        struct stat_config_event        stat_config;
        struct stat_event               stat;
        struct stat_round_event         stat_round;
        struct time_conv_event          time_conv;
        struct feature_event            feat;
};

void perf_event__print_totals(void);

struct perf_tool;
struct thread_map;
struct cpu_map;
struct perf_stat_config;
struct perf_counts_values;

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
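
/*
 * Sketch (illustrative, hypothetical name): a minimal callback matching the
 * typedef above that simply forwards the event to the generic handler:
 *
 *	static int handle_event(struct perf_tool *tool,
 *				union perf_event *event,
 *				struct perf_sample *sample,
 *				struct machine *machine)
 *	{
 *		return perf_event__process(tool, event, sample, machine);
 *	}
 */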

int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine, bool mmap_data,
                                      unsigned int proc_map_timeout);
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine);
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
                                   struct cpu_map *cpus,
                                   perf_event__handler_t process,
                                   struct machine *machine);
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine, bool mmap_data,
                                   unsigned int proc_map_timeout,
                                   unsigned int nr_threads_synthesize);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine);
int perf_event__synthesize_stat_config(struct perf_tool *tool,
                                       struct perf_stat_config *config,
                                       perf_event__handler_t process,
                                       struct machine *machine);
void perf_event__read_stat_config(struct perf_stat_config *config,
                                  struct stat_config_event *event);
int perf_event__synthesize_stat(struct perf_tool *tool,
                                u32 cpu, u32 thread, u64 id,
                                struct perf_counts_values *count,
                                perf_event__handler_t process,
                                struct machine *machine);
int perf_event__synthesize_stat_round(struct perf_tool *tool,
                                      u64 time, u64 type,
                                      perf_event__handler_t process,
                                      struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine);

int perf_event__process_comm(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_lost(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_lost_samples(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
int perf_event__process_aux(struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct machine *machine);
int perf_event__process_itrace_start(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
int perf_event__process_switch(struct perf_tool *tool,
                               union perf_event *event,
                               struct perf_sample *sample,
                               struct machine *machine);
int perf_event__process_namespaces(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_mmap2(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_fork(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_exit(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process(struct perf_tool *tool,
                        union perf_event *event,
                        struct perf_sample *sample,
                        struct machine *machine);

struct addr_location;

int machine__resolve(struct machine *machine, struct addr_location *al,
                     struct perf_sample *sample);

void addr_location__put(struct addr_location *al);

struct thread;

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);
void thread__resolve(struct thread *thread, struct addr_location *al,
                     struct perf_sample *sample);

const char *perf_event__name(unsigned int id);

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
                                     u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
                                  u64 read_format,
                                  const struct perf_sample *sample);

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                  union perf_event *event, pid_t pid,
                                  perf_event__handler_t process,
                                  struct machine *machine);

int perf_event__synthesize_namespaces(struct perf_tool *tool,
                                      union perf_event *event,
                                      pid_t pid, pid_t tgid,
                                      perf_event__handler_t process,
                                      struct machine *machine);

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data,
                                       unsigned int proc_map_timeout);

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

int kallsyms__get_function_start(const char *kallsyms_filename,
                                 const char *symbol_name, u64 *addr);

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max);
void  cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
                               u16 type, int max);

void event_attr_init(struct perf_event_attr *attr);

int perf_event_paranoid(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

#endif /* __PERF_RECORD_H */