linux/tools/perf/util/event.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/perf_event.h>

#include "../perf.h"
#include "build-id.h"
#include "perf_regs.h"

struct mmap_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        char filename[PATH_MAX];
};

struct mmap2_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        u32 maj;
        u32 min;
        u64 ino;
        u64 ino_generation;
        u32 prot;
        u32 flags;
        char filename[PATH_MAX];
};

struct comm_event {
        struct perf_event_header header;
        u32 pid, tid;
        char comm[16];
};

struct namespaces_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 nr_namespaces;
        struct perf_ns_link_info link_info[];
};

struct fork_event {
        struct perf_event_header header;
        u32 pid, ppid;
        u32 tid, ptid;
        u64 time;
};

struct lost_event {
        struct perf_event_header header;
        u64 id;
        u64 lost;
};

struct lost_samples_event {
        struct perf_event_header header;
        u64 lost;
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
 */
struct read_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 value;
        u64 time_enabled;
        u64 time_running;
        u64 id;
};
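
/*
 * Illustrative sketch, not part of the upstream header: struct read_event
 * matches the kernel's PERF_RECORD_READ layout for a counter opened with the
 * read_format bits referred to above, i.e. roughly (uapi names):
 *
 *      struct perf_event_attr attr = {
 *              ...
 *              .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *                             PERF_FORMAT_TOTAL_TIME_RUNNING |
 *                             PERF_FORMAT_ID,
 *      };
 */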

struct throttle_event {
        struct perf_event_header header;
        u64 time;
        u64 id;
        u64 stream_id;
};

#ifndef KSYM_NAME_LEN
#define KSYM_NAME_LEN 256
#endif

struct ksymbol_event {
        struct perf_event_header header;
        u64 addr;
        u32 len;
        u16 ksym_type;
        u16 flags;
        char name[KSYM_NAME_LEN];
};

struct bpf_event {
        struct perf_event_header header;
        u16 type;
        u16 flags;
        u32 id;

        /* for bpf_prog types */
        u8 tag[BPF_TAG_SIZE];  // prog tag
};

#define PERF_SAMPLE_MASK                                \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID |             \
         PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |          \
        PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |        \
         PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |         \
         PERF_SAMPLE_IDENTIFIER)

/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
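/* The 64KiB cap follows from perf_event_header.size being a u16 in the ABI. */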

struct sample_event {
        struct perf_event_header        header;
        u64 array[];
};

struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;

        /* Cached values/mask filled by first register access. */
        u64 cache_regs[PERF_REGS_MAX];
        u64 cache_mask;
};

struct stack_dump {
        u16 offset;
        u64 size;
        char *data;
};

struct sample_read_value {
        u64 value;
        u64 id;
};

struct sample_read {
        u64 time_enabled;
        u64 time_running;
        union {
                struct {
                        u64 nr;
                        struct sample_read_value *values;
                } group;
                struct sample_read_value one;
        };
};

struct ip_callchain {
        u64 nr;
        u64 ips[0];
};

struct branch_stack;

enum {
        PERF_IP_FLAG_BRANCH             = 1ULL << 0,
        PERF_IP_FLAG_CALL               = 1ULL << 1,
        PERF_IP_FLAG_RETURN             = 1ULL << 2,
        PERF_IP_FLAG_CONDITIONAL        = 1ULL << 3,
        PERF_IP_FLAG_SYSCALLRET         = 1ULL << 4,
        PERF_IP_FLAG_ASYNC              = 1ULL << 5,
        PERF_IP_FLAG_INTERRUPT          = 1ULL << 6,
        PERF_IP_FLAG_TX_ABORT           = 1ULL << 7,
        PERF_IP_FLAG_TRACE_BEGIN        = 1ULL << 8,
        PERF_IP_FLAG_TRACE_END          = 1ULL << 9,
        PERF_IP_FLAG_IN_TX              = 1ULL << 10,
};

#define PERF_IP_FLAG_CHARS "bcrosyiABEx"
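/*
 * The string above has one character per PERF_IP_FLAG_* bit, in bit order:
 * 'b' BRANCH, 'c' CALL, 'r' RETURN, 'o' CONDITIONAL, 's' SYSCALLRET,
 * 'y' ASYNC, 'i' INTERRUPT, 'A' TX_ABORT, 'B' TRACE_BEGIN, 'E' TRACE_END,
 * 'x' IN_TX.
 */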

#define PERF_BRANCH_MASK                (\
        PERF_IP_FLAG_BRANCH             |\
        PERF_IP_FLAG_CALL               |\
        PERF_IP_FLAG_RETURN             |\
        PERF_IP_FLAG_CONDITIONAL        |\
        PERF_IP_FLAG_SYSCALLRET         |\
        PERF_IP_FLAG_ASYNC              |\
        PERF_IP_FLAG_INTERRUPT          |\
        PERF_IP_FLAG_TX_ABORT           |\
        PERF_IP_FLAG_TRACE_BEGIN        |\
        PERF_IP_FLAG_TRACE_END)

#define MAX_INSN 16

struct perf_sample {
        u64 ip;
        u32 pid, tid;
        u64 time;
        u64 addr;
        u64 id;
        u64 stream_id;
        u64 period;
        u64 weight;
        u64 transaction;
        u64 insn_cnt;
        u64 cyc_cnt;
        u32 cpu;
        u32 raw_size;
        u64 data_src;
        u64 phys_addr;
        u32 flags;
        u16 insn_len;
        u8  cpumode;
        u16 misc;
        char insn[MAX_INSN];
        void *raw_data;
        struct ip_callchain *callchain;
        struct branch_stack *branch_stack;
        struct regs_dump  user_regs;
        struct regs_dump  intr_regs;
        struct stack_dump user_stack;
        struct sample_read read;
};

#define PERF_MEM_DATA_SRC_NONE \
        (PERF_MEM_S(OP, NA) |\
         PERF_MEM_S(LVL, NA) |\
         PERF_MEM_S(SNOOP, NA) |\
         PERF_MEM_S(LOCK, NA) |\
         PERF_MEM_S(TLB, NA))

struct build_id_event {
        struct perf_event_header header;
        pid_t                    pid;
        u8                       build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
        char                     filename[];
};

enum perf_user_event_type { /* above any possible kernel type */
        PERF_RECORD_USER_TYPE_START             = 64,
        PERF_RECORD_HEADER_ATTR                 = 64,
        PERF_RECORD_HEADER_EVENT_TYPE           = 65, /* deprecated */
        PERF_RECORD_HEADER_TRACING_DATA         = 66,
        PERF_RECORD_HEADER_BUILD_ID             = 67,
        PERF_RECORD_FINISHED_ROUND              = 68,
        PERF_RECORD_ID_INDEX                    = 69,
        PERF_RECORD_AUXTRACE_INFO               = 70,
        PERF_RECORD_AUXTRACE                    = 71,
        PERF_RECORD_AUXTRACE_ERROR              = 72,
        PERF_RECORD_THREAD_MAP                  = 73,
        PERF_RECORD_CPU_MAP                     = 74,
        PERF_RECORD_STAT_CONFIG                 = 75,
        PERF_RECORD_STAT                        = 76,
        PERF_RECORD_STAT_ROUND                  = 77,
        PERF_RECORD_EVENT_UPDATE                = 78,
        PERF_RECORD_TIME_CONV                   = 79,
        PERF_RECORD_HEADER_FEATURE              = 80,
        PERF_RECORD_COMPRESSED                  = 81,
        PERF_RECORD_HEADER_MAX
};

enum auxtrace_error_type {
        PERF_AUXTRACE_ERROR_ITRACE  = 1,
        PERF_AUXTRACE_ERROR_MAX
};

/* Attribute type for custom synthesized events */
#define PERF_TYPE_SYNTH         (INT_MAX + 1U)

/* Attribute config for custom synthesized events */
enum perf_synth_id {
        PERF_SYNTH_INTEL_PTWRITE,
        PERF_SYNTH_INTEL_MWAIT,
        PERF_SYNTH_INTEL_PWRE,
        PERF_SYNTH_INTEL_EXSTOP,
        PERF_SYNTH_INTEL_PWRX,
        PERF_SYNTH_INTEL_CBR,
};
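
/*
 * Illustrative sketch, not part of the upstream header: consumers can
 * recognize these synthesized events by the attribute type/config pair
 * defined above, roughly:
 *
 *      if (attr->type == PERF_TYPE_SYNTH &&
 *          attr->config == PERF_SYNTH_INTEL_PTWRITE)
 *              ... decode a struct perf_synth_intel_ptwrite payload ...
 */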

/*
 * Raw data formats for synthesized events. Note that 4 bytes of padding are
 * present to match the 'size' member of PERF_SAMPLE_RAW data, which is always
 * 8-byte aligned. That means raw_data must be dereferenced with an offset of 4;
 * refer to perf_sample__synth_ptr() and perf_synth__raw_data(). It also means
 * the structure sizes are 4 bytes bigger than the raw_size; refer to
 * perf_synth__raw_size().
 */

struct perf_synth_intel_ptwrite {
        u32 padding;
        union {
                struct {
                        u32     ip              :  1,
                                reserved        : 31;
                };
                u32     flags;
        };
        u64     payload;
};

struct perf_synth_intel_mwait {
        u32 padding;
        u32 reserved;
        union {
                struct {
                        u64     hints           :  8,
                                reserved1       : 24,
                                extensions      :  2,
                                reserved2       : 30;
                };
                u64     payload;
        };
};

struct perf_synth_intel_pwre {
        u32 padding;
        u32 reserved;
        union {
                struct {
                        u64     reserved1       :  7,
                                hw              :  1,
                                subcstate       :  4,
                                cstate          :  4,
                                reserved2       : 48;
                };
                u64     payload;
        };
};

struct perf_synth_intel_exstop {
        u32 padding;
        union {
                struct {
                        u32     ip              :  1,
                                reserved        : 31;
                };
                u32     flags;
        };
};

struct perf_synth_intel_pwrx {
        u32 padding;
        u32 reserved;
        union {
                struct {
                        u64     deepest_cstate  :  4,
                                last_cstate     :  4,
                                wake_reason     :  4,
                                reserved1       : 52;
                };
                u64     payload;
        };
};

struct perf_synth_intel_cbr {
        u32 padding;
        union {
                struct {
                        u32     cbr             :  8,
                                reserved1       :  8,
                                max_nonturbo    :  8,
                                reserved2       :  8;
                };
                u32     flags;
        };
        u32 freq;
        u32 reserved3;
};

/*
 * raw_data is always 4 bytes from an 8-byte boundary, so subtract 4 to get
 * 8-byte alignment.
 */
static inline void *perf_sample__synth_ptr(struct perf_sample *sample)
{
        return sample->raw_data - 4;
}

static inline void *perf_synth__raw_data(void *p)
{
        return p + 4;
}

#define perf_synth__raw_size(d) (sizeof(d) - 4)

#define perf_sample__bad_synth_size(s, d) ((s)->raw_size < sizeof(d) - 4)
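
/*
 * Illustrative usage sketch, not part of the upstream header: a consumer of
 * one of the synthesized records above (here PERF_SYNTH_INTEL_PTWRITE,
 * assuming 'sample' carries the matching raw data) typically does:
 *
 *      struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
 *
 *      if (perf_sample__bad_synth_size(sample, *data))
 *              return;
 *      printf("IP: %u payload: %#llx\n", data->ip,
 *             (unsigned long long)data->payload);
 */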

/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The kernel discards mixed up samples and sends the number in a
 * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
 * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
 * exactly how many samples the kernel in fact dropped, i.e. it is the sum of
 * all struct lost_samples_event.lost fields reported.
 *
 * The total_period is needed because auto-freq is used by default, so
 * multiplying nr_events[PERF_RECORD_SAMPLE] by a fixed period cannot give the
 * total number of low level events; instead it is necessary to sum all the
 * struct sample_event.period values and stash the result in total_period.
 */
struct events_stats {
        u64 total_period;
        u64 total_non_filtered_period;
        u64 total_lost;
        u64 total_lost_samples;
        u64 total_aux_lost;
        u64 total_aux_partial;
        u64 total_invalid_chains;
        u32 nr_events[PERF_RECORD_HEADER_MAX];
        u32 nr_non_filtered_samples;
        u32 nr_lost_warned;
        u32 nr_unknown_events;
        u32 nr_invalid_chains;
        u32 nr_unknown_id;
        u32 nr_unprocessable_samples;
        u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
        u32 nr_proc_map_timeout;
};
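
/*
 * Illustrative sketch, not part of the upstream header: following the comment
 * above, a session updates these counters roughly like so when it sees a
 * PERF_RECORD_LOST record and when it parses a sample:
 *
 *      stats->nr_events[PERF_RECORD_LOST]++;
 *      stats->total_lost += event->lost.lost;
 *
 *      stats->nr_events[PERF_RECORD_SAMPLE]++;
 *      stats->total_period += sample->period;
 */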

enum {
        PERF_CPU_MAP__CPUS = 0,
        PERF_CPU_MAP__MASK = 1,
};

struct cpu_map_entries {
        u16     nr;
        u16     cpu[];
};

struct cpu_map_mask {
        u16     nr;
        u16     long_size;
        unsigned long mask[];
};

struct cpu_map_data {
        u16     type;
        char    data[];
};

struct cpu_map_event {
        struct perf_event_header        header;
        struct cpu_map_data             data;
};
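
/*
 * Illustrative sketch, not part of the upstream header: cpu_map_data.type
 * selects which layout follows in data[], so a reader typically does:
 *
 *      struct cpu_map_data *data = &event->cpu_map.data;
 *
 *      switch (data->type) {
 *      case PERF_CPU_MAP__CPUS:
 *              ... (struct cpu_map_entries *)data->data: 'nr' u16 CPU numbers
 *              break;
 *      case PERF_CPU_MAP__MASK:
 *              ... (struct cpu_map_mask *)data->data: bitmap of 'nr' longs of
 *              ... 'long_size' bytes each, bit N set meaning CPU N is present
 *              break;
 *      }
 */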

struct attr_event {
        struct perf_event_header header;
        struct perf_event_attr attr;
        u64 id[];
};

enum {
        PERF_EVENT_UPDATE__UNIT  = 0,
        PERF_EVENT_UPDATE__SCALE = 1,
        PERF_EVENT_UPDATE__NAME  = 2,
        PERF_EVENT_UPDATE__CPUS  = 3,
};

struct event_update_event_cpus {
        struct cpu_map_data cpus;
};

struct event_update_event_scale {
        double scale;
};

struct event_update_event {
        struct perf_event_header header;
        u64 type;
        u64 id;

        char data[];
};

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
        u64     event_id;
        char    name[MAX_EVENT_NAME];
};

struct event_type_event {
        struct perf_event_header header;
        struct perf_trace_event_type event_type;
};

struct tracing_data_event {
        struct perf_event_header header;
        u32 size;
};

struct id_index_entry {
        u64 id;
        u64 idx;
        u64 cpu;
        u64 tid;
};

struct id_index_event {
        struct perf_event_header header;
        u64 nr;
        struct id_index_entry entries[0];
};

struct auxtrace_info_event {
        struct perf_event_header header;
        u32 type;
        u32 reserved__; /* For alignment */
        u64 priv[];
};

struct auxtrace_event {
        struct perf_event_header header;
        u64 size;
        u64 offset;
        u64 reference;
        u32 idx;
        u32 tid;
        u32 cpu;
        u32 reserved__; /* For alignment */
};

#define MAX_AUXTRACE_ERROR_MSG 64

struct auxtrace_error_event {
        struct perf_event_header header;
        u32 type;
        u32 code;
        u32 cpu;
        u32 pid;
        u32 tid;
        u32 fmt;
        u64 ip;
        u64 time;
        char msg[MAX_AUXTRACE_ERROR_MSG];
};

struct aux_event {
        struct perf_event_header header;
        u64     aux_offset;
        u64     aux_size;
        u64     flags;
};

struct itrace_start_event {
        struct perf_event_header header;
        u32 pid, tid;
};

struct context_switch_event {
        struct perf_event_header header;
        u32 next_prev_pid;
        u32 next_prev_tid;
};

struct thread_map_event_entry {
        u64     pid;
        char    comm[16];
};

struct thread_map_event {
        struct perf_event_header        header;
        u64                             nr;
        struct thread_map_event_entry   entries[];
};

enum {
        PERF_STAT_CONFIG_TERM__AGGR_MODE        = 0,
        PERF_STAT_CONFIG_TERM__INTERVAL         = 1,
        PERF_STAT_CONFIG_TERM__SCALE            = 2,
        PERF_STAT_CONFIG_TERM__MAX              = 3,
};

struct stat_config_event_entry {
        u64     tag;
        u64     val;
};

struct stat_config_event {
        struct perf_event_header        header;
        u64                             nr;
        struct stat_config_event_entry  data[];
};

struct stat_event {
        struct perf_event_header        header;

        u64     id;
        u32     cpu;
        u32     thread;

        union {
                struct {
                        u64 val;
                        u64 ena;
                        u64 run;
                };
                u64 values[3];
        };
};

enum {
        PERF_STAT_ROUND_TYPE__INTERVAL  = 0,
        PERF_STAT_ROUND_TYPE__FINAL     = 1,
};

struct stat_round_event {
        struct perf_event_header        header;
        u64                             type;
        u64                             time;
};

struct time_conv_event {
        struct perf_event_header header;
        u64 time_shift;
        u64 time_mult;
        u64 time_zero;
};

struct feature_event {
        struct perf_event_header        header;
        u64                             feat_id;
        char                            data[];
};

struct compressed_event {
        struct perf_event_header        header;
        char                            data[];
};

union perf_event {
        struct perf_event_header        header;
        struct mmap_event               mmap;
        struct mmap2_event              mmap2;
        struct comm_event               comm;
        struct namespaces_event         namespaces;
        struct fork_event               fork;
        struct lost_event               lost;
        struct lost_samples_event       lost_samples;
        struct read_event               read;
        struct throttle_event           throttle;
        struct sample_event             sample;
        struct attr_event               attr;
        struct event_update_event       event_update;
        struct event_type_event         event_type;
        struct tracing_data_event       tracing_data;
        struct build_id_event           build_id;
        struct id_index_event           id_index;
        struct auxtrace_info_event      auxtrace_info;
        struct auxtrace_event           auxtrace;
        struct auxtrace_error_event     auxtrace_error;
        struct aux_event                aux;
        struct itrace_start_event       itrace_start;
        struct context_switch_event     context_switch;
        struct thread_map_event         thread_map;
        struct cpu_map_event            cpu_map;
        struct stat_config_event        stat_config;
        struct stat_event               stat;
        struct stat_round_event         stat_round;
        struct time_conv_event          time_conv;
        struct feature_event            feat;
        struct ksymbol_event            ksymbol_event;
        struct bpf_event                bpf_event;
        struct compressed_event         pack;
};
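
/*
 * Illustrative sketch, not part of the upstream header: every record starts
 * with a struct perf_event_header, so consumers dispatch on header.type and
 * then read the matching union member, roughly:
 *
 *      switch (event->header.type) {
 *      case PERF_RECORD_MMAP:
 *              ... event->mmap.filename, event->mmap.start, event->mmap.len
 *              break;
 *      case PERF_RECORD_COMM:
 *              ... event->comm.pid, event->comm.comm
 *              break;
 *      case PERF_RECORD_LOST:
 *              ... event->lost.lost
 *              break;
 *      default:
 *              break;
 *      }
 */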

void perf_event__print_totals(void);

struct perf_tool;
struct thread_map;
struct cpu_map;
struct perf_stat_config;
struct perf_counts_values;

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);

int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine, bool mmap_data);
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine);
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
                                   struct cpu_map *cpus,
                                   perf_event__handler_t process,
                                   struct machine *machine);
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine, bool mmap_data,
                                   unsigned int nr_threads_synthesize);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine);
int perf_event__synthesize_stat_config(struct perf_tool *tool,
                                       struct perf_stat_config *config,
                                       perf_event__handler_t process,
                                       struct machine *machine);
void perf_event__read_stat_config(struct perf_stat_config *config,
                                  struct stat_config_event *event);
int perf_event__synthesize_stat(struct perf_tool *tool,
                                u32 cpu, u32 thread, u64 id,
                                struct perf_counts_values *count,
                                perf_event__handler_t process,
                                struct machine *machine);
int perf_event__synthesize_stat_round(struct perf_tool *tool,
                                      u64 time, u64 type,
                                      perf_event__handler_t process,
                                      struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine);

int perf_event__process_comm(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_lost(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_lost_samples(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
int perf_event__process_aux(struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct machine *machine);
int perf_event__process_itrace_start(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
int perf_event__process_switch(struct perf_tool *tool,
                               union perf_event *event,
                               struct perf_sample *sample,
                               struct machine *machine);
int perf_event__process_namespaces(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_mmap2(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_fork(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_exit(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_ksymbol(struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine);
int perf_event__process_bpf_event(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct perf_sample *sample,
                                  struct machine *machine);
int perf_tool__process_synth_event(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct machine *machine,
                                   perf_event__handler_t process);
int perf_event__process(struct perf_tool *tool,
                        union perf_event *event,
                        struct perf_sample *sample,
                        struct machine *machine);

struct addr_location;

int machine__resolve(struct machine *machine, struct addr_location *al,
                     struct perf_sample *sample);

void addr_location__put(struct addr_location *al);

struct thread;

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);
void thread__resolve(struct thread *thread, struct addr_location *al,
                     struct perf_sample *sample);

const char *perf_event__name(unsigned int id);

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
                                     u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
                                  u64 read_format,
                                  const struct perf_sample *sample);

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                  union perf_event *event, pid_t pid,
                                  perf_event__handler_t process,
                                  struct machine *machine);

int perf_event__synthesize_namespaces(struct perf_tool *tool,
                                      union perf_event *event,
                                      pid_t pid, pid_t tgid,
                                      perf_event__handler_t process,
                                      struct machine *machine);

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data);

int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine);

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

int kallsyms__get_function_start(const char *kallsyms_filename,
                                 const char *symbol_name, u64 *addr);

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max);
void  cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
                               u16 type, int max);

void event_attr_init(struct perf_event_attr *attr);

int perf_event_paranoid(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;
extern unsigned int proc_map_timeout;

#endif /* __PERF_RECORD_H */