linux/tools/perf/util/event.h
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>

#include "../perf.h"
#include "map.h"
#include "build-id.h"
#include "perf_regs.h"

struct mmap_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        char filename[PATH_MAX];
};

struct mmap2_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        u32 maj;
        u32 min;
        u64 ino;
        u64 ino_generation;
        u32 prot;
        u32 flags;
        char filename[PATH_MAX];
};

struct comm_event {
        struct perf_event_header header;
        u32 pid, tid;
        char comm[16];
};

struct fork_event {
        struct perf_event_header header;
        u32 pid, ppid;
        u32 tid, ptid;
        u64 time;
};

struct lost_event {
        struct perf_event_header header;
        u64 id;
        u64 lost;
};

struct lost_samples_event {
        struct perf_event_header header;
        u64 lost;
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
 */
struct read_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 value;
        u64 time_enabled;
        u64 time_running;
        u64 id;
};

struct throttle_event {
        struct perf_event_header header;
        u64 time;
        u64 id;
        u64 stream_id;
};

#define PERF_SAMPLE_MASK                                \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID |             \
         PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |          \
         PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |       \
         PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |         \
         PERF_SAMPLE_IDENTIFIER)

/* perf sample has a 16-bit size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)

struct sample_event {
        struct perf_event_header        header;
        u64 array[];
};

struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;

        /* Cached values/mask filled by first register access. */
        u64 cache_regs[PERF_REGS_MAX];
        u64 cache_mask;
};

struct stack_dump {
        u16 offset;
        u64 size;
        char *data;
};

struct sample_read_value {
        u64 value;
        u64 id;
};

struct sample_read {
        u64 time_enabled;
        u64 time_running;
        union {
                struct {
                        u64 nr;
                        struct sample_read_value *values;
                } group;
                struct sample_read_value one;
        };
};

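/*
 * Illustrative sketch (not part of the original header; the helper name is
 * hypothetical): the union above mirrors PERF_FORMAT_GROUP -- with the group
 * read format, .group.nr values follow, otherwise only .one is valid.  A
 * consumer might total the values like this:
 */
static inline u64 sample_read__total(struct sample_read *rd, u64 read_format)
{
        u64 i, sum = 0;

        if (read_format & PERF_FORMAT_GROUP) {
                for (i = 0; i < rd->group.nr; i++)
                        sum += rd->group.values[i].value;
        } else {
                sum = rd->one.value;
        }
        return sum;
}
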
struct ip_callchain {
        u64 nr;
        u64 ips[0];
};

struct branch_flags {
        u64 mispred:1;
        u64 predicted:1;
        u64 in_tx:1;
        u64 abort:1;
        u64 cycles:16;
        u64 reserved:44;
};

struct branch_entry {
        u64                     from;
        u64                     to;
        struct branch_flags     flags;
};

struct branch_stack {
        u64                     nr;
        struct branch_entry     entries[0];
};

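/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * branch_stack is a variable-length record, so consumers walk entries[]
 * up to .nr, e.g. to count mispredicted branches:
 */
static inline u64 branch_stack__nr_mispredicted(struct branch_stack *bs)
{
        u64 i, n = 0;

        for (i = 0; i < bs->nr; i++)
                n += bs->entries[i].flags.mispred;
        return n;
}
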
enum {
        PERF_IP_FLAG_BRANCH             = 1ULL << 0,
        PERF_IP_FLAG_CALL               = 1ULL << 1,
        PERF_IP_FLAG_RETURN             = 1ULL << 2,
        PERF_IP_FLAG_CONDITIONAL        = 1ULL << 3,
        PERF_IP_FLAG_SYSCALLRET         = 1ULL << 4,
        PERF_IP_FLAG_ASYNC              = 1ULL << 5,
        PERF_IP_FLAG_INTERRUPT          = 1ULL << 6,
        PERF_IP_FLAG_TX_ABORT           = 1ULL << 7,
        PERF_IP_FLAG_TRACE_BEGIN        = 1ULL << 8,
        PERF_IP_FLAG_TRACE_END          = 1ULL << 9,
        PERF_IP_FLAG_IN_TX              = 1ULL << 10,
};

#define PERF_IP_FLAG_CHARS "bcrosyiABEx"

#define PERF_BRANCH_MASK                (\
        PERF_IP_FLAG_BRANCH             |\
        PERF_IP_FLAG_CALL               |\
        PERF_IP_FLAG_RETURN             |\
        PERF_IP_FLAG_CONDITIONAL        |\
        PERF_IP_FLAG_SYSCALLRET         |\
        PERF_IP_FLAG_ASYNC              |\
        PERF_IP_FLAG_INTERRUPT          |\
        PERF_IP_FLAG_TX_ABORT           |\
        PERF_IP_FLAG_TRACE_BEGIN        |\
        PERF_IP_FLAG_TRACE_END)

struct perf_sample {
        u64 ip;
        u32 pid, tid;
        u64 time;
        u64 addr;
        u64 id;
        u64 stream_id;
        u64 period;
        u64 weight;
        u64 transaction;
        u32 cpu;
        u32 raw_size;
        u64 data_src;
        u32 flags;
        u16 insn_len;
        u8  cpumode;
        void *raw_data;
        struct ip_callchain *callchain;
        struct branch_stack *branch_stack;
        struct regs_dump  user_regs;
        struct regs_dump  intr_regs;
        struct stack_dump user_stack;
        struct sample_read read;
};

#define PERF_MEM_DATA_SRC_NONE \
        (PERF_MEM_S(OP, NA) |\
         PERF_MEM_S(LVL, NA) |\
         PERF_MEM_S(SNOOP, NA) |\
         PERF_MEM_S(LOCK, NA) |\
         PERF_MEM_S(TLB, NA))

struct build_id_event {
        struct perf_event_header header;
        pid_t                    pid;
        u8                       build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
        char                     filename[];
};

enum perf_user_event_type { /* above any possible kernel type */
        PERF_RECORD_USER_TYPE_START             = 64,
        PERF_RECORD_HEADER_ATTR                 = 64,
        PERF_RECORD_HEADER_EVENT_TYPE           = 65, /* deprecated */
        PERF_RECORD_HEADER_TRACING_DATA         = 66,
        PERF_RECORD_HEADER_BUILD_ID             = 67,
        PERF_RECORD_FINISHED_ROUND              = 68,
        PERF_RECORD_ID_INDEX                    = 69,
        PERF_RECORD_AUXTRACE_INFO               = 70,
        PERF_RECORD_AUXTRACE                    = 71,
        PERF_RECORD_AUXTRACE_ERROR              = 72,
        PERF_RECORD_THREAD_MAP                  = 73,
        PERF_RECORD_CPU_MAP                     = 74,
        PERF_RECORD_STAT_CONFIG                 = 75,
        PERF_RECORD_STAT                        = 76,
        PERF_RECORD_STAT_ROUND                  = 77,
        PERF_RECORD_EVENT_UPDATE                = 78,
        PERF_RECORD_HEADER_MAX
};

enum auxtrace_error_type {
        PERF_AUXTRACE_ERROR_ITRACE  = 1,
        PERF_AUXTRACE_ERROR_MAX
};

/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The kernel discards mixed up samples and sends the number in a
 * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
 * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
 * exactly how many samples the kernel in fact dropped, i.e. it is the sum of
 * all struct lost_samples_event.lost fields reported.
 *
 * total_period is needed because auto-freq is used by default, so multiplying
 * nr_events[PERF_RECORD_SAMPLE] by a fixed period does not give the total
 * number of low level events; it is necessary to sum all the
 * struct sample_event.period fields and stash the result in total_period.
 */
struct events_stats {
        u64 total_period;
        u64 total_non_filtered_period;
        u64 total_lost;
        u64 total_lost_samples;
        u64 total_aux_lost;
        u64 total_invalid_chains;
        u32 nr_events[PERF_RECORD_HEADER_MAX];
        u32 nr_non_filtered_samples;
        u32 nr_lost_warned;
        u32 nr_unknown_events;
        u32 nr_invalid_chains;
        u32 nr_unknown_id;
        u32 nr_unprocessable_samples;
        u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
        u32 nr_proc_map_timeout;
};

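/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * as described above, a PERF_RECORD_LOST consumer bumps the per-type
 * counter and accumulates the per-record count into total_lost:
 */
static inline void events_stats__account_lost(struct events_stats *stats,
                                              struct lost_event *lost)
{
        stats->nr_events[PERF_RECORD_LOST]++;
        stats->total_lost += lost->lost;
}
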
enum {
        PERF_CPU_MAP__CPUS = 0,
        PERF_CPU_MAP__MASK = 1,
};

struct cpu_map_entries {
        u16     nr;
        u16     cpu[];
};

struct cpu_map_mask {
        u16     nr;
        u16     long_size;
        unsigned long mask[];
};

struct cpu_map_data {
        u16     type;
        char    data[];
};

struct cpu_map_event {
        struct perf_event_header        header;
        struct cpu_map_data             data;
};

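/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * cpu_map_data.data is interpreted according to .type, either as a list of
 * CPU numbers or as a bitmask.  This sketch assumes .long_size matches the
 * host's sizeof(unsigned long):
 */
static inline bool cpu_map_data__has_cpu(struct cpu_map_data *data, u16 cpu)
{
        if (data->type == PERF_CPU_MAP__CPUS) {
                struct cpu_map_entries *cpus = (struct cpu_map_entries *)data->data;
                u16 i;

                for (i = 0; i < cpus->nr; i++)
                        if (cpus->cpu[i] == cpu)
                                return true;
                return false;
        } else {        /* PERF_CPU_MAP__MASK */
                struct cpu_map_mask *mask = (struct cpu_map_mask *)data->data;
                unsigned long bits = 8 * sizeof(unsigned long);

                return mask->mask[cpu / bits] & (1UL << (cpu % bits));
        }
}
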
struct attr_event {
        struct perf_event_header header;
        struct perf_event_attr attr;
        u64 id[];
};

enum {
        PERF_EVENT_UPDATE__UNIT  = 0,
        PERF_EVENT_UPDATE__SCALE = 1,
        PERF_EVENT_UPDATE__NAME  = 2,
        PERF_EVENT_UPDATE__CPUS  = 3,
};

struct event_update_event_cpus {
        struct cpu_map_data cpus;
};

struct event_update_event_scale {
        double scale;
};

struct event_update_event {
        struct perf_event_header header;
        u64 type;
        u64 id;

        char data[];
};

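/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * the variable-length .data is interpreted according to .type, e.g. a
 * PERF_EVENT_UPDATE__SCALE record carries an event_update_event_scale,
 * while UNIT/NAME updates carry a string and CPUS a cpu map:
 */
static inline double event_update_event__scale(struct event_update_event *ev)
{
        struct event_update_event_scale *s =
                (struct event_update_event_scale *)ev->data;

        return s->scale;
}
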
#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
        u64     event_id;
        char    name[MAX_EVENT_NAME];
};

struct event_type_event {
        struct perf_event_header header;
        struct perf_trace_event_type event_type;
};

struct tracing_data_event {
        struct perf_event_header header;
        u32 size;
};

struct id_index_entry {
        u64 id;
        u64 idx;
        u64 cpu;
        u64 tid;
};

struct id_index_event {
        struct perf_event_header header;
        u64 nr;
        struct id_index_entry entries[0];
};

struct auxtrace_info_event {
        struct perf_event_header header;
        u32 type;
        u32 reserved__; /* For alignment */
        u64 priv[];
};

struct auxtrace_event {
        struct perf_event_header header;
        u64 size;
        u64 offset;
        u64 reference;
        u32 idx;
        u32 tid;
        u32 cpu;
        u32 reserved__; /* For alignment */
};

#define MAX_AUXTRACE_ERROR_MSG 64

struct auxtrace_error_event {
        struct perf_event_header header;
        u32 type;
        u32 code;
        u32 cpu;
        u32 pid;
        u32 tid;
        u32 reserved__; /* For alignment */
        u64 ip;
        char msg[MAX_AUXTRACE_ERROR_MSG];
};

struct aux_event {
        struct perf_event_header header;
        u64     aux_offset;
        u64     aux_size;
        u64     flags;
};

struct itrace_start_event {
        struct perf_event_header header;
        u32 pid, tid;
};

struct context_switch_event {
        struct perf_event_header header;
        u32 next_prev_pid;
        u32 next_prev_tid;
};

struct thread_map_event_entry {
        u64     pid;
        char    comm[16];
};

struct thread_map_event {
        struct perf_event_header        header;
        u64                             nr;
        struct thread_map_event_entry   entries[];
};

enum {
        PERF_STAT_CONFIG_TERM__AGGR_MODE        = 0,
        PERF_STAT_CONFIG_TERM__INTERVAL         = 1,
        PERF_STAT_CONFIG_TERM__SCALE            = 2,
        PERF_STAT_CONFIG_TERM__MAX              = 3,
};

struct stat_config_event_entry {
        u64     tag;
        u64     val;
};

struct stat_config_event {
        struct perf_event_header        header;
        u64                             nr;
        struct stat_config_event_entry  data[];
};

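/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * a stat_config receiver walks the .nr tag/val pairs and picks out the
 * PERF_STAT_CONFIG_TERM__* terms it understands:
 */
static inline u64 stat_config_event__term(struct stat_config_event *ev,
                                          u64 tag, u64 def)
{
        u64 i;

        for (i = 0; i < ev->nr; i++)
                if (ev->data[i].tag == tag)
                        return ev->data[i].val;
        return def;
}
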
struct stat_event {
        struct perf_event_header        header;

        u64     id;
        u32     cpu;
        u32     thread;

        union {
                struct {
                        u64 val;
                        u64 ena;
                        u64 run;
                };
                u64 values[3];
        };
};

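/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * .val/.ena/.run mirror the counter value plus its enabled and running
 * times, so a multiplexed count can be scaled as val * ena / run:
 */
static inline u64 stat_event__scaled_val(struct stat_event *st)
{
        if (!st->run)
                return 0;

        return st->val * st->ena / st->run;
}
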
enum {
        PERF_STAT_ROUND_TYPE__INTERVAL  = 0,
        PERF_STAT_ROUND_TYPE__FINAL     = 1,
};

struct stat_round_event {
        struct perf_event_header        header;
        u64                             type;
        u64                             time;
};

union perf_event {
        struct perf_event_header        header;
        struct mmap_event               mmap;
        struct mmap2_event              mmap2;
        struct comm_event               comm;
        struct fork_event               fork;
        struct lost_event               lost;
        struct lost_samples_event       lost_samples;
        struct read_event               read;
        struct throttle_event           throttle;
        struct sample_event             sample;
        struct attr_event               attr;
        struct event_update_event       event_update;
        struct event_type_event         event_type;
        struct tracing_data_event       tracing_data;
        struct build_id_event           build_id;
        struct id_index_event           id_index;
        struct auxtrace_info_event      auxtrace_info;
        struct auxtrace_event           auxtrace;
        struct auxtrace_error_event     auxtrace_error;
        struct aux_event                aux;
        struct itrace_start_event       itrace_start;
        struct context_switch_event     context_switch;
        struct thread_map_event         thread_map;
        struct cpu_map_event            cpu_map;
        struct stat_config_event        stat_config;
        struct stat_event               stat;
        struct stat_round_event         stat_round;
};

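/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * consumers dispatch on header.type and then access the matching union
 * member, e.g.:
 */
static inline u32 perf_event__task_pid(union perf_event *event)
{
        switch (event->header.type) {
        case PERF_RECORD_FORK:
        case PERF_RECORD_EXIT:
                return event->fork.pid;
        case PERF_RECORD_COMM:
                return event->comm.pid;
        default:
                return (u32)-1;
        }
}
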
void perf_event__print_totals(void);

struct perf_tool;
struct thread_map;
struct cpu_map;
struct perf_stat_config;
struct perf_counts_values;

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);

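/*
 * Illustrative sketch (hypothetical callback, not in the original header):
 * the perf_event__synthesize_*() routines below feed each synthesized event
 * to a perf_event__handler_t, e.g. a minimal do-nothing handler:
 */
static inline int perf_event__null_handler(struct perf_tool *tool,
                                           union perf_event *event,
                                           struct perf_sample *sample,
                                           struct machine *machine)
{
        (void)tool; (void)event; (void)sample; (void)machine;
        return 0;       /* non-zero is typically treated as an error by callers */
}
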
int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine, bool mmap_data,
                                      unsigned int proc_map_timeout);
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine);
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
                                   struct cpu_map *cpus,
                                   perf_event__handler_t process,
                                   struct machine *machine);
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine, bool mmap_data,
                                   unsigned int proc_map_timeout);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine);
int perf_event__synthesize_stat_config(struct perf_tool *tool,
                                       struct perf_stat_config *config,
                                       perf_event__handler_t process,
                                       struct machine *machine);
void perf_event__read_stat_config(struct perf_stat_config *config,
                                  struct stat_config_event *event);
int perf_event__synthesize_stat(struct perf_tool *tool,
                                u32 cpu, u32 thread, u64 id,
                                struct perf_counts_values *count,
                                perf_event__handler_t process,
                                struct machine *machine);
int perf_event__synthesize_stat_round(struct perf_tool *tool,
                                      u64 time, u64 type,
                                      perf_event__handler_t process,
                                      struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine);

int perf_event__process_comm(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_lost(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_lost_samples(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
int perf_event__process_aux(struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct machine *machine);
int perf_event__process_itrace_start(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
int perf_event__process_switch(struct perf_tool *tool,
                               union perf_event *event,
                               struct perf_sample *sample,
                               struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_mmap2(struct perf_tool *tool,
                              union perf_event *event,
                              struct perf_sample *sample,
                              struct machine *machine);
int perf_event__process_fork(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_exit(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process(struct perf_tool *tool,
                        union perf_event *event,
                        struct perf_sample *sample,
                        struct machine *machine);

struct addr_location;

int machine__resolve(struct machine *machine, struct addr_location *al,
                     struct perf_sample *sample);

void addr_location__put(struct addr_location *al);

struct thread;

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);
void thread__resolve(struct thread *thread, struct addr_location *al,
                     struct perf_sample *sample);

const char *perf_event__name(unsigned int id);

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
                                     u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
                                  u64 read_format,
                                  const struct perf_sample *sample,
                                  bool swapped);

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                  union perf_event *event, pid_t pid,
                                  perf_event__handler_t process,
                                  struct machine *machine);

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data,
                                       unsigned int proc_map_timeout);

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

u64 kallsyms__get_function_start(const char *kallsyms_filename,
                                 const char *symbol_name);

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max);
void  cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
                               u16 type, int max);
#endif /* __PERF_RECORD_H */