linux/include/uapi/linux/perf_event.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE                      = 0,
        PERF_TYPE_SOFTWARE                      = 1,
        PERF_TYPE_TRACEPOINT                    = 2,
        PERF_TYPE_HW_CACHE                      = 3,
        PERF_TYPE_RAW                           = 4,
        PERF_TYPE_BREAKPOINT                    = 5,

        PERF_TYPE_MAX,                          /* non-ABI */
};

/*
 * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
 *
 * PERF_TYPE_HARDWARE:                  0xEEEEEEEE000000AA
 *                                      AA: hardware event ID
 *                                      EEEEEEEE: PMU type ID
 * PERF_TYPE_HW_CACHE:                  0xEEEEEEEE00DDCCBB
 *                                      BB: hardware cache ID
 *                                      CC: hardware cache op ID
 *                                      DD: hardware cache op result ID
 *                                      EEEEEEEE: PMU type ID
 * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
 */
#define PERF_PMU_TYPE_SHIFT             32
#define PERF_HW_EVENT_MASK              0xffffffff
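
/*
 * Illustrative sketch (not part of this header): composing attr.config
 * for PERF_TYPE_HARDWARE with an explicit PMU type ID in the upper 32
 * bits, per the layout above. The helper name is made up; the PMU type
 * ID would typically come from
 * /sys/bus/event_source/devices/<pmu>/type.
 *
 *   #include <linux/perf_event.h>
 *
 *   static __u64 make_hw_config(__u32 pmu_type, __u64 hw_event_id)
 *   {
 *           return ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
 *                  (hw_event_id & PERF_HW_EVENT_MASK);
 *   }
 *
 *   // e.g. make_hw_config(pmu_type, PERF_COUNT_HW_CPU_CYCLES)
 */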

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,
        PERF_COUNT_HW_STALLED_CYCLES_FRONTEND   = 7,
        PERF_COUNT_HW_STALLED_CYCLES_BACKEND    = 8,
        PERF_COUNT_HW_REF_CPU_CYCLES            = 9,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};
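
/*
 * Illustrative sketch (not part of this header): counting retired
 * instructions for the calling thread with one of the generalized
 * events above. glibc has no perf_event_open() wrapper, so the raw
 * syscall is used; error handling is kept minimal.
 *
 *   #include <linux/perf_event.h>
 *   #include <sys/syscall.h>
 *   #include <sys/ioctl.h>
 *   #include <unistd.h>
 *   #include <string.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           struct perf_event_attr attr;
 *           long long count;
 *           int fd;
 *
 *           memset(&attr, 0, sizeof(attr));
 *           attr.type = PERF_TYPE_HARDWARE;
 *           attr.size = sizeof(attr);
 *           attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 *           attr.disabled = 1;
 *           attr.exclude_kernel = 1;
 *
 *           fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *           if (fd < 0)
 *                   return 1;
 *           ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *           // ... workload under measurement ...
 *           ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *           read(fd, &count, sizeof(count));
 *           printf("instructions: %lld\n", count);
 *           close(fd);
 *           return 0;
 *   }
 */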

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,
        PERF_COUNT_HW_CACHE_NODE                = 6,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};
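
/*
 * Illustrative sketch (not part of this header): a PERF_TYPE_HW_CACHE
 * config value combines the three enums above as
 * (perf_hw_cache_id) | (perf_hw_cache_op_id << 8) |
 * (perf_hw_cache_op_result_id << 16), matching the BB/CC/DD byte
 * layout documented earlier. For example, L1 data cache read misses:
 *
 *   __u64 config = PERF_COUNT_HW_CACHE_L1D |
 *                  (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                  (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */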

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software conditions in the kernel (and allow profiling
 * them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK                 = 0,
        PERF_COUNT_SW_TASK_CLOCK                = 1,
        PERF_COUNT_SW_PAGE_FAULTS               = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,
        PERF_COUNT_SW_DUMMY                     = 9,
        PERF_COUNT_SW_BPF_OUTPUT                = 10,

        PERF_COUNT_SW_MAX,                      /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                          = 1U << 0,
        PERF_SAMPLE_TID                         = 1U << 1,
        PERF_SAMPLE_TIME                        = 1U << 2,
        PERF_SAMPLE_ADDR                        = 1U << 3,
        PERF_SAMPLE_READ                        = 1U << 4,
        PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
        PERF_SAMPLE_ID                          = 1U << 6,
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
        PERF_SAMPLE_RAW                         = 1U << 10,
        PERF_SAMPLE_BRANCH_STACK                = 1U << 11,
        PERF_SAMPLE_REGS_USER                   = 1U << 12,
        PERF_SAMPLE_STACK_USER                  = 1U << 13,
        PERF_SAMPLE_WEIGHT                      = 1U << 14,
        PERF_SAMPLE_DATA_SRC                    = 1U << 15,
        PERF_SAMPLE_IDENTIFIER                  = 1U << 16,
        PERF_SAMPLE_TRANSACTION                 = 1U << 17,
        PERF_SAMPLE_REGS_INTR                   = 1U << 18,
        PERF_SAMPLE_PHYS_ADDR                   = 1U << 19,
        PERF_SAMPLE_AUX                         = 1U << 20,
        PERF_SAMPLE_CGROUP                      = 1U << 21,
#ifndef __GENKSYMS__
        PERF_SAMPLE_DATA_PAGE_SIZE              = 1U << 22,
        PERF_SAMPLE_CODE_PAGE_SIZE              = 1U << 23,
        PERF_SAMPLE_WEIGHT_STRUCT               = 1U << 24,

        PERF_SAMPLE_MAX = 1U << 25,             /* non-ABI */
#else
        PERF_SAMPLE_MAX = 1U << 22,             /* non-ABI */
#endif /* __GENKSYMS__ */

        __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63, /* non-ABI; internal use */
};
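
/*
 * Illustrative sketch (not part of this header): sample_type is a bit
 * mask, and the selected fields appear in each PERF_RECORD_SAMPLE in
 * the fixed order documented under enum perf_event_type below, no
 * matter how the bits are or'ed together here.
 *
 *   attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *                      PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD;
 *   attr.sample_period = 100000;    // overflow every 100000 events
 */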

#define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)

/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined; however, BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
        PERF_SAMPLE_BRANCH_USER_SHIFT           = 0, /* user branches */
        PERF_SAMPLE_BRANCH_KERNEL_SHIFT         = 1, /* kernel branches */
        PERF_SAMPLE_BRANCH_HV_SHIFT             = 2, /* hypervisor branches */

        PERF_SAMPLE_BRANCH_ANY_SHIFT            = 3, /* any branch types */
        PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT       = 4, /* any call branch */
        PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT     = 5, /* any return branch */
        PERF_SAMPLE_BRANCH_IND_CALL_SHIFT       = 6, /* indirect calls */
        PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT       = 7, /* transaction aborts */
        PERF_SAMPLE_BRANCH_IN_TX_SHIFT          = 8, /* in transaction */
        PERF_SAMPLE_BRANCH_NO_TX_SHIFT          = 9, /* not in transaction */
        PERF_SAMPLE_BRANCH_COND_SHIFT           = 10, /* conditional branches */

        PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT     = 11, /* call/ret stack */
        PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT       = 12, /* indirect jumps */
        PERF_SAMPLE_BRANCH_CALL_SHIFT           = 13, /* direct call */

        PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT       = 14, /* no flags */
        PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT      = 15, /* no cycles */

        PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT      = 16, /* save branch type */

        PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT       = 17, /* save low level index of raw branch records */

        PERF_SAMPLE_BRANCH_MAX_SHIFT            /* non-ABI */
};

enum perf_branch_sample_type {
        PERF_SAMPLE_BRANCH_USER         = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
        PERF_SAMPLE_BRANCH_KERNEL       = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
        PERF_SAMPLE_BRANCH_HV           = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

        PERF_SAMPLE_BRANCH_ANY          = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_CALL     = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ANY_RETURN   = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
        PERF_SAMPLE_BRANCH_IND_CALL     = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
        PERF_SAMPLE_BRANCH_ABORT_TX     = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
        PERF_SAMPLE_BRANCH_IN_TX        = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
        PERF_SAMPLE_BRANCH_NO_TX        = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
        PERF_SAMPLE_BRANCH_COND         = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

        PERF_SAMPLE_BRANCH_CALL_STACK   = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
        PERF_SAMPLE_BRANCH_IND_JUMP     = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
        PERF_SAMPLE_BRANCH_CALL         = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

        PERF_SAMPLE_BRANCH_NO_FLAGS     = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
        PERF_SAMPLE_BRANCH_NO_CYCLES    = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

        PERF_SAMPLE_BRANCH_TYPE_SAVE    =
                1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

        PERF_SAMPLE_BRANCH_HW_INDEX     = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,

        PERF_SAMPLE_BRANCH_MAX          = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

/*
 * Common flow change classification
 */
enum {
        PERF_BR_UNKNOWN         = 0,    /* unknown */
        PERF_BR_COND            = 1,    /* conditional */
        PERF_BR_UNCOND          = 2,    /* unconditional */
        PERF_BR_IND             = 3,    /* indirect */
        PERF_BR_CALL            = 4,    /* function call */
        PERF_BR_IND_CALL        = 5,    /* indirect function call */
        PERF_BR_RET             = 6,    /* function return */
        PERF_BR_SYSCALL         = 7,    /* syscall */
        PERF_BR_SYSRET          = 8,    /* syscall return */
        PERF_BR_COND_CALL       = 9,    /* conditional function call */
        PERF_BR_COND_RET        = 10,   /* conditional function return */
        PERF_BR_MAX,
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
        (PERF_SAMPLE_BRANCH_USER|\
         PERF_SAMPLE_BRANCH_KERNEL|\
         PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
        PERF_SAMPLE_REGS_ABI_NONE       = 0,
        PERF_SAMPLE_REGS_ABI_32         = 1,
        PERF_SAMPLE_REGS_ABI_64         = 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
        PERF_TXN_ELISION        = (1 << 0), /* From elision */
        PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
        PERF_TXN_SYNC           = (1 << 2), /* Instruction is related */
        PERF_TXN_ASYNC          = (1 << 3), /* Instruction not related */
        PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
        PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
        PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
        PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */

        PERF_TXN_MAX            = (1 << 8), /* non-ABI */

        /* bits 32..63 are reserved for the abort code */

        PERF_TXN_ABORT_MASK  = (0xffffffffULL << 32),
        PERF_TXN_ABORT_SHIFT = 32,
};
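
/*
 * Illustrative sketch (not part of this header): splitting a
 * PERF_SAMPLE_TRANSACTION value into its qualifier bits and the
 * architecture-specific abort code held in bits 32..63.
 *
 *   __u64 txn = ...;        // from a PERF_RECORD_SAMPLE
 *   __u32 reason = txn & (PERF_TXN_MAX - 1);
 *   __u32 abort_code = (txn & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT;
 */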

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64           value;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64           nr;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         value;
 *          { u64       id;           } && PERF_FORMAT_ID
 *        }             cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED          = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
        PERF_FORMAT_ID                          = 1U << 2,
        PERF_FORMAT_GROUP                       = 1U << 3,

        PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
};
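
/*
 * Illustrative sketch (not part of this header): reading a single
 * (non-group) event opened with PERF_FORMAT_TOTAL_TIME_ENABLED and
 * PERF_FORMAT_TOTAL_TIME_RUNNING, then scaling the raw count the way
 * perf does when the event was multiplexed:
 *
 *   struct {
 *           __u64 value;
 *           __u64 time_enabled;
 *           __u64 time_running;
 *   } rf;
 *   __u64 scaled = 0;
 *
 *   if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *           scaled = rf.value * rf.time_enabled / rf.time_running;
 */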

#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1     72      /* add: config2 */
#define PERF_ATTR_SIZE_VER2     80      /* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3     96      /* add: sample_regs_user */
                                        /* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4     104     /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5     112     /* add: aux_watermark */
#define PERF_ATTR_SIZE_VER6     120     /* add: aux_sample_size */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *                    should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32                   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32                   size;

        /*
         * Type specific configuration information.
         */
        __u64                   config;

        union {
                __u64           sample_period;
                __u64           sample_freq;
        };

        __u64                   sample_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */
                                inherit_stat   :  1, /* per task counts       */
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */
                                /*
                                 * precise_ip:
                                 *
                                 *  0 - SAMPLE_IP can have arbitrary skid
                                 *  1 - SAMPLE_IP must have constant skid
                                 *  2 - SAMPLE_IP requested to have 0 skid
                                 *  3 - SAMPLE_IP must have 0 skid
                                 *
                                 *  See also PERF_RECORD_MISC_EXACT_IP
                                 */
                                precise_ip     :  2, /* skid constraint       */
                                mmap_data      :  1, /* non-exec mmap data    */
                                sample_id_all  :  1, /* sample_type all events */

                                exclude_host   :  1, /* don't count in host   */
                                exclude_guest  :  1, /* don't count in guest  */

                                exclude_callchain_kernel : 1, /* exclude kernel callchains */
                                exclude_callchain_user   : 1, /* exclude user callchains */
                                mmap2          :  1, /* include mmap with inode data     */
                                comm_exec      :  1, /* flag comm events that are due to an exec */
                                use_clockid    :  1, /* use @clockid for time fields */
                                context_switch :  1, /* context switch data */
                                write_backward :  1, /* Write ring buffer from end to beginning */
                                namespaces     :  1, /* include namespaces data */
#ifndef __GENKSYMS__
                                ksymbol        :  1, /* include ksymbol events */
                                bpf_event      :  1, /* include bpf events */
                                aux_output     :  1, /* generate AUX records instead of events */
                                cgroup         :  1, /* include cgroup events */
                                text_poke      :  1, /* include text poke events */
                                build_id       :  1, /* use build id in mmap2 events */
                                __reserved_1   : 29;
#else
                                __reserved_1   : 35;
#endif /* __GENKSYMS__ */

        union {
                __u32           wakeup_events;    /* wakeup every n events */
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };

        __u32                   bp_type;
        union {
                __u64           bp_addr;
                __u64           kprobe_func; /* for perf_kprobe */
                __u64           uprobe_path; /* for perf_uprobe */
                __u64           config1; /* extension of config */
        };
        union {
                __u64           bp_len;
                __u64           kprobe_addr; /* when kprobe_func == NULL */
                __u64           probe_offset; /* for perf_[k,u]probe */
                __u64           config2; /* extension of config1 */
        };
        __u64   branch_sample_type; /* enum perf_branch_sample_type */

        /*
         * Defines set of user regs to dump on samples.
         * See asm/perf_regs.h for details.
         */
        __u64   sample_regs_user;

        /*
         * Defines size of the user stack to dump on samples.
         */
        __u32   sample_stack_user;

        __s32   clockid;
        /*
         * Defines set of regs to dump for each sample
         * state captured on:
         *  - precise = 0: PMU interrupt
         *  - precise > 0: sampled instruction
         *
         * See asm/perf_regs.h for details.
         */
        __u64   sample_regs_intr;

        /*
         * Wakeup watermark for AUX area
         */
        __u32   aux_watermark;
        __u16   sample_max_stack;
        __u16   __reserved_2;
#ifndef __GENKSYMS__
        __u32   aux_sample_size;
        __u32   __reserved_3;
#endif /* __GENKSYMS__ */
};
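
/*
 * Illustrative sketch (not part of this header): the size field makes
 * the struct forward/backward compatible. A common pattern (per the
 * perf_event_open(2) man page) is to zero the whole struct and set
 * size to the compiled-in sizeof; if the running kernel rejects that
 * size with E2BIG, it writes the size it supports back into attr.size
 * so the call can be retried:
 *
 *   memset(&attr, 0, sizeof(attr));
 *   attr.size = sizeof(attr);       // e.g. PERF_ATTR_SIZE_VER6
 *   fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
 *   if (fd < 0 && errno == E2BIG)
 *           fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
 */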

/*
 * Structure used by the PERF_EVENT_IOC_QUERY_BPF command below
 * to query bpf programs attached to the same perf tracepoint
 * as the given perf event.
 */
struct perf_event_query_bpf {
        /*
         * Length of the ids array below
         */
        __u32   ids_len;
        /*
         * Set by the kernel to indicate the number of
         * available programs
         */
        __u32   prog_cnt;
        /*
         * User provided buffer to store program ids
         */
        __u32   ids[0];
};

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE                   _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE                  _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH                  _IO ('$', 2)
#define PERF_EVENT_IOC_RESET                    _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD                   _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT               _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER               _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID                       _IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF                  _IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT             _IOW('$', 9, __u32)
#define PERF_EVENT_IOC_QUERY_BPF                _IOWR('$', 10, struct perf_event_query_bpf *)
#define PERF_EVENT_IOC_MODIFY_ATTRIBUTES        _IOW('$', 11, struct perf_event_attr *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
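
/*
 * Illustrative sketch (not part of this header): the reset/enable/
 * disable ioctls around a measured region. With PERF_IOC_FLAG_GROUP
 * as the argument the operation applies to all members of the event
 * group rather than just the one event:
 *
 *   ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *   ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *   // ... measured region ...
 *   ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *
 *   ioctl(group_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */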

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq, time_mult, time_shift, index, width;
         *   u64 count, enabled, running;
         *   u64 cyc, time_offset;
         *   s64 pmc = 0;
         *
         *   do {
         *     seq = pc->lock;
         *     barrier()
         *
         *     enabled = pc->time_enabled;
         *     running = pc->time_running;
         *
         *     if (pc->cap_user_time && enabled != running) {
         *       cyc = rdtsc();
         *       time_offset = pc->time_offset;
         *       time_mult   = pc->time_mult;
         *       time_shift  = pc->time_shift;
         *     }
         *
         *     index = pc->index;
         *     count = pc->offset;
         *     if (pc->cap_user_rdpmc && index) {
         *       width = pc->pmc_width;
         *       pmc = rdpmc(index - 1);
         *     }
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */
        union {
                __u64   capabilities;
                struct {
                        __u64   cap_bit0                : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
                                cap_bit0_is_deprecated  : 1, /* Always 1, signals that bit 0 is zero */

                                cap_user_rdpmc          : 1, /* The RDPMC instruction can be used to read counts */
                                cap_user_time           : 1, /* The time_{shift,mult,offset} fields are used */
                                cap_user_time_zero      : 1, /* The time_zero field is used */
                                cap_user_time_short     : 1, /* the time_{cycle,mask} fields are used */
                                cap_____res             : 58;
                };
        };

        /*
         * If cap_user_rdpmc this field provides the bit-width of the value
         * read using the rdpmc() or equivalent instruction. This can be used
         * to sign extend the result like:
         *
         *   pmc <<= 64 - width;
         *   pmc >>= 64 - width; // signed shift right
         *   count += pmc;
         */
        __u16   pmc_width;

        /*
         * If cap_user_time the below fields can be used to compute the time
         * delta since time_enabled (in ns) using rdtsc or similar.
         *
         *   u64 quot, rem;
         *   u64 delta;
         *
         *   quot = (cyc >> time_shift);
         *   rem = cyc & (((u64)1 << time_shift) - 1);
         *   delta = time_offset + quot * time_mult +
         *              ((rem * time_mult) >> time_shift);
         *
         * Where time_offset, time_mult, time_shift and cyc are read in the
         * seqcount loop described above. This delta can then be added to
         * enabled and possibly running (if index), improving the scaling:
         *
         *   enabled += delta;
         *   if (index)
         *     running += delta;
         *
         *   quot = count / running;
         *   rem  = count % running;
         *   count = quot * enabled + (rem * enabled) / running;
         */
        __u16   time_shift;
        __u32   time_mult;
        __u64   time_offset;
        /*
         * If cap_user_time_zero, the hardware clock (e.g. TSC) can be
         * calculated from sample timestamps.
         *
         *   time = timestamp - time_zero;
         *   quot = time / time_mult;
         *   rem  = time % time_mult;
         *   cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
         *
         * And vice versa:
         *
         *   quot = cyc >> time_shift;
         *   rem  = cyc & (((u64)1 << time_shift) - 1);
         *   timestamp = time_zero + quot * time_mult +
         *               ((rem * time_mult) >> time_shift);
         */
        __u64   time_zero;

        __u32   size;                   /* Header size up to __reserved[] fields. */
        __u32   __reserved_1;

        /*
         * If cap_user_time_short, the hardware clock is less than 64 bits
         * wide and we must compute the 'cyc' value, as used by cap_user_time,
         * as:
         *
         *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
         *
         * NOTE: this form is explicitly chosen such that cap_user_time_short
         *       is a correction on top of cap_user_time, and code that doesn't
         *       know about cap_user_time_short still works under the assumption
         *       the counter doesn't wrap.
         */
        __u64   time_cycles;
        __u64   time_mask;

                /*
                 * Hole for extension of the self monitor capabilities
                 */

        __u8    __reserved[116*8];      /* align to 1k. */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an smp_rmb()
         * after reading this value.
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data, after issuing
         * an smp_mb() to separate the data read from the ->data_tail store.
         * In this case the kernel will not overwrite unread data.
         *
         * See perf_output_put_handle() for the data ordering.
         *
         * data_{offset,size} indicate the location and size of the perf record
         * buffer within the mmapped area.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
        __u64   data_offset;            /* where the buffer starts */
        __u64   data_size;              /* data buffer size */

        /*
         * AUX area is defined by aux_{offset,size} fields that should be set
         * by the userspace, so that
         *
         *   aux_offset >= data_offset + data_size
         *
         * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
         *
         * Ring buffer pointers aux_{head,tail} have the same semantics as
         * data_{head,tail} and the same ordering rules apply.
         */
        __u64   aux_head;
        __u64   aux_tail;
        __u64   aux_offset;
        __u64   aux_size;
};
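
/*
 * Illustrative sketch (not part of this header): consuming records
 * with the data_head/data_tail protocol described above. Assumes
 * `page` points at the mmap()ed control page, the mapping is
 * PROT_WRITE, and data_size is a power of two; GCC/Clang __atomic
 * builtins stand in for smp_rmb()/smp_mb().
 *
 *   struct perf_event_mmap_page *pc = page;
 *   char *data = (char *)page + pc->data_offset;
 *   __u64 head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
 *   __u64 tail = pc->data_tail;
 *
 *   while (tail < head) {
 *           struct perf_event_header *eh = (struct perf_event_header *)
 *                   (data + (tail & (pc->data_size - 1)));
 *           // ... process the record; note it may wrap around the
 *           // end of the buffer and need to be copied out first ...
 *           tail += eh->size;
 *   }
 *   __atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
 */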

/*
 * The current state of perf_event_header::misc bits usage:
 * ('|' used bit, '-' unused bit)
 *
 *  012         CDEF
 *  |||---------||||
 *
 *  Where:
 *    0-2     CPUMODE_MASK
 *
 *    C       PROC_MAP_PARSE_TIMEOUT
 *    D       MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
 *    E       MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
 *    F       (reserved)
 */

#define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER             (5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
 * The following PERF_RECORD_MISC_* flags are used on different
 * events, so they can reuse the same bit position:
 *
 *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
 *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
 *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
 *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
 */
#define PERF_RECORD_MISC_MMAP_DATA              (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC              (1 << 13)
#define PERF_RECORD_MISC_FORK_EXEC              (1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT             (1 << 13)
/*
 * The PERF_RECORD_MISC_* flags below are safely reused
 * for the following events:
 *
 *   PERF_RECORD_MISC_EXACT_IP           - PERF_RECORD_SAMPLE of precise events
 *   PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
 *   PERF_RECORD_MISC_MMAP_BUILD_ID      - PERF_RECORD_MMAP2 event
 *
 *
 * PERF_RECORD_MISC_EXACT_IP:
 *   Indicates that the content of PERF_SAMPLE_IP points to
 *   the actual instruction that triggered the event. See also
 *   perf_event_attr::precise_ip.
 *
 * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
 *   Indicates that the thread was preempted in TASK_RUNNING state.
 *
 * PERF_RECORD_MISC_MMAP_BUILD_ID:
 *   Indicates that the mmap2 event carries build id data.
 */
#define PERF_RECORD_MISC_EXACT_IP               (1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT     (1 << 14)
#define PERF_RECORD_MISC_MMAP_BUILD_ID          (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};

struct perf_ns_link_info {
        __u64   dev;
        __u64   ino;
};

enum {
        NET_NS_INDEX            = 0,
        UTS_NS_INDEX            = 1,
        IPC_NS_INDEX            = 2,
        PID_NS_INDEX            = 3,
        USER_NS_INDEX           = 4,
        MNT_NS_INDEX            = 5,
        CGROUP_NS_INDEX         = 6,

        NR_NAMESPACES,          /* number of available namespaces */
};

enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * have the sample_type selected fields related to where/when
         * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
         * IDENTIFIER) described in PERF_RECORD_SAMPLE below. These fields
         * are stashed just after the perf_event_header, following the fields
         * already present for the record type, i.e. at the end of the
         * payload. That way a newer perf.data file will be supported by
         * older perf tools, with these new optional fields being ignored.
         *
         * struct sample_id {
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
         * } && perf_event_attr::sample_id_all
         *
         * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.  The
         * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
         * relative to header.size.
         */

        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_MMAP                        = 1,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_LOST                        = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_COMM                        = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_EXIT                        = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_THROTTLE                    = 5,
        PERF_RECORD_UNTHROTTLE                  = 6,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_FORK                        = 7,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_READ                        = 8,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      #
         *      # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
         *      # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
         *      # is fixed relative to header.
         *      #
         *
         *      { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
         *      { u64                   ip;       } && PERF_SAMPLE_IP
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   addr;     } && PERF_SAMPLE_ADDR
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   period;   } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format    values;   } && PERF_SAMPLE_READ
         *
         *      { u64                   nr,
         *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt the
         *      # stability of its content, it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32                   size;
         *        char                  data[size]; } && PERF_SAMPLE_RAW
         *
         *      { u64                   nr;
         *        { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
         *        { u64 from, to, flags } lbr[nr];
         *      } && PERF_SAMPLE_BRANCH_STACK
         *
         *      { u64                   abi; # enum perf_sample_regs_abi
         *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
         *
         *      { u64                   size;
         *        char                  data[size];
         *        u64                   dyn_size; } && PERF_SAMPLE_STACK_USER
         *
         *      { union perf_sample_weight
         *       {
         *              u64             full; && PERF_SAMPLE_WEIGHT
         *      #if defined(__LITTLE_ENDIAN_BITFIELD)
         *              struct {
         *                      u32     var1_dw;
         *                      u16     var2_w;
         *                      u16     var3_w;
         *              } && PERF_SAMPLE_WEIGHT_STRUCT
         *      #elif defined(__BIG_ENDIAN_BITFIELD)
         *              struct {
         *                      u16     var3_w;
         *                      u16     var2_w;
         *                      u32     var1_dw;
         *              } && PERF_SAMPLE_WEIGHT_STRUCT
         *      #endif
         *       }
         *      }
         *      { u64                   data_src; } && PERF_SAMPLE_DATA_SRC
         *      { u64                   transaction; } && PERF_SAMPLE_TRANSACTION
         *      { u64                   abi; # enum perf_sample_regs_abi
         *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
         *      { u64                   phys_addr;} && PERF_SAMPLE_PHYS_ADDR
         *      { u64                   size;
         *        char                  data[size]; } && PERF_SAMPLE_AUX
         *      { u64                   data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
         *      { u64                   code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,

        /*
         * The MMAP2 records are an augmented version of MMAP; they add
         * maj, min, ino numbers to be used to uniquely identify each mapping.
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      union {
         *              struct {
         *                      u32             maj;
         *                      u32             min;
         *                      u64             ino;
         *                      u64             ino_generation;
         *              };
         *              struct {
         *                      u8              build_id_size;
         *                      u8              __reserved_1;
         *                      u16             __reserved_2;
         *                      u8              build_id[20];
         *              };
         *      };
         *      u32                             prot, flags;
         *      char                            filename[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_MMAP2                       = 10,

        /*
         * Records that new data landed in the AUX buffer part.
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u64                             aux_offset;
         *      u64                             aux_size;
         *      u64                             flags;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_AUX                         = 11,

        /*
         * Indicates that an instruction trace has started.
         *
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid;
         *      u32                             tid;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_ITRACE_START                = 12,

        /*
         * Records the number of dropped/lost samples.
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u64                             lost;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_LOST_SAMPLES                = 13,

        /*
         * Records a context switch in or out (flagged by
         * PERF_RECORD_MISC_SWITCH_OUT). See also
         * PERF_RECORD_SWITCH_CPU_WIDE.
         *
         * struct {
         *      struct perf_event_header        header;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_SWITCH                      = 14,

        /*
         * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
         * next_prev_tid that are the next (switching out) or previous
         * (switching in) pid/tid.
         *
         * struct {
         *      struct perf_event_header        header;
         *      u32                             next_prev_pid;
         *      u32                             next_prev_tid;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_SWITCH_CPU_WIDE             = 15,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid;
         *      u32                             tid;
         *      u64                             nr_namespaces;
         *      { u64                           dev, inode; } [nr_namespaces];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_NAMESPACES                  = 16,

#ifndef __GENKSYMS__
        /*
         * Record ksymbol register/unregister events:
         *
         * struct {
         *      struct perf_event_header        header;
         *      u64                             addr;
         *      u32                             len;
         *      u16                             ksym_type;
         *      u16                             flags;
         *      char                            name[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_KSYMBOL                     = 17,

        /*
         * Record bpf events:
         *  enum perf_bpf_event_type {
         *      PERF_BPF_EVENT_UNKNOWN          = 0,
         *      PERF_BPF_EVENT_PROG_LOAD        = 1,
         *      PERF_BPF_EVENT_PROG_UNLOAD      = 2,
         *  };
         *
         * struct {
         *      struct perf_event_header        header;
         *      u16                             type;
         *      u16                             flags;
         *      u32                             id;
         *      u8                              tag[BPF_TAG_SIZE];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_BPF_EVENT                   = 18,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      char                            path[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_CGROUP                      = 19,

        /*
         * Records changes to kernel text, i.e. self-modifying code.
         * 'old_len' is the number of old bytes, 'new_len' is the number of
         * new bytes. Either 'old_len' or 'new_len' may be zero to indicate,
         * for example, the addition or removal of a trampoline. 'bytes'
         * contains the old bytes followed immediately by the new bytes.
         *
         * struct {
         *      struct perf_event_header        header;
         *      u64                             addr;
         *      u16                             old_len;
         *      u16                             new_len;
         *      u8                              bytes[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_TEXT_POKE                   = 20,
#endif /* __GENKSYMS__ */

        PERF_RECORD_MAX,                        /* non-ABI */
};
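
/*
 * Illustrative sketch (not part of this header): dispatching on
 * perf_event_header::type while walking the ring buffer. header.size
 * covers the whole record, including the optional trailing sample_id,
 * so unknown record types can simply be skipped:
 *
 *   static void handle_record(struct perf_event_header *eh)
 *   {
 *           switch (eh->type) {
 *           case PERF_RECORD_SAMPLE:
 *                   // body layout selected by attr.sample_type
 *                   break;
 *           case PERF_RECORD_MMAP2:
 *           case PERF_RECORD_COMM:
 *                   // track mappings / thread names
 *                   break;
 *           default:
 *                   break;  // skip eh->size bytes and continue
 *           }
 *   }
 */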

enum perf_record_ksymbol_type {
        PERF_RECORD_KSYMBOL_TYPE_UNKNOWN        = 0,
        PERF_RECORD_KSYMBOL_TYPE_BPF            = 1,
        PERF_RECORD_KSYMBOL_TYPE_MAX            /* non-ABI */
};

#define PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER    (1 << 0)

enum perf_bpf_event_type {
        PERF_BPF_EVENT_UNKNOWN          = 0,
        PERF_BPF_EVENT_PROG_LOAD        = 1,
        PERF_BPF_EVENT_PROG_UNLOAD      = 2,
        PERF_BPF_EVENT_MAX,             /* non-ABI */
};

#define PERF_MAX_STACK_DEPTH            127
#define PERF_MAX_CONTEXTS_PER_STACK       8

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};
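
/*
 * Illustrative sketch (not part of this header): the values above are
 * interleaved with real instruction pointers in PERF_SAMPLE_CALLCHAIN
 * data to mark context switches, so a parser should treat any entry
 * >= PERF_CONTEXT_MAX as a marker rather than an address:
 *
 *   for (__u64 i = 0; i < nr; i++) {
 *           if (ips[i] >= (__u64)PERF_CONTEXT_MAX)
 *                   ;       // marker, e.g. PERF_CONTEXT_KERNEL
 *           else
 *                   ;       // real instruction pointer
 *   }
 */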

/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED         0x01    /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE         0x02    /* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL           0x04    /* record contains gaps */
#define PERF_AUX_FLAG_COLLISION         0x08    /* sample collided with another */

#define PERF_FLAG_FD_NO_GROUP           (1UL << 0)
#define PERF_FLAG_FD_OUTPUT             (1UL << 1)
#define PERF_FLAG_PID_CGROUP            (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC            (1UL << 3) /* O_CLOEXEC */

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
        __u64 val;
        struct {
                __u64   mem_op:5,       /* type of opcode */
                        mem_lvl:14,     /* memory hierarchy level */
                        mem_snoop:5,    /* snoop mode */
                        mem_lock:2,     /* lock instr */
                        mem_dtlb:7,     /* tlb access */
                        mem_lvl_num:4,  /* memory hierarchy level number */
                        mem_remote:1,   /* remote */
                        mem_snoopx:2,   /* snoop mode, ext */
#ifndef __GENKSYMS__
                        mem_blk:3,      /* access blocked */
                        mem_rsvd:21;
#else
                        mem_rsvd:24;
#endif /* __GENKSYMS__ */
        };
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
        __u64 val;
        struct {
#ifndef __GENKSYMS__
                __u64   mem_rsvd:21,
                        mem_blk:3,      /* access blocked */
#else
                __u64   mem_rsvd:24,
#endif /* __GENKSYMS__ */
                        mem_snoopx:2,   /* snoop mode, ext */
                        mem_remote:1,   /* remote */
                        mem_lvl_num:4,  /* memory hierarchy level number */
                        mem_dtlb:7,     /* tlb access */
                        mem_lock:2,     /* lock instr */
                        mem_snoop:5,    /* snoop mode */
                        mem_lvl:14,     /* memory hierarchy level */
                        mem_op:5;       /* type of opcode */
        };
};
#else
#error "Unknown endianness"
#endif

/* type of opcode (load/store/prefetch, code) */
#define PERF_MEM_OP_NA          0x01 /* not available */
#define PERF_MEM_OP_LOAD        0x02 /* load instruction */
#define PERF_MEM_OP_STORE       0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH      0x08 /* prefetch */
#define PERF_MEM_OP_EXEC        0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT       0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA         0x01  /* not available */
#define PERF_MEM_LVL_HIT        0x02  /* hit level */
#define PERF_MEM_LVL_MISS       0x04  /* miss level */
#define PERF_MEM_LVL_L1         0x08  /* L1 */
#define PERF_MEM_LVL_LFB        0x10  /* Line Fill Buffer */
#define PERF_MEM_LVL_L2         0x20  /* L2 */
#define PERF_MEM_LVL_L3         0x40  /* L3 */
#define PERF_MEM_LVL_LOC_RAM    0x80  /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1   0x100 /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2   0x200 /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1   0x400 /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2   0x800 /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO         0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC        0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT      5

#define PERF_MEM_REMOTE_REMOTE  0x01  /* Remote */
#define PERF_MEM_REMOTE_SHIFT   37

#define PERF_MEM_LVLNUM_L1      0x01 /* L1 */
#define PERF_MEM_LVLNUM_L2      0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3      0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4      0x04 /* L4 */
/* 0x5-0xa available */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB     0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM     0x0d /* RAM */
#define PERF_MEM_LVLNUM_PMEM    0x0e /* PMEM */
#define PERF_MEM_LVLNUM_NA      0x0f /* N/A */

#define PERF_MEM_LVLNUM_SHIFT   33

/* snoop mode */
#define PERF_MEM_SNOOP_NA       0x01 /* not available */
#define PERF_MEM_SNOOP_NONE     0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT      0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS     0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM     0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT    19

#define PERF_MEM_SNOOPX_FWD     0x01 /* forward */
/* 1 free */
#define PERF_MEM_SNOOPX_SHIFT   37

/* locked instruction */
#define PERF_MEM_LOCK_NA        0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED    0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT     24

/* TLB access */
#define PERF_MEM_TLB_NA         0x01 /* not available */
#define PERF_MEM_TLB_HIT        0x02 /* hit level */
#define PERF_MEM_TLB_MISS       0x04 /* miss level */
#define PERF_MEM_TLB_L1         0x08 /* L1 */
#define PERF_MEM_TLB_L2         0x10 /* L2 */
#define PERF_MEM_TLB_WK         0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS         0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT      26

/* Access blocked */
#define PERF_MEM_BLK_NA         0x01 /* not available */
#define PERF_MEM_BLK_DATA       0x02 /* data could not be forwarded */
#define PERF_MEM_BLK_ADDR       0x04 /* address conflict */
#define PERF_MEM_BLK_SHIFT      40

#define PERF_MEM_S(a, s) \
        (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
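
/*
 * Illustrative sketch (not part of this header): PERF_MEM_S() shifts a
 * field value into its slot in perf_mem_data_src.val. A synthetic
 * "L1 load hit, no snoop, TLB hit" source could be composed and then
 * decoded back through the bit-fields:
 *
 *   union perf_mem_data_src dsrc = { .val =
 *           PERF_MEM_S(OP, LOAD) |
 *           PERF_MEM_S(LVL, HIT) | PERF_MEM_S(LVL, L1) |
 *           PERF_MEM_S(SNOOP, NONE) |
 *           PERF_MEM_S(TLB, HIT) };
 *
 *   if (dsrc.mem_op & PERF_MEM_OP_LOAD)
 *           ;       // it is a load
 */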

/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred, predicted is optional. If it is not supported,
 * mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 *      type: branch type
 */
struct perf_branch_entry {
        __u64   from;
        __u64   to;
        __u64   mispred:1,  /* target mispredicted */
                predicted:1,/* target predicted */
                in_tx:1,    /* in transaction */
                abort:1,    /* transaction abort */
                cycles:16,  /* cycle count to last branch */
                type:4,     /* branch type */
                reserved:40;
};

#ifndef __GENKSYMS__
union perf_sample_weight {
        __u64           full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
        struct {
                __u32   var1_dw;
                __u16   var2_w;
                __u16   var3_w;
        };
#elif defined(__BIG_ENDIAN_BITFIELD)
        struct {
                __u16   var3_w;
                __u16   var2_w;
                __u32   var1_dw;
        };
#else
#error "Unknown endianness"
#endif
};
#endif /* __GENKSYMS__ */

#endif /* _UAPI_LINUX_PERF_EVENT_H */