/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,
	PERF_TYPE_BREAKPOINT			= 5,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND	= 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND	= 8,
	PERF_COUNT_HW_REF_CPU_CYCLES		= 9,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,
	PERF_COUNT_HW_CACHE_NODE		= 6,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow them to be
 * profiled as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK			= 0,
	PERF_COUNT_SW_TASK_CLOCK		= 1,
	PERF_COUNT_SW_PAGE_FAULTS		= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES		= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS		= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN		= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ		= 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
	PERF_COUNT_SW_DUMMY			= 9,
	PERF_COUNT_SW_BPF_OUTPUT		= 10,

	PERF_COUNT_SW_MAX,			/* non-ABI */
};

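/*
 * For PERF_TYPE_HARDWARE and PERF_TYPE_SOFTWARE, attr.config simply holds
 * one of the enum values above.  For PERF_TYPE_HW_CACHE, attr.config is
 * composed from the three cache enums (cache id, op id, result id), as
 * documented in perf_event_open(2).  A minimal sketch (illustrative only,
 * not part of this ABI header):
 *
 *	// count L1-D read misses
 *	struct perf_event_attr attr = {0};
 *
 *	attr.type   = PERF_TYPE_HW_CACHE;
 *	attr.size   = sizeof(attr);
 *	attr.config = PERF_COUNT_HW_CACHE_L1D |
 *		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */
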
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_READ			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_RAW				= 1U << 10,
	PERF_SAMPLE_BRANCH_STACK		= 1U << 11,
	PERF_SAMPLE_REGS_USER			= 1U << 12,
	PERF_SAMPLE_STACK_USER			= 1U << 13,
	PERF_SAMPLE_WEIGHT			= 1U << 14,
	PERF_SAMPLE_DATA_SRC			= 1U << 15,
	PERF_SAMPLE_IDENTIFIER			= 1U << 16,
	PERF_SAMPLE_TRANSACTION			= 1U << 17,
	PERF_SAMPLE_REGS_INTR			= 1U << 18,

	PERF_SAMPLE_MAX = 1U << 19,		/* non-ABI */
};

/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
	PERF_SAMPLE_BRANCH_USER_SHIFT		= 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL_SHIFT		= 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV_SHIFT		= 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY_SHIFT		= 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT	= 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT	= 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL_SHIFT	= 6, /* indirect calls */
	PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT	= 7, /* transaction aborts */
	PERF_SAMPLE_BRANCH_IN_TX_SHIFT		= 8, /* in transaction */
	PERF_SAMPLE_BRANCH_NO_TX_SHIFT		= 9, /* not in transaction */
	PERF_SAMPLE_BRANCH_COND_SHIFT		= 10, /* conditional branches */

	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT	= 11, /* call/ret stack */
	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT	= 12, /* indirect jumps */
	PERF_SAMPLE_BRANCH_CALL_SHIFT		= 13, /* direct call */

	PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT	= 14, /* no flags */
	PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT	= 15, /* no cycles */

	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
};

enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER		= 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
	PERF_SAMPLE_BRANCH_KERNEL	= 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
	PERF_SAMPLE_BRANCH_HV		= 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

	PERF_SAMPLE_BRANCH_ANY		= 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
	PERF_SAMPLE_BRANCH_IN_TX	= 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
	PERF_SAMPLE_BRANCH_NO_TX	= 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
	PERF_SAMPLE_BRANCH_COND		= 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

	PERF_SAMPLE_BRANCH_CALL_STACK	= 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
	PERF_SAMPLE_BRANCH_IND_JUMP	= 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
	PERF_SAMPLE_BRANCH_CALL		= 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

	PERF_SAMPLE_BRANCH_NO_FLAGS	= 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
	PERF_SAMPLE_BRANCH_NO_CYCLES	= 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

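/*
 * Branch sampling is requested by setting PERF_SAMPLE_BRANCH_STACK in
 * attr.sample_type and programming attr.branch_sample_type with the bits
 * above.  A minimal, illustrative sketch (not part of the ABI proper):
 *
 *	struct perf_event_attr attr = {0};
 *
 *	attr.type		= PERF_TYPE_HARDWARE;
 *	attr.size		= sizeof(attr);
 *	attr.config		= PERF_COUNT_HW_CPU_CYCLES;
 *	attr.sample_period	= 100000;
 *	attr.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
 *	// no priv level bits passed here, so the kernel falls back to the
 *	// event's priv level (see the comment above)
 *	attr.branch_sample_type	= PERF_SAMPLE_BRANCH_ANY;
 */
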
/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE	= 0,
	PERF_SAMPLE_REGS_ABI_32		= 1,
	PERF_SAMPLE_REGS_ABI_64		= 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
	PERF_TXN_ELISION	= (1 << 0), /* From elision */
	PERF_TXN_TRANSACTION	= (1 << 1), /* From transaction */
	PERF_TXN_SYNC		= (1 << 2), /* Instruction is related */
	PERF_TXN_ASYNC		= (1 << 3), /* Instruction not related */
	PERF_TXN_RETRY		= (1 << 4), /* Retry possible */
	PERF_TXN_CONFLICT	= (1 << 5), /* Conflict abort */
	PERF_TXN_CAPACITY_WRITE	= (1 << 6), /* Capacity write abort */
	PERF_TXN_CAPACITY_READ	= (1 << 7), /* Capacity read abort */

	PERF_TXN_MAX		= (1 << 8), /* non-ABI */

	/* bits 32..63 are reserved for the abort code */

	PERF_TXN_ABORT_MASK	= (0xffffffffULL << 32),
	PERF_TXN_ABORT_SHIFT	= 32,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64	value;
 *	  { u64	time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64	time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64	id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64	nr;
 *	  { u64	time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64	time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64	value;
 *	    { u64	id;   } && PERF_FORMAT_ID
 *	  }	cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED	= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING	= 1U << 1,
	PERF_FORMAT_ID			= 1U << 2,
	PERF_FORMAT_GROUP		= 1U << 3,

	PERF_FORMAT_MAX = 1U << 4,	/* non-ABI */
};

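/*
 * A hedged sketch of consuming the read() format above in user space,
 * assuming the event was opened with
 * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID.  The struct and the
 * group_leader_fd name below are illustrative only, not part of the ABI:
 *
 *	struct group_read {
 *		__u64 nr;
 *		struct { __u64 value, id; } cntr[];
 *	};
 *
 *	char buf[4096];
 *	struct group_read *rf = (struct group_read *)buf;
 *	__u64 i;
 *
 *	if (read(group_leader_fd, buf, sizeof(buf)) < 0)
 *		return -1;	// handle the error as appropriate
 *	for (i = 0; i < rf->nr; i++)
 *		printf("id %llu: %llu\n",
 *		       (unsigned long long)rf->cntr[i].id,
 *		       (unsigned long long)rf->cntr[i].value);
 */
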
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
					/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4	104	/* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5	112	/* add: aux_watermark */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *		      should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32	type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32	size;

	/*
	 * Type specific configuration information.
	 */
	__u64	config;

	union {
		__u64	sample_period;
		__u64	sample_freq;
	};

	__u64	sample_type;
	__u64	read_format;

	__u64	disabled	: 1, /* off by default        */
		inherit		: 1, /* children inherit it   */
		pinned		: 1, /* must always be on PMU */
		exclusive	: 1, /* only group on PMU     */
		exclude_user	: 1, /* don't count user      */
		exclude_kernel	: 1, /* ditto kernel          */
		exclude_hv	: 1, /* ditto hypervisor      */
		exclude_idle	: 1, /* don't count when idle */
		mmap		: 1, /* include mmap data     */
		comm		: 1, /* include comm data     */
		freq		: 1, /* use freq, not period  */
		inherit_stat	: 1, /* per task counts       */
		enable_on_exec	: 1, /* next exec enables     */
		task		: 1, /* trace fork/exit       */
		watermark	: 1, /* wakeup_watermark      */
		/*
		 * precise_ip:
		 *
		 *  0 - SAMPLE_IP can have arbitrary skid
		 *  1 - SAMPLE_IP must have constant skid
		 *  2 - SAMPLE_IP requested to have 0 skid
		 *  3 - SAMPLE_IP must have 0 skid
		 *
		 *  See also PERF_RECORD_MISC_EXACT_IP
		 */
		precise_ip	: 2, /* skid constraint       */
		mmap_data	: 1, /* non-exec mmap data    */
		sample_id_all	: 1, /* sample_type all events */

		exclude_host	: 1, /* don't count in host   */
		exclude_guest	: 1, /* don't count in guest  */

		exclude_callchain_kernel : 1, /* exclude kernel callchains */
		exclude_callchain_user   : 1, /* exclude user callchains */
		mmap2		: 1, /* include mmap with inode data */
		comm_exec	: 1, /* flag comm events that are due to an exec */
		use_clockid	: 1, /* use @clockid for time fields */
		context_switch	: 1, /* context switch data */
		write_backward	: 1, /* Write ring buffer from end to beginning */
		namespaces	: 1, /* include namespaces data */
		__reserved_1	: 35;

	union {
		__u32	wakeup_events;	  /* wakeup every n events */
		__u32	wakeup_watermark; /* bytes before wakeup   */
	};

	__u32	bp_type;
	union {
		__u64	bp_addr;
		__u64	config1; /* extension of config */
	};
	union {
		__u64	bp_len;
		__u64	config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32	sample_stack_user;

	__s32	clockid;
	/*
	 * Defines set of regs to dump for each sample
	 * state captured on:
	 *  - precise = 0: PMU interrupt
	 *  - precise > 0: sampled instruction
	 *
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_intr;

	/*
	 * Wakeup watermark for AUX area
	 */
	__u32	aux_watermark;
	__u16	sample_max_stack;
	__u16	__reserved_2;	/* align to __u64 */
};

#define perf_flags(attr)	(*(&(attr)->read_format + 1))

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};

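/*
 * There is no glibc wrapper for sys_perf_event_open(), so user space
 * typically opens events via syscall(2) and then drives them with the
 * ioctls above.  A hedged, minimal sketch (error handling elided,
 * illustrative only):
 *
 *	#include <unistd.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_event.h>
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type	= PERF_TYPE_HARDWARE;
 *	attr.size	= sizeof(attr);	// lets the kernel do fwd/bwd compat
 *	attr.config	= PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled	= 1;
 *	attr.exclude_kernel = 1;
 *
 *	// measure the calling thread on any CPU, no group, close-on-exec
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
 *		     PERF_FLAG_FD_CLOEXEC);
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... workload under measurement ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */
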
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, index, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier();
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_user_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     index = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_user_rdpmc && index) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(index - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware event identifier */
	__s64	offset;			/* add to hardware event value */
	__u64	time_enabled;		/* time event active */
	__u64	time_running;		/* time event on cpu */
	union {
		__u64	capabilities;
		struct {
			__u64	cap_bit0		: 1, /* Always 0, deprecated, see commit 860f085b74e9 */
				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */

				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
				cap_user_time		: 1, /* The time_* fields are used */
				cap_user_time_zero	: 1, /* The time_zero field is used */
				cap_____res		: 59;
		};
	};

	/*
	 * If cap_user_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;

	/*
	 * If cap_user_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot = (cyc >> time_shift);
	 *   rem = cyc & (((u64)1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *           ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if index), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (index)
	 *     running += delta;
	 *
	 *   quot = count / running;
	 *   rem  = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;
	/*
	 * If cap_user_time_zero, the hardware clock (e.g. TSC) can be
	 * calculated from sample timestamps.
	 *
	 *   time = timestamp - time_zero;
	 *   quot = time / time_mult;
	 *   rem  = time % time_mult;
	 *   cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
	 *
	 * And vice versa:
	 *
	 *   quot = cyc >> time_shift;
	 *   rem  = cyc & (((u64)1 << time_shift) - 1);
	 *   timestamp = time_zero + quot * time_mult +
	 *               ((rem * time_mult) >> time_shift);
	 */
	__u64	time_zero;
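
	/*
	 * The two conversions above can be wrapped in a small helper; the
	 * following is only a sketch restating the documented formulas (not
	 * part of the ABI), and it assumes the fields were snapshotted under
	 * the seqlock described above with cap_user_time_zero set:
	 *
	 *	static __u64 cyc_to_timestamp(const struct perf_event_mmap_page *pc,
	 *				      __u64 cyc)
	 *	{
	 *		__u64 quot = cyc >> pc->time_shift;
	 *		__u64 rem  = cyc & (((__u64)1 << pc->time_shift) - 1);
	 *
	 *		return pc->time_zero + quot * pc->time_mult +
	 *		       ((rem * pc->time_mult) >> pc->time_shift);
	 *	}
	 */
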
	__u32	size;			/* Header size up to __reserved[] fields. */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u8	__reserved[118*8+4];	/* align to 1k. */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an smp_rmb(),
	 * after reading this value.
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data, after issuing
	 * an smp_mb() to separate the data read from the ->data_tail store.
	 * In this case the kernel will not overwrite unread data.
	 *
	 * See perf_output_put_handle() for the data ordering.
	 *
	 * data_{offset,size} indicate the location and size of the perf record
	 * buffer within the mmapped area.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
	__u64	data_offset;		/* where the buffer starts */
	__u64	data_size;		/* data buffer size */

	/*
	 * AUX area is defined by aux_{offset,size} fields that should be set
	 * by the userspace, so that
	 *
	 *	aux_offset >= data_offset + data_size
	 *
	 * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
	 *
	 * Ring buffer pointers aux_{head,tail} have the same semantics as
	 * data_{head,tail} and same ordering rules apply.
	 */
	__u64	aux_head;
	__u64	aux_tail;
	__u64	aux_offset;
	__u64	aux_size;
};

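/*
 * A hedged sketch of draining the mmap()ed data buffer with the ordering
 * rules documented above.  Illustrative only: mmap_base is assumed to be
 * the start of the mmap()ed region, and the __atomic_* builtins are used
 * as user-space stand-ins for the smp_rmb()/smp_mb() barriers:
 *
 *	struct perf_event_mmap_page *pc = mmap_base;	// first page
 *	char *data = (char *)mmap_base + pc->data_offset;
 *	__u64 head, tail = pc->data_tail;
 *
 *	// ACQUIRE pairs with the kernel's store to data_head (smp_rmb)
 *	head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
 *
 *	while (tail != head) {
 *		struct perf_event_header *hdr =
 *			(void *)(data + (tail % pc->data_size));
 *		// ... consume the record; note that records may wrap at the
 *		// end of the buffer and then need to be copied out ...
 *		tail += hdr->size;
 *	}
 *
 *	// RELEASE separates the reads above from the data_tail store (smp_mb)
 *	__atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
 */
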
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
/*
 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
 * different events so can reuse the same bit position.
 * Ditto PERF_RECORD_MISC_SWITCH_OUT.
 */
#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

struct perf_ns_link_info {
	__u64	dev;
	__u64	ino;
};

enum {
	NET_NS_INDEX		= 0,
	UTS_NS_INDEX		= 1,
	IPC_NS_INDEX		= 2,
	PID_NS_INDEX		= 3,
	USER_NS_INDEX		= 4,
	MNT_NS_INDEX		= 5,
	CGROUP_NS_INDEX		= 6,

	NR_NAMESPACES,		/* number of available namespaces */
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * carry the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER), as described for PERF_RECORD_SAMPLE below. They are
	 * stashed just after the perf_event_header and the fields already
	 * present for each record type, i.e. at the end of the payload. That
	 * way a newer perf.data file will be supported by older perf tools,
	 * with these new optional fields being ignored.
	 *
	 * struct sample_id {
	 *	{ u32	pid, tid;  } && PERF_SAMPLE_TID
	 *	{ u64	time;      } && PERF_SAMPLE_TIME
	 *	{ u64	id;        } && PERF_SAMPLE_ID
	 *	{ u64	stream_id; } && PERF_SAMPLE_STREAM_ID
	 *	{ u32	cpu, res;  } && PERF_SAMPLE_CPU
	 *	{ u64	id;        } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32	pid, tid;
	 *	u64	addr;
	 *	u64	len;
	 *	u64	pgoff;
	 *	char	filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64	id;
	 *	u64	lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32	pid, tid;
	 *	char	comm[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32	pid, ppid;
	 *	u32	tid, ptid;
	 *	u64	time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64	time;
	 *	u64	id;
	 *	u64	stream_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32	pid, ppid;
	 *	u32	tid, ptid;
	 *	u64	time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32	pid, tid;
	 *
	 *	struct read_format		values;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	#
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *	# A user-space parsing sketch follows this enum.
	 *	#
	 *
	 *	{ u64	id;        } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64	ip;        } && PERF_SAMPLE_IP
	 *	{ u32	pid, tid;  } && PERF_SAMPLE_TID
	 *	{ u64	time;      } && PERF_SAMPLE_TIME
	 *	{ u64	addr;      } && PERF_SAMPLE_ADDR
	 *	{ u64	id;        } && PERF_SAMPLE_ID
	 *	{ u64	stream_id; } && PERF_SAMPLE_STREAM_ID
	 *	{ u32	cpu, res;  } && PERF_SAMPLE_CPU
	 *	{ u64	period;    } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values; } && PERF_SAMPLE_READ
	 *
	 *	{ u64	nr,
	 *	  u64	ips[nr];   } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt the
	 *	# stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32	size;
	 *	  char	data[size]; } && PERF_SAMPLE_RAW
	 *
	 *	{ u64	nr;
	 *	  { u64	from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64	abi; # enum perf_sample_regs_abi
	 *	  u64	regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64	size;
	 *	  char	data[size];
	 *	  u64	dyn_size;  } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ u64	weight;      } && PERF_SAMPLE_WEIGHT
	 *	{ u64	data_src;    } && PERF_SAMPLE_DATA_SRC
	 *	{ u64	transaction; } && PERF_SAMPLE_TRANSACTION
	 *	{ u64	abi; # enum perf_sample_regs_abi
	 *	  u64	regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,

	/*
	 * The MMAP2 records are an augmented version of MMAP; they add
	 * maj, min, and ino numbers used to uniquely identify each mapping.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32	pid, tid;
	 *	u64	addr;
	 *	u64	len;
	 *	u64	pgoff;
	 *	u32	maj;
	 *	u32	min;
	 *	u64	ino;
	 *	u64	ino_generation;
	 *	u32	prot, flags;
	 *	char	filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2			= 10,

	/*
	 * Records that new data landed in the AUX buffer part.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64	aux_offset;
	 *	u64	aux_size;
	 *	u64	flags;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_AUX				= 11,

	/*
	 * Indicates that an instruction trace has started.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32	pid;
	 *	u32	tid;
	 * };
	 */
	PERF_RECORD_ITRACE_START		= 12,

	/*
	 * Records the number of dropped/lost samples.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u64	lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST_SAMPLES		= 13,

	/*
	 * Records a context switch in or out (flagged by
	 * PERF_RECORD_MISC_SWITCH_OUT). See also
	 * PERF_RECORD_SWITCH_CPU_WIDE.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH			= 14,

	/*
	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
	 * next_prev_tid that are the next (switching out) or previous
	 * (switching in) pid/tid.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32	next_prev_pid;
	 *	u32	next_prev_tid;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH_CPU_WIDE		= 15,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32	pid;
	 *	u32	tid;
	 *	u64	nr_namespaces;
	 *	{ u64	dev, inode; } [nr_namespaces];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_NAMESPACES			= 16,

	PERF_RECORD_MAX,			/* non-ABI */
};

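/*
 * A hedged sketch of decoding a PERF_RECORD_SAMPLE pulled out of the
 * mmap()ed data buffer, assuming the event was opened with
 * sample_type = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME.  The function name is illustrative only, not part of
 * the ABI:
 *
 *	void handle_record(struct perf_event_header *hdr)
 *	{
 *		__u64 *p = (__u64 *)(hdr + 1);
 *		__u64 id, ip, time;
 *		__u32 pid, tid;
 *
 *		if (hdr->type != PERF_RECORD_SAMPLE)
 *			return;		// sample_id parsing not shown here
 *
 *		id   = *p++;		// PERF_SAMPLE_IDENTIFIER
 *		ip   = *p++;		// PERF_SAMPLE_IP
 *		pid  = ((__u32 *)p)[0];	// PERF_SAMPLE_TID
 *		tid  = ((__u32 *)p)[1];
 *		p++;
 *		time = *p++;		// PERF_SAMPLE_TIME
 *
 *		// id selects the event when several share one buffer
 *	}
 */
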
#define PERF_MAX_STACK_DEPTH		127
#define PERF_MAX_CONTEXTS_PER_STACK	  8

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};

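/*
 * In a PERF_SAMPLE_CALLCHAIN array the context values above are interleaved
 * with real instruction pointers to mark where the chain switches between
 * user, kernel, hypervisor and guest frames.  A hedged sketch of telling
 * the two apart (function name illustrative only):
 *
 *	void walk_callchain(__u64 nr, const __u64 *ips)
 *	{
 *		__u64 i;
 *
 *		for (i = 0; i < nr; i++) {
 *			if (ips[i] >= (__u64)PERF_CONTEXT_MAX) {
 *				// context marker, e.g. PERF_CONTEXT_KERNEL;
 *				// following entries are frames in that context
 *				continue;
 *			}
 *			// ips[i] is an ordinary instruction pointer
 *		}
 *	}
 */
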
/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED		0x01	/* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE		0x02	/* snapshot from overwrite mode */
#define PERF_AUX_FLAG_PARTIAL		0x04	/* record contains gaps */

#define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
#define PERF_FLAG_FD_OUTPUT		(1UL << 1)
#define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */

#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_op:5,	/* type of opcode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_snoop:5,	/* snoop mode */
			mem_lock:2,	/* lock instr */
			mem_dtlb:7,	/* tlb access */
			mem_rsvd:31;
	};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_rsvd:31,
			mem_dtlb:7,	/* tlb access */
			mem_lock:2,	/* lock instr */
			mem_snoop:5,	/* snoop mode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_op:5;	/* type of opcode */
	};
};
#else
#error "Unknown endianness"
#endif

/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA		0x01 /* not available */
#define PERF_MEM_OP_LOAD	0x02 /* load instruction */
#define PERF_MEM_OP_STORE	0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
#define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT	0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA		0x01   /* not available */
#define PERF_MEM_LVL_HIT	0x02   /* hit level */
#define PERF_MEM_LVL_MISS	0x04   /* miss level */
#define PERF_MEM_LVL_L1		0x08   /* L1 */
#define PERF_MEM_LVL_LFB	0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x20   /* L2 */
#define PERF_MEM_LVL_L3		0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

/* snoop mode */
#define PERF_MEM_SNOOP_NA	0x01 /* not available */
#define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

/* locked instruction */
#define PERF_MEM_LOCK_NA	0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED	0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x01 /* not available */
#define PERF_MEM_TLB_HIT	0x02 /* hit level */
#define PERF_MEM_TLB_MISS	0x04 /* miss level */
#define PERF_MEM_TLB_L1		0x08 /* L1 */
#define PERF_MEM_TLB_L2		0x10 /* L2 */
#define PERF_MEM_TLB_WK		0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26

#define PERF_MEM_S(a, s) \
	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)

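/*
 * PERF_MEM_S() composes a PERF_SAMPLE_DATA_SRC value from the field/value
 * pairs above; the bitfields of union perf_mem_data_src decode it again.
 * A hedged, illustrative sketch (not part of the ABI):
 *
 *	// encode: a locally cached L1 load hit
 *	union perf_mem_data_src dsrc;
 *
 *	dsrc.val = PERF_MEM_S(OP, LOAD) |
 *		   PERF_MEM_S(LVL, HIT) | PERF_MEM_S(LVL, L1) |
 *		   PERF_MEM_S(SNOOP, NONE) |
 *		   PERF_MEM_S(LOCK, NA) |
 *		   PERF_MEM_S(TLB, HIT) | PERF_MEM_S(TLB, L1);
 *
 *	// decode: the field values compare directly against the defines
 *	int l1_load_hit = (dsrc.mem_op  & PERF_MEM_OP_LOAD) &&
 *			  (dsrc.mem_lvl & PERF_MEM_LVL_L1)  &&
 *			  (dsrc.mem_lvl & PERF_MEM_LVL_HIT);
 */
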
/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred, predicted is optional. If it is not supported,
 * mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 *    cycles: cycles from last branch (or 0 if not supported)
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,	/* target mispredicted */
		predicted:1,	/* target predicted */
		in_tx:1,	/* in transaction */
		abort:1,	/* transaction abort */
		cycles:16,	/* cycle count to last branch */
		reserved:44;
};

#endif /* _UAPI_LINUX_PERF_EVENT_H */