linux/include/linux/perf_event.h
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE                      = 0,
        PERF_TYPE_SOFTWARE                      = 1,
        PERF_TYPE_TRACEPOINT                    = 2,
        PERF_TYPE_HW_CACHE                      = 3,
        PERF_TYPE_RAW                           = 4,

        PERF_TYPE_MAX,                          /* non-ABI */
};

/*
 * Generalized performance event types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};
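
/*
 * Illustrative sketch (not part of this header): for PERF_TYPE_HW_CACHE
 * events, attr.config selects one value from each of the three enums
 * above, packed as (id) | (op_id << 8) | (result_id << 16) per the
 * perf_event_open() ABI.  For example, L1 data-cache read misses:
 *
 *      __u64 config = PERF_COUNT_HW_CACHE_L1D |
 *                     (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *                     (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */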

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow profiling
 * them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK                 = 0,
        PERF_COUNT_SW_TASK_CLOCK                = 1,
        PERF_COUNT_SW_PAGE_FAULTS               = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,

        PERF_COUNT_SW_MAX,                      /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                          = 1U << 0,
        PERF_SAMPLE_TID                         = 1U << 1,
        PERF_SAMPLE_TIME                        = 1U << 2,
        PERF_SAMPLE_ADDR                        = 1U << 3,
        PERF_SAMPLE_READ                        = 1U << 4,
        PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
        PERF_SAMPLE_ID                          = 1U << 6,
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
        PERF_SAMPLE_RAW                         = 1U << 10,

        PERF_SAMPLE_MAX = 1U << 11,             /* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64           value;
 *        { u64         time_enabled; } && PERF_FORMAT_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_RUNNING
 *        { u64         id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64           nr;
 *        { u64         time_enabled; } && PERF_FORMAT_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_RUNNING
 *        { u64         value;
 *          { u64       id;           } && PERF_FORMAT_ID
 *        }             cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED          = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
        PERF_FORMAT_ID                          = 1U << 2,
        PERF_FORMAT_GROUP                       = 1U << 3,

        PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
};
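
/*
 * Illustrative sketch (not part of this header): reading a single
 * (non-group) counter opened with read_format set to
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING.
 * The layout follows the read_format description above; the struct
 * name and the scaling step are examples only.
 *
 *      struct {
 *              __u64 value;
 *              __u64 time_enabled;
 *              __u64 time_running;
 *      } rf;
 *      double scaled;
 *
 *      if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *              scaled = (double)rf.value * rf.time_enabled / rf.time_running;
 */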

#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32                   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32                   size;

        /*
         * Type specific configuration information.
         */
        __u64                   config;

        union {
                __u64           sample_period;
                __u64           sample_freq;
        };

        __u64                   sample_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */
                                inherit_stat   :  1, /* per task counts       */
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */

                                __reserved_1   : 49;

        union {
                __u32           wakeup_events;    /* wakeup every n events */
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };
        __u32                   __reserved_2;

        __u64                   __reserved_3;
};
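
/*
 * Illustrative sketch (not part of this header): user space fills in a
 * perf_event_attr and passes it to the perf_event_open() syscall, here
 * invoked via syscall(2).  The helper name and the choice of fields are
 * examples only.
 *
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *
 *      static int open_cycles_counter(pid_t pid, int cpu)
 *      {
 *              struct perf_event_attr attr = { 0 };
 *
 *              attr.type           = PERF_TYPE_HARDWARE;
 *              attr.size           = sizeof(attr);
 *              attr.config         = PERF_COUNT_HW_CPU_CYCLES;
 *              attr.disabled       = 1;
 *              attr.exclude_kernel = 1;
 *
 *              return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
 *      }
 */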

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE           _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE          _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH          _IO ('$', 2)
#define PERF_EVENT_IOC_RESET            _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD           _IOW('$', 4, u64)
#define PERF_EVENT_IOC_SET_OUTPUT       _IO ('$', 5)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
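
/*
 * Illustrative sketch (not part of this header): a counter created with
 * attr.disabled = 1 is typically reset and enabled around the code being
 * measured; run_workload() below stands in for that code.  Passing
 * PERF_IOC_FLAG_GROUP as the ioctl argument applies the operation to the
 * whole event group rather than a single event.
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET,   0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE,  0);
 *      run_workload();
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 */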

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq;
         *   s64 count;
         *
         *   do {
         *     seq = pc->lock;
         *
         *     barrier();
         *     if (pc->index) {
         *       count = pmc_read(pc->index - 1);
         *       count += pc->offset;
         *     } else
         *       goto regular_read;
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */

                /*
                 * Hole for extension of the self monitor capabilities
                 */

        __u64   __reserved[123];        /* align to 1k */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an rmb(),
         * on SMP-capable platforms, after reading this value -- see
         * perf_event_wakeup().
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data. In this case
         * the kernel will not overwrite unread data.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
};
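
/*
 * Illustrative sketch (not part of this header): consuming the mmap()
 * data buffer from user space, following the @data_head/@data_tail
 * protocol described above.  rmb() stands for whatever read barrier the
 * architecture provides; mmap_base, data_area, data_size and
 * process_records() are hypothetical (see the record-walking sketch
 * after enum perf_event_type).  Writing @data_tail back is only
 * meaningful for PROT_WRITE mappings.
 *
 *      struct perf_event_mmap_page *pc = mmap_base;
 *      __u64 head, tail;
 *
 *      head = pc->data_head;
 *      rmb();
 *      tail = pc->data_tail;
 *
 *      process_records(data_area, tail, head, data_size);
 *
 *      pc->data_tail = head;
 */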

#define PERF_RECORD_MISC_CPUMODE_MASK           (3 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};

enum perf_event_type {

        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         * };
         */
        PERF_RECORD_MMAP                        = 1,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
         * };
         */
        PERF_RECORD_LOST                        = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         * };
         */
        PERF_RECORD_COMM                        = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_EXIT                        = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
         * };
         */
        PERF_RECORD_THROTTLE            = 5,
        PERF_RECORD_UNTHROTTLE          = 6,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_FORK                        = 7,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
         * };
         */
        PERF_RECORD_READ                        = 8,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      { u64                   ip;       } && PERF_SAMPLE_IP
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   addr;     } && PERF_SAMPLE_ADDR
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   period;   } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format    values;   } && PERF_SAMPLE_READ
         *
         *      { u64                   nr,
         *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt
         *      # the stability of its content; it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32                   size;
         *        char                  data[size]; } && PERF_SAMPLE_RAW
         * };
         */
        PERF_RECORD_SAMPLE              = 9,

        PERF_RECORD_MAX,                        /* non-ABI */
};
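
/*
 * Illustrative sketch (not part of this header): walking the records in
 * the mmap() data area.  Every record starts with a perf_event_header
 * whose @size covers the entire record, so the stream can be stepped
 * through without knowing each type.  Buffer wrap-around is ignored here
 * for brevity; the helper names are examples only.
 *
 *      void process_records(char *data, __u64 tail, __u64 head,
 *                           __u64 data_size)
 *      {
 *              while (tail < head) {
 *                      struct perf_event_header *hdr;
 *
 *                      hdr = (void *)(data + (tail % data_size));
 *                      if (hdr->type == PERF_RECORD_SAMPLE)
 *                              handle_sample(hdr);
 *                      tail += hdr->size;
 *              }
 *      }
 */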

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};
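
/*
 * Illustrative sketch (not part of this header): in a
 * PERF_SAMPLE_CALLCHAIN sample the ips[] array mixes real instruction
 * pointers with the PERF_CONTEXT_* markers above; a marker tags the
 * context of the entries that follow it.  record_frame() is a
 * hypothetical consumer.
 *
 *      __u64 context = PERF_CONTEXT_USER;
 *      __u64 i;
 *
 *      for (i = 0; i < nr; i++) {
 *              if (ips[i] >= PERF_CONTEXT_MAX)
 *                      context = ips[i];
 *              else
 *                      record_frame(context, ips[i]);
 *      }
 */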

#define PERF_FLAG_FD_NO_GROUP   (1U << 0)
#define PERF_FLAG_FD_OUTPUT     (1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

#define PERF_MAX_STACK_DEPTH            255

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             idx;
                };
                struct { /* software */
                        s64             remaining;
                        struct hrtimer  hrtimer;
                };
        };
        atomic64_t                      prev_count;
        u64                             sample_period;
        u64                             last_period;
        atomic64_t                      period_left;
        u64                             interrupts;

        u64                             freq_count;
        u64                             freq_interrupts;
        u64                             freq_stamp;
#endif
};

struct perf_event;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        int (*enable)                   (struct perf_event *event);
        void (*disable)                 (struct perf_event *event);
        void (*read)                    (struct perf_event *event);
        void (*unthrottle)              (struct perf_event *event);
};
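
/*
 * Illustrative sketch (not part of this header): a PMU backend fills in
 * these callbacks and hands the struct back from hw_perf_event_init().
 * The names below are examples only, not an existing driver.
 *
 *      static const struct pmu example_pmu = {
 *              .enable         = example_pmu_enable,
 *              .disable        = example_pmu_disable,
 *              .read           = example_pmu_read,
 *              .unthrottle     = example_pmu_unthrottle,
 *      };
 */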

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;

struct perf_mmap_data {
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
#endif
        int                             data_order;
        int                             nr_pages;       /* nr of data pages  */
        int                             writable;       /* are we writable   */
        int                             nr_locked;      /* nr pages mlocked  */

        atomic_t                        poll;           /* POLL_ for wakeups */
        atomic_t                        events;         /* event_id limit    */

        atomic_long_t                   head;           /* write position    */
        atomic_long_t                   done_head;      /* completed head    */

        atomic_t                        lock;           /* concurrent writes */
        atomic_t                        wakeup;         /* needs a wakeup    */
        atomic_t                        lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};

struct perf_pending_entry {
        struct perf_pending_entry *next;
        void (*func)(struct perf_pending_entry *);
};

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head                group_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        int                             nr_siblings;
        struct perf_event               *group_leader;
        struct perf_event               *output;
        const struct pmu                *pmu;

        enum perf_event_active_state    state;
        atomic64_t                      count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        struct perf_event_attr          attr;
        struct hw_perf_event            hw;

        struct perf_event_context       *ctx;
        struct file                     *filp;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
        struct perf_mmap_data           *data;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct perf_pending_entry       pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;
#endif
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        spinlock_t                      lock;
        /*
         * Protect the list of events.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                group_list;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        struct rcu_head                 rcu_head;
};

/**
 * struct perf_cpu_context - per-CPU event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             max_pertask;
        int                             exclusive;

        /*
         * Recursion avoidance:
         *
         * task, softirq, irq, nmi context
         */
        int                             recursion[4];
};

struct perf_output_handle {
        struct perf_event               *event;
        struct perf_mmap_data           *data;
        unsigned long                   head;
        unsigned long                   offset;
        int                             nmi;
        int                             sample;
        int                             locked;
        unsigned long                   flags;
};

#ifdef CONFIG_PERF_EVENTS

/*
 * Set by architecture code:
 */
extern int perf_max_events;

extern const struct pmu *hw_perf_event_init(struct perf_event *event);

extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
extern void perf_event_task_sched_out(struct task_struct *task,
                                        struct task_struct *next, int cpu);
extern void perf_event_task_tick(struct task_struct *task, int cpu);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void set_perf_event_pending(void);
extern void perf_event_do_pending(void);
extern void perf_event_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_event *group_leader,
               struct perf_cpu_context *cpuctx,
               struct perf_event_context *ctx, int cpu);
extern void perf_event_update_userpage(struct perf_event *event);

struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
};

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs);

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return (event->attr.type != PERF_TYPE_RAW) &&
                (event->attr.type != PERF_TYPE_HARDWARE) &&
                (event->attr.type != PERF_TYPE_HW_CACHE);
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
        if (atomic_read(&perf_swevent_enabled[event_id]))
                __perf_sw_event(event_id, nr, nmi, regs, addr);
}
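
/*
 * Illustrative sketch (not part of this header): kernel code reports a
 * software event occurrence by calling perf_sw_event() with the event id,
 * a count, an "in NMI" flag, the current registers and an optional
 * address.  A page-fault handler, for instance, might do roughly:
 *
 *      perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 */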

extern void __perf_event_mmap(struct vm_area_struct *vma);

static inline void perf_event_mmap(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_EXEC)
                __perf_event_mmap(vma);
}

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern void perf_event_init(void);
extern void perf_tp_event(int event_id, u64 addr, u64 count,
                                 void *record, int entry_size);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)   (user_mode(regs) ? PERF_RECORD_MISC_USER : \
                                 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)  instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size,
                             int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task, int cpu)             { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
                            struct task_struct *next, int cpu)          { }
static inline void
perf_event_task_tick(struct task_struct *task, int cpu)                 { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_do_pending(void)                          { }
static inline void perf_event_print_debug(void)                         { }
static inline void perf_disable(void)                                   { }
static inline void perf_enable(void)                                    { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
                     struct pt_regs *regs, u64 addr)                    { }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
static inline void perf_event_comm(struct task_struct *tsk)             { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }

#endif

#define perf_output_put(handle, x) \
        perf_output_copy((handle), &(x), sizeof(x))
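
/*
 * Illustrative sketch (not part of this header): roughly how the output
 * helpers above fit together when emitting a sample record into the mmap
 * buffer; the local variables are examples only.
 *
 *      struct perf_event_header header;
 *      struct perf_output_handle handle;
 *
 *      perf_prepare_sample(&header, data, event, regs);
 *      if (perf_output_begin(&handle, event, header.size, nmi, 1))
 *              return;
 *      perf_output_sample(&handle, &header, data, event);
 *      perf_output_end(&handle);
 */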

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */