linux/include/linux/trace_events.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
                                  unsigned long flags,
                                  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                                    const struct trace_print_flags *symbol_array);

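/*
 * Example (illustrative sketch, not part of this header): these helpers
 * back the __print_flags()/__print_symbolic() macros used in a trace
 * event's TP_printk(). A hypothetical event might print a state field as:
 *
 *      TP_printk("state=%s",
 *                __print_symbolic(__entry->state,
 *                                 { 0, "RUNNING" },
 *                                 { 1, "SLEEPING" }))
 */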
#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
                      unsigned long long flags,
                      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
                                        unsigned long long val,
                                        const struct trace_print_flags_u64
                                                                 *symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
                                    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
                                const unsigned char *buf, int len,
                                bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
                                   const void *buf, int count,
                                   size_t el_size);

const char *
trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
                         int prefix_type, int rowsize, int groupsize,
                         const void *buf, size_t len, bool ascii);

struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
                          struct trace_event *event);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
        unsigned short          type;
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
};

#define TRACE_EVENT_TYPE_MAX                                            \
        ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

/*
 * Trace iterator - used by printout routines that present trace
 * results to users; these routines might sleep, etc.:
 */
struct trace_iterator {
        struct trace_array      *tr;
        struct tracer           *trace;
        struct trace_buffer     *trace_buffer;
        void                    *private;
        int                     cpu_file;
        struct mutex            mutex;
        struct ring_buffer_iter **buffer_iter;
        unsigned long           iter_flags;

        /* trace_seq for __print_flags() and __print_symbolic() etc. */
        struct trace_seq        tmp_seq;

        cpumask_var_t           started;

        /* true when the currently open file is a snapshot */
        bool                    snapshot;

        /* The below is zeroed out in pipe_read */
        struct trace_seq        seq;
        struct trace_entry      *ent;
        unsigned long           lost_events;
        int                     leftover;
        int                     ent_size;
        int                     cpu;
        u64                     ts;

        loff_t                  pos;
        long                    idx;

        /* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
        TRACE_FILE_LAT_FMT      = 1,
        TRACE_FILE_ANNOTATE     = 2,
        TRACE_FILE_TIME_IN_NS   = 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
                                      int flags, struct trace_event *event);

struct trace_event_functions {
        trace_print_func        trace;
        trace_print_func        raw;
        trace_print_func        hex;
        trace_print_func        binary;
};

struct trace_event {
        struct hlist_node               node;
        struct list_head                list;
        int                             type;
        struct trace_event_functions    *funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

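/*
 * Example (illustrative sketch, not part of this header): registering a
 * custom output format. "my_trace_output" and "my_event" are hypothetical;
 * register_trace_event() assigns an event type if event->type is zero and
 * returns the type, or 0 on failure.
 *
 *      static struct trace_event_functions my_funcs = {
 *              .trace  = my_trace_output,
 *      };
 *      static struct trace_event my_event = {
 *              .funcs  = &my_funcs,
 *      };
 *      ...
 *      if (!register_trace_event(&my_event))
 *              pr_warn("could not register trace event\n");
 */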
/* Return values for print_line callback */
enum print_line_t {
        TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
        TRACE_TYPE_HANDLED      = 1,
        TRACE_TYPE_UNHANDLED    = 2,    /* Relay to other output functions */
        TRACE_TYPE_NO_CONSUME   = 3     /* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);

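/*
 * Example (illustrative sketch, not part of this header): a typical
 * trace_print_func, in the style of the callbacks generated for trace
 * events. "trace_output_foo" and its format string are hypothetical.
 * trace_handle_return() yields TRACE_TYPE_PARTIAL_LINE if the seq
 * overflowed, TRACE_TYPE_HANDLED otherwise.
 *
 *      static enum print_line_t
 *      trace_output_foo(struct trace_iterator *iter, int flags,
 *                       struct trace_event *event)
 *      {
 *              struct trace_seq *s = &iter->seq;
 *              int ret;
 *
 *              ret = trace_raw_output_prep(iter, event);
 *              if (ret != TRACE_TYPE_HANDLED)
 *                      return ret;
 *
 *              trace_seq_printf(s, "foo: %d\n", 42);
 *              return trace_handle_return(s);
 *      }
 */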
void tracing_generic_entry_update(struct trace_entry *entry,
                                  unsigned short type,
                                  unsigned long flags,
                                  int pc);
struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
                                struct trace_event_file *trace_file,
                                int type, unsigned long len,
                                unsigned long flags, int pc);

#define TRACE_RECORD_CMDLINE    BIT(0)
#define TRACE_RECORD_TGID       BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
                                          struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
        TRACE_REG_REGISTER,
        TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
        TRACE_REG_PERF_REGISTER,
        TRACE_REG_PERF_UNREGISTER,
        TRACE_REG_PERF_OPEN,
        TRACE_REG_PERF_CLOSE,
        /*
         * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
         * custom action was taken and the default action is not to be
         * performed.
         */
        TRACE_REG_PERF_ADD,
        TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

struct trace_event_class {
        const char              *system;
        void                    *probe;
#ifdef CONFIG_PERF_EVENTS
        void                    *perf_probe;
#endif
        int                     (*reg)(struct trace_event_call *event,
                                       enum trace_reg type, void *data);
        int                     (*define_fields)(struct trace_event_call *);
        struct list_head        *(*get_fields)(struct trace_event_call *);
        struct list_head        fields;
        int                     (*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
                            enum trace_reg type, void *data);

struct trace_event_buffer {
        struct ring_buffer              *buffer;
        struct ring_buffer_event        *event;
        struct trace_event_file         *trace_file;
        void                            *entry;
        unsigned long                   flags;
        int                             pc;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
                                  struct trace_event_file *trace_file,
                                  unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);

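/*
 * Example (illustrative sketch, not part of this header): the
 * reserve/fill/commit pattern used by the generated trace-event probes.
 * "struct trace_event_raw_foo" and its "val" field are hypothetical.
 *
 *      struct trace_event_buffer fbuffer;
 *      struct trace_event_raw_foo *entry;
 *
 *      entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *                                         sizeof(*entry));
 *      if (!entry)
 *              return;
 *      entry->val = val;
 *      trace_event_buffer_commit(&fbuffer);
 */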
enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
        TRACE_EVENT_FL_NO_SET_FILTER_BIT,
        TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
        TRACE_EVENT_FL_TRACEPOINT_BIT,
        TRACE_EVENT_FL_KPROBE_BIT,
        TRACE_EVENT_FL_UPROBE_BIT,
};

/*
 * Event flags:
 *  FILTERED      - The event has a filter attached
 *  CAP_ANY       - Any user can enable for perf
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT    - Event is a tracepoint
 *  KPROBE        - Event is a kprobe
 *  UPROBE        - Event is a uprobe
 */
enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
        TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
        TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
        TRACE_EVENT_FL_TRACEPOINT       = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
        TRACE_EVENT_FL_KPROBE           = (1 << TRACE_EVENT_FL_KPROBE_BIT),
        TRACE_EVENT_FL_UPROBE           = (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
        struct list_head        list;
        struct trace_event_class *class;
        union {
                char                    *name;
                /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
                struct tracepoint       *tp;
        };
        struct trace_event      event;
        char                    *print_fmt;
        struct event_filter     *filter;
        void                    *mod;
        void                    *data;
        /*
         * Flags, matching the TRACE_EVENT_FL_*_BIT enum above:
         *   bit 0:             filter_active
         *   bit 1:             allow trace by non root (cap any)
         *   bit 2:             failed to apply filter
         *   bit 3:             trace internal event (do not enable)
         *   bit 4:             Event is a tracepoint
         *   bit 5:             Event is a kprobe
         *   bit 6:             Event is a uprobe
         */
        int                     flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
        int                             perf_refcount;
        struct hlist_head __percpu      *perf_events;
        struct bpf_prog_array __rcu     *prog_array;

        int     (*perf_perm)(struct trace_event_call *,
                             struct perf_event *);
#endif
};

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
        /*
         * This inline function checks whether call->prog_array
         * is valid or not. The function is called in various places,
         * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
         *
         * If this function returns true, and later call->prog_array
         * becomes NULL inside the rcu_read_lock/unlock region,
         * we bail out then. If this function returns false,
         * there is a risk that we might miss a few events if the checking
         * were delayed until inside the rcu_read_lock/unlock region and
         * call->prog_array happened to become non-NULL then.
         *
         * Here, READ_ONCE() is used instead of rcu_access_pointer().
         * rcu_access_pointer() requires the actual definition of
         * "struct bpf_prog_array" while READ_ONCE() only needs
         * a declaration of the same type.
         */
        return !!READ_ONCE(call->prog_array);
}
#endif

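/*
 * Example (illustrative sketch, not part of this header): the intended
 * check-then-recheck pattern, as in perf_trace_run_bpf_submit(). The
 * cheap lockless test runs first; trace_call_bpf() re-dereferences
 * call->prog_array under rcu_read_lock() and returns 0 if the event
 * should be dropped.
 *
 *      if (bpf_prog_array_valid(call) && !trace_call_bpf(call, ctx))
 *              return;         // a BPF program filtered the event out
 */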
static inline const char *
trace_event_name(struct trace_event_call *call)
{
        if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
                return call->tp ? call->tp->name : NULL;
        else
                return call->name;
}

static inline struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}

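/*
 * Example (illustrative sketch, not part of this header): walking an
 * event's fields. This assumes the struct ftrace_event_field definition
 * from kernel/trace/trace.h, whose entries are chained through a "link"
 * member and carry the field's name.
 *
 *      struct ftrace_event_field *field;
 *      struct list_head *head = trace_get_fields(event_call);
 *
 *      list_for_each_entry(field, head, link)
 *              pr_info("field: %s\n", field->name);
 */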
struct trace_array;
struct trace_subsystem_dir;

enum {
        EVENT_FILE_FL_ENABLED_BIT,
        EVENT_FILE_FL_RECORDED_CMD_BIT,
        EVENT_FILE_FL_RECORDED_TGID_BIT,
        EVENT_FILE_FL_FILTERED_BIT,
        EVENT_FILE_FL_NO_SET_FILTER_BIT,
        EVENT_FILE_FL_SOFT_MODE_BIT,
        EVENT_FILE_FL_SOFT_DISABLED_BIT,
        EVENT_FILE_FL_TRIGGER_MODE_BIT,
        EVENT_FILE_FL_TRIGGER_COND_BIT,
        EVENT_FILE_FL_PID_FILTER_BIT,
        EVENT_FILE_FL_WAS_ENABLED_BIT,
};

/*
 * Event file flags:
 *  ENABLED       - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED      - The event has a filter attached
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                   tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER    - When set, the event is filtered based on pid
 *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
 */
enum {
        EVENT_FILE_FL_ENABLED           = (1 << EVENT_FILE_FL_ENABLED_BIT),
        EVENT_FILE_FL_RECORDED_CMD      = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
        EVENT_FILE_FL_RECORDED_TGID     = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
        EVENT_FILE_FL_FILTERED          = (1 << EVENT_FILE_FL_FILTERED_BIT),
        EVENT_FILE_FL_NO_SET_FILTER     = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
        EVENT_FILE_FL_SOFT_MODE         = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
        EVENT_FILE_FL_SOFT_DISABLED     = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
        EVENT_FILE_FL_TRIGGER_MODE      = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
        EVENT_FILE_FL_TRIGGER_COND      = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
        EVENT_FILE_FL_PID_FILTER        = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
        EVENT_FILE_FL_WAS_ENABLED       = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};

struct trace_event_file {
        struct list_head                list;
        struct trace_event_call         *event_call;
        struct event_filter __rcu       *filter;
        struct dentry                   *dir;
        struct trace_array              *tr;
        struct trace_subsystem_dir      *system;
        struct list_head                triggers;

        /*
         * 32 bit flags, matching the EVENT_FILE_FL_*_BIT enum above:
         *   bit 0:             enabled
         *   bit 1:             enabled cmd record
         *   bit 2:             enabled tgid record
         *   bit 3:             filter attached
         *   bit 4:             filter failed to apply
         *   bit 5:             soft mode
         *   bit 6:             soft disabled
         *   bit 7:             trigger mode
         *   bit 8:             trigger condition
         *   bit 9:             pid filter
         *   bit 10:            was enabled
         *
         * Note: The bits must be set atomically to prevent races
         * from other writers. Reads of flags do not need to be in
         * sync as they occur in critical sections. But the way flags
         * is currently used, these changes do not affect the code
         * except that when a change is made, it may have a slight
         * delay in propagating the changes to other CPUs due to
         * caching and such. Which is mostly OK ;-)
         */
        unsigned long           flags;
        atomic_t                sm_ref; /* soft-mode reference counter */
        atomic_t                tm_ref; /* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)                                \
        static int __init trace_init_flags_##name(void)                 \
        {                                                               \
                event_##name.flags |= value;                            \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)                          \
        static int perf_perm_##name(struct trace_event_call *tp_event, \
                                    struct perf_event *p_event)         \
        {                                                               \
                return ({ expr; });                                     \
        }                                                               \
        static int __init trace_init_perf_perm_##name(void)             \
        {                                                               \
                event_##name.perf_perm = &perf_perm_##name;             \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_perf_perm_##name);

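/*
 * Example (hypothetical, for illustration only): restrict perf access to
 * a "foo" event to sampling use. "expr" may refer to the tp_event and
 * p_event parameters above; is_sampling_event() is from linux/perf_event.h.
 *
 *      __TRACE_EVENT_PERF_PERM(foo,
 *              is_sampling_event(p_event) ? 0 : -EPERM);
 */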
#define PERF_MAX_TRACE_SIZE     2048

#define MAX_FILTER_STR_VAL      256     /* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
        ETT_NONE                = (0),
        ETT_TRACE_ONOFF         = (1 << 0),
        ETT_SNAPSHOT            = (1 << 1),
        ETT_STACKTRACE          = (1 << 2),
        ETT_EVENT_ENABLE        = (1 << 3),
        ETT_EVENT_HIST          = (1 << 4),
        ETT_HIST_ENABLE         = (1 << 5),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
                    struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
                         enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
        unsigned long eflags = file->flags;

        if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
                if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
                        event_triggers_call(file, NULL, NULL);
                if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
                        return true;
                if (eflags & EVENT_FILE_FL_PID_FILTER)
                        return trace_event_ignore_this_pid(file);
        }
        return false;
}

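/*
 * Example (illustrative sketch, not part of this header): the generated
 * trace-event probes bail out early with this helper before reserving
 * ring-buffer space. "trace_event_raw_event_foo" is hypothetical.
 *
 *      static notrace void trace_event_raw_event_foo(void *__data, int val)
 *      {
 *              struct trace_event_file *trace_file = __data;
 *
 *              if (trace_trigger_soft_disabled(trace_file))
 *                      return;
 *              // ...reserve, fill and commit the entry...
 *      }
 */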
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
                            u32 *fd_type, const char **buf,
                            u64 *probe_offset, u64 *probe_addr);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
        return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
{
        return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
        return -EOPNOTSUPP;
}
static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
        return -EOPNOTSUPP;
}
static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
        return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
        return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
                                          u32 *prog_id, u32 *fd_type,
                                          const char **buf, u64 *probe_offset,
                                          u64 *probe_addr)
{
        return -EOPNOTSUPP;
}
#endif

enum {
        FILTER_OTHER = 0,
        FILTER_STATIC_STRING,
        FILTER_DYN_STRING,
        FILTER_PTR_STRING,
        FILTER_TRACE_FN,
        FILTER_COMM,
        FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
                              const char *name, int offset, int size,
                              int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

#define is_signed_type(type)    (((type)(-1)) < (type)1)

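/*
 * Example (illustrative sketch, not part of this header): defining a
 * field from a define_fields callback, in the style of the generated
 * __field() expansion. "struct trace_event_raw_foo" and its "val"
 * member are hypothetical.
 *
 *      ret = trace_define_field(call, "int", "val",
 *                               offsetof(struct trace_event_raw_foo, val),
 *                               sizeof(int), is_signed_type(int),
 *                               FILTER_OTHER);
 */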
int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
                const char *event, bool enable);
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to initialize the static variable with fmt when fmt is not
 * a constant, even with the outer if statement optimized out.
 */
#define event_trace_printk(ip, fmt, args...)                            \
do {                                                                    \
        __trace_printk_check_format(fmt, ##args);                       \
        tracing_record_cmdline(current);                                \
        if (__builtin_constant_p(fmt)) {                                \
                static const char *trace_printk_fmt                     \
                  __attribute__((section("__trace_printk_fmt"))) =      \
                        __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                        \
                __trace_bprintk(ip, trace_printk_fmt, ##args);          \
        } else                                                          \
                __trace_printk(ip, fmt, ##args);                        \
} while (0)

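/*
 * Example (illustrative sketch, not part of this header): emitting a
 * message attributed to the caller's instruction pointer. _THIS_IP_ is
 * from linux/kernel.h; the format must be a string literal for the
 * faster __trace_bprintk() path to be taken.
 *
 *      event_trace_printk(_THIS_IP_, "count=%d\n", count);
 */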
#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
DECLARE_PER_CPU(int, bpf_kprobe_override);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
                               u32 *fd_type, const char **symbol,
                               u64 *probe_offset, u64 *probe_addr,
                               bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int  perf_uprobe_init(struct perf_event *event,
                             unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
                               u32 *fd_type, const char **filename,
                               u64 *probe_offset, bool perf_type_tracepoint);
#endif
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3);
void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                    u64 arg8);
void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
                               struct trace_event_call *call, u64 count,
                               struct pt_regs *regs, struct hlist_head *head,
                               struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
                       u64 count, struct pt_regs *regs, void *head,
                       struct task_struct *task)
{
        perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}

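/*
 * Example (illustrative sketch, not part of this header): the
 * alloc/fill/submit pattern used by the perf probes. The entry layout
 * is hypothetical; perf_trace_buf_alloc() returns NULL if the buffer
 * cannot be used (e.g. the recursion limit was hit).
 *
 *      struct trace_event_raw_foo *entry;
 *      struct pt_regs *regs;
 *      int rctx;
 *
 *      entry = perf_trace_buf_alloc(sizeof(*entry), &regs, &rctx);
 *      if (!entry)
 *              return;
 *      perf_fetch_caller_regs(regs);
 *      // ...fill in the entry's fields here...
 *      perf_trace_buf_submit(entry, sizeof(*entry), rctx, type,
 *                            1, regs, head, NULL);
 */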
#endif

#endif /* _LINUX_TRACE_EVENT_H */