linux/include/linux/trace_events.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
                                  unsigned long flags,
                                  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                                    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
                      unsigned long long flags,
                      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
                                        unsigned long long val,
                                        const struct trace_print_flags_u64
                                                                 *symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
                                    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
                                const unsigned char *buf, int len,
                                bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
                                   const void *buf, int count,
                                   size_t el_size);

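/*
 * Example (illustrative sketch, not part of this header): the seq helpers
 * above back the __print_flags()/__print_symbolic() wrappers used in a
 * TRACE_EVENT()'s TP_printk(). A hypothetical event could render a state
 * field symbolically like so:
 *
 *      #define show_state(val)                         \
 *              __print_symbolic(val,                   \
 *                      { 0, "IDLE" },                  \
 *                      { 1, "RUNNING" },               \
 *                      { 2, "BLOCKED" })
 *
 * which ultimately resolves to a trace_print_symbols_seq() call against
 * the iterator's tmp_seq.
 */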
struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
                          struct trace_event *event);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
        unsigned short          type;
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
};

#define TRACE_EVENT_TYPE_MAX                                            \
        ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

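/*
 * Worked example: 'type' above is an unsigned short, so on the usual
 * 2-byte short this evaluates to (1 << 16) - 1 = 65535, the highest
 * assignable event type ID.
 */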
/*
 * Trace iterator - used by printout routines that present trace
 * results to users; note that these routines may sleep:
 */
struct trace_iterator {
        struct trace_array      *tr;
        struct tracer           *trace;
        struct trace_buffer     *trace_buffer;
        void                    *private;
        int                     cpu_file;
        struct mutex            mutex;
        struct ring_buffer_iter **buffer_iter;
        unsigned long           iter_flags;

        /* trace_seq for __print_flags() and __print_symbolic() etc. */
        struct trace_seq        tmp_seq;

        cpumask_var_t           started;

        /* set to true when the currently open file is a snapshot */
        bool                    snapshot;

        /* The below is zeroed out in pipe_read */
        struct trace_seq        seq;
        struct trace_entry      *ent;
        unsigned long           lost_events;
        int                     leftover;
        int                     ent_size;
        int                     cpu;
        u64                     ts;

        loff_t                  pos;
        long                    idx;

        /* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
        TRACE_FILE_LAT_FMT      = 1,
        TRACE_FILE_ANNOTATE     = 2,
        TRACE_FILE_TIME_IN_NS   = 4,
};

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
                                      int flags, struct trace_event *event);

struct trace_event_functions {
        trace_print_func        trace;
        trace_print_func        raw;
        trace_print_func        hex;
        trace_print_func        binary;
};

struct trace_event {
        struct hlist_node               node;
        struct list_head                list;
        int                             type;
        struct trace_event_functions    *funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

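/*
 * Example (hedged sketch with hypothetical names): registering a custom
 * output handler for an event type. The "trace" callback prints via
 * iter->seq and finishes with trace_handle_return():
 *
 *      static enum print_line_t my_trace_output(struct trace_iterator *iter,
 *                                               int flags,
 *                                               struct trace_event *event)
 *      {
 *              trace_seq_printf(&iter->seq, "my event\n");
 *              return trace_handle_return(&iter->seq);
 *      }
 *
 *      static struct trace_event_functions my_funcs = {
 *              .trace  = my_trace_output,
 *      };
 *      static struct trace_event my_event = {
 *              .funcs  = &my_funcs,
 *      };
 *
 *      // register_trace_event() returns the assigned type, or 0 on failure
 *      if (!register_trace_event(&my_event))
 *              pr_warn("failed to register trace event\n");
 */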
/* Return values for print_line callback */
enum print_line_t {
        TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
        TRACE_TYPE_HANDLED      = 1,
        TRACE_TYPE_UNHANDLED    = 2,    /* Relay to other output functions */
        TRACE_TYPE_NO_CONSUME   = 3     /* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);

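/*
 * Typical use (the pattern followed by in-tree output functions): write
 * to the seq, then let trace_handle_return() map an overflowed seq to
 * TRACE_TYPE_PARTIAL_LINE and everything else to TRACE_TYPE_HANDLED:
 *
 *      trace_seq_printf(s, "%d %d\n", entry->a, entry->b);
 *      return trace_handle_return(s);
 */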
void tracing_generic_entry_update(struct trace_entry *entry,
                                  unsigned long flags,
                                  int pc);
struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
                                struct trace_event_file *trace_file,
                                int type, unsigned long len,
                                unsigned long flags, int pc);

#define TRACE_RECORD_CMDLINE    BIT(0)
#define TRACE_RECORD_TGID       BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
                                          struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
        TRACE_REG_REGISTER,
        TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
        TRACE_REG_PERF_REGISTER,
        TRACE_REG_PERF_UNREGISTER,
        TRACE_REG_PERF_OPEN,
        TRACE_REG_PERF_CLOSE,
        /*
         * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
         * custom action was taken and the default action is not to be
         * performed.
         */
        TRACE_REG_PERF_ADD,
        TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

struct trace_event_class {
        const char              *system;
        void                    *probe;
#ifdef CONFIG_PERF_EVENTS
        void                    *perf_probe;
#endif
        int                     (*reg)(struct trace_event_call *event,
                                       enum trace_reg type, void *data);
        int                     (*define_fields)(struct trace_event_call *);
        struct list_head        *(*get_fields)(struct trace_event_call *);
        struct list_head        fields;
        int                     (*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
                            enum trace_reg type, void *data);

struct trace_event_buffer {
        struct ring_buffer              *buffer;
        struct ring_buffer_event        *event;
        struct trace_event_file         *trace_file;
        void                            *entry;
        unsigned long                   flags;
        int                             pc;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
                                  struct trace_event_file *trace_file,
                                  unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);

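/*
 * Example (hedged sketch of the reserve/fill/commit pattern that the
 * TRACE_EVENT() machinery generates; 'struct my_entry' and 'value' are
 * hypothetical):
 *
 *      struct trace_event_buffer fbuffer;
 *      struct my_entry *entry;
 *
 *      entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *                                         sizeof(*entry));
 *      if (!entry)
 *              return;         // could not reserve; nothing to undo
 *      entry->value = value;
 *      trace_event_buffer_commit(&fbuffer);
 */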
enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
        TRACE_EVENT_FL_NO_SET_FILTER_BIT,
        TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
        TRACE_EVENT_FL_TRACEPOINT_BIT,
        TRACE_EVENT_FL_KPROBE_BIT,
        TRACE_EVENT_FL_UPROBE_BIT,
};

/*
 * Event flags:
 *  FILTERED      - The event has a filter attached
 *  CAP_ANY       - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT    - Event is a tracepoint
 *  KPROBE        - Event is a kprobe
 *  UPROBE        - Event is a uprobe
 */
enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
        TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
        TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
        TRACE_EVENT_FL_TRACEPOINT       = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
        TRACE_EVENT_FL_KPROBE           = (1 << TRACE_EVENT_FL_KPROBE_BIT),
        TRACE_EVENT_FL_UPROBE           = (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
        struct list_head        list;
        struct trace_event_class *class;
        union {
                char                    *name;
                /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
                struct tracepoint       *tp;
        };
        struct trace_event      event;
        char                    *print_fmt;
        struct event_filter     *filter;
        void                    *mod;
        void                    *data;
        /*
         * Static flags of different events, matching the
         * TRACE_EVENT_FL_*_BIT enum above:
         *   bit 0:             filter is active
         *   bit 1:             allow trace by non root (cap any)
         *   bit 2:             failed to apply filter
         *   bit 3:             trace internal event (do not enable)
         *   bit 4:             event is a tracepoint
         *   bit 5:             event is a kprobe
         *   bit 6:             event is a uprobe
         */
        int                     flags;

#ifdef CONFIG_PERF_EVENTS
        int                             perf_refcount;
        struct hlist_head __percpu      *perf_events;
        struct bpf_prog_array __rcu     *prog_array;

        int     (*perf_perm)(struct trace_event_call *,
                             struct perf_event *);
#endif
};

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
        /*
         * This inline function checks whether call->prog_array
         * is valid or not. The function is called in various places,
         * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
         *
         * If this function returns true, and later call->prog_array
         * becomes NULL inside the rcu_read_lock/unlock region,
         * we bail out then. If this function returns false,
         * there is a risk that we might miss a few events if the checking
         * were delayed until inside the rcu_read_lock/unlock region and
         * call->prog_array happened to become non-NULL then.
         *
         * Here, READ_ONCE() is used instead of rcu_access_pointer().
         * rcu_access_pointer() requires the actual definition of
         * "struct bpf_prog_array" while READ_ONCE() only needs
         * a declaration of the same type.
         */
        return !!READ_ONCE(call->prog_array);
}
#endif

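/*
 * Typical caller pattern (hedged sketch of how perf probe code can use
 * the heuristic above to skip the RCU section entirely when no BPF
 * programs are attached):
 *
 *      if (bpf_prog_array_valid(call)) {
 *              if (!trace_call_bpf(call, ctx))
 *                      return;         // BPF program(s) filtered the event
 *      }
 */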
static inline const char *
trace_event_name(struct trace_event_call *call)
{
        if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
                return call->tp ? call->tp->name : NULL;
        else
                return call->name;
}

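/*
 * Example use (illustrative): resolving a human-readable name without
 * caring which union member ("name" or "tp") is active:
 *
 *      pr_debug("enabling event %s\n", trace_event_name(call));
 */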
struct trace_array;
struct trace_subsystem_dir;

enum {
        EVENT_FILE_FL_ENABLED_BIT,
        EVENT_FILE_FL_RECORDED_CMD_BIT,
        EVENT_FILE_FL_RECORDED_TGID_BIT,
        EVENT_FILE_FL_FILTERED_BIT,
        EVENT_FILE_FL_NO_SET_FILTER_BIT,
        EVENT_FILE_FL_SOFT_MODE_BIT,
        EVENT_FILE_FL_SOFT_DISABLED_BIT,
        EVENT_FILE_FL_TRIGGER_MODE_BIT,
        EVENT_FILE_FL_TRIGGER_COND_BIT,
        EVENT_FILE_FL_PID_FILTER_BIT,
        EVENT_FILE_FL_WAS_ENABLED_BIT,
};

/*
 * Event file flags:
 *  ENABLED       - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED      - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                   tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER    - When set, the event is filtered based on pid
 *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
 */
enum {
        EVENT_FILE_FL_ENABLED           = (1 << EVENT_FILE_FL_ENABLED_BIT),
        EVENT_FILE_FL_RECORDED_CMD      = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
        EVENT_FILE_FL_RECORDED_TGID     = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
        EVENT_FILE_FL_FILTERED          = (1 << EVENT_FILE_FL_FILTERED_BIT),
        EVENT_FILE_FL_NO_SET_FILTER     = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
        EVENT_FILE_FL_SOFT_MODE         = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
        EVENT_FILE_FL_SOFT_DISABLED     = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
        EVENT_FILE_FL_TRIGGER_MODE      = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
        EVENT_FILE_FL_TRIGGER_COND      = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
        EVENT_FILE_FL_PID_FILTER        = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
        EVENT_FILE_FL_WAS_ENABLED       = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};

struct trace_event_file {
        struct list_head                list;
        struct trace_event_call         *event_call;
        struct event_filter __rcu       *filter;
        struct dentry                   *dir;
        struct trace_array              *tr;
        struct trace_subsystem_dir      *system;
        struct list_head                triggers;

        /*
         * 32 bit flags; see the EVENT_FILE_FL_*_BIT enum above for the
         * bit assignments.
         *
         * Note: The bits must be set atomically to prevent races
         * from other writers. Reads of flags do not need to be in
         * sync as they occur in critical sections. But the way flags
         * is currently used, these changes do not affect the code
         * except that when a change is made, it may have a slight
         * delay in propagating the changes to other CPUs due to
         * caching and such. Which is mostly OK ;-)
         */
        unsigned long           flags;
        atomic_t                sm_ref; /* soft-mode reference counter */
        atomic_t                tm_ref; /* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)                                \
        static int __init trace_init_flags_##name(void)                 \
        {                                                               \
                event_##name.flags |= value;                            \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)                          \
        static int perf_perm_##name(struct trace_event_call *tp_event, \
                                    struct perf_event *p_event)         \
        {                                                               \
                return ({ expr; });                                     \
        }                                                               \
        static int __init trace_init_perf_perm_##name(void)             \
        {                                                               \
                event_##name.perf_perm = &perf_perm_##name;             \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_perf_perm_##name);

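/*
 * Example (hedged; 'my_event' is hypothetical): these helpers back the
 * public TRACE_EVENT_FLAGS()/TRACE_EVENT_PERF_PERM() wrappers. An event
 * can restrict perf usage by evaluating an expression against the
 * incoming p_event, e.g. to reject sampling:
 *
 *      TRACE_EVENT_PERF_PERM(my_event,
 *                            is_sampling_event(p_event) ? -EPERM : 0);
 */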
#define PERF_MAX_TRACE_SIZE     2048

#define MAX_FILTER_STR_VAL      256     /* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
        ETT_NONE                = (0),
        ETT_TRACE_ONOFF         = (1 << 0),
        ETT_SNAPSHOT            = (1 << 1),
        ETT_STACKTRACE          = (1 << 2),
        ETT_EVENT_ENABLE        = (1 << 3),
        ETT_EVENT_HIST          = (1 << 4),
        ETT_HIST_ENABLE         = (1 << 5),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
                    struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
                         enum event_trigger_type tt,
                         void *rec, struct ring_buffer_event *event);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * are called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it returns true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
        unsigned long eflags = file->flags;

        if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
                if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
                        event_triggers_call(file, NULL, NULL);
                if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
                        return true;
                if (eflags & EVENT_FILE_FL_PID_FILTER)
                        return trace_event_ignore_this_pid(file);
        }
        return false;
}

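/*
 * Typical caller pattern (this mirrors what the generated tracepoint
 * probes do before reserving ring buffer space for an event):
 *
 *      if (trace_trigger_soft_disabled(trace_file))
 *              return;
 */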
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
        return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
{
        return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
        return -EOPNOTSUPP;
}

static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
        return -EOPNOTSUPP;
}

static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
        return -EOPNOTSUPP;
}

static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
{
        return NULL;
}
#endif

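/*
 * Note on the stubs above: trace_call_bpf() returns nonzero when the
 * event should be recorded, so the !CONFIG_BPF_EVENTS stub returning 1
 * keeps callers like this working unchanged:
 *
 *      if (!trace_call_bpf(call, regs))
 *              return;         // dropped by an attached BPF program
 */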
enum {
        FILTER_OTHER = 0,
        FILTER_STATIC_STRING,
        FILTER_DYN_STRING,
        FILTER_PTR_STRING,
        FILTER_TRACE_FN,
        FILTER_COMM,
        FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
                              const char *name, int offset, int size,
                              int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

#define is_signed_type(type)    (((type)(-1)) < (type)1)

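/*
 * is_signed_type() works by comparing -1 against 1 after casting both to
 * the type: for signed types -1 < 1 holds, while for unsigned types the
 * cast wraps -1 around to the type's maximum value. So:
 *
 *      is_signed_type(int)             -> 1
 *      is_signed_type(unsigned long)   -> 0
 */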
int trace_set_clr_event(const char *system, const char *event, int set);

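/*
 * Example (illustrative): kernel code can flip events on or off by name,
 * much like writing to tracefs' set_event file. Enabling sched_switch:
 *
 *      trace_set_clr_event("sched", "sched_switch", 1);
 *
 * A NULL system or event acts as a wildcard for that part of the match.
 */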
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to assign the static variable from fmt when fmt is not a
 * constant, even with the outer if statement optimizing the branch out.
 */
#define event_trace_printk(ip, fmt, args...)                            \
do {                                                                    \
        __trace_printk_check_format(fmt, ##args);                       \
        tracing_record_cmdline(current);                                \
        if (__builtin_constant_p(fmt)) {                                \
                static const char *trace_printk_fmt                     \
                  __attribute__((section("__trace_printk_fmt"))) =      \
                        __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                        \
                __trace_bprintk(ip, trace_printk_fmt, ##args);          \
        } else                                                          \
                __trace_printk(ip, fmt, ##args);                        \
} while (0)

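/*
 * Usage sketch (hedged): with a constant format string the call is routed
 * to the cheaper binary __trace_bprintk() path; a format built at runtime
 * falls back to __trace_printk():
 *
 *      event_trace_printk(_THIS_IP_, "resetting %s count to %d\n",
 *                         name, count);
 */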
#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
DECLARE_PER_CPU(int, bpf_kprobe_override);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int  perf_uprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
#endif
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3);
void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                    u64 arg8);
void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
                               struct trace_event_call *call, u64 count,
                               struct pt_regs *regs, struct hlist_head *head,
                               struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
                       u64 count, struct pt_regs *regs, void *head,
                       struct task_struct *task)
{
        perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}

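/*
 * Example (hedged sketch of the alloc/fill/submit pattern used by the
 * perf probe glue; 'size', 'event_type' and 'head' are stand-ins for
 * values the real probe computes):
 *
 *      struct pt_regs *regs;
 *      int rctx;
 *      void *entry;
 *
 *      entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *      if (!entry)
 *              return;
 *      perf_fetch_caller_regs(regs);
 *      // ... fill the entry ...
 *      perf_trace_buf_submit(entry, size, rctx, event_type, 1, regs,
 *                            head, NULL);
 */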
#endif

#endif /* _LINUX_TRACE_EVENT_H */