linux/include/linux/trace_events.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
                                  unsigned long flags,
                                  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                                    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
                      unsigned long long flags,
                      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
                                        unsigned long long val,
                                        const struct trace_print_flags_u64
                                                                 *symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
                                    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
                                const unsigned char *buf, int len,
                                bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
                                   const void *buf, int count,
                                   size_t el_size);

const char *
trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
                         int prefix_type, int rowsize, int groupsize,
                         const void *buf, size_t len, bool ascii);
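
/*
 * These helpers back the __print_flags()/__print_symbolic() macros used
 * in TRACE_EVENT() print formats (the expansion itself lives in
 * include/trace/trace_events.h). As a rough sketch, with illustrative
 * flag names, a print format fragment such as:
 *
 *        __print_flags(REC->flags, "|",
 *                      { FOO_FLAG_A, "A" }, { FOO_FLAG_B, "B" })
 *
 * ends up calling trace_print_flags_seq() with "|" as the delimiter and
 * the { value, name } pairs packed into a NULL-terminated
 * struct trace_print_flags array.
 */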

struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
                          struct trace_event *event);
extern __printf(2, 3)
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
        unsigned short          type;
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
};

#define TRACE_EVENT_TYPE_MAX                                            \
        ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
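
/* For the 16-bit 'type' field above, this evaluates to (1 << 16) - 1 = 65535. */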

/*
 * Trace iterator - used by the printout routines that present trace
 * results to users; note that these routines may sleep.
 */
struct trace_iterator {
        struct trace_array      *tr;
        struct tracer           *trace;
        struct array_buffer     *array_buffer;
        void                    *private;
        int                     cpu_file;
        struct mutex            mutex;
        struct ring_buffer_iter **buffer_iter;
        unsigned long           iter_flags;
        void                    *temp;  /* temp holder */
        unsigned int            temp_size;
        char                    *fmt;   /* modified format holder */
        unsigned int            fmt_size;

        /* trace_seq for __print_flags() and __print_symbolic() etc. */
        struct trace_seq        tmp_seq;

        cpumask_var_t           started;

        /* true when the currently open file is a snapshot */
        bool                    snapshot;

        /* The below is zeroed out in pipe_read */
        struct trace_seq        seq;
        struct trace_entry      *ent;
        unsigned long           lost_events;
        int                     leftover;
        int                     ent_size;
        int                     cpu;
        u64                     ts;

        loff_t                  pos;
        long                    idx;

        /* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
        TRACE_FILE_LAT_FMT      = 1,
        TRACE_FILE_ANNOTATE     = 2,
        TRACE_FILE_TIME_IN_NS   = 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
                                      int flags, struct trace_event *event);

struct trace_event_functions {
        trace_print_func        trace;
        trace_print_func        raw;
        trace_print_func        hex;
        trace_print_func        binary;
};

struct trace_event {
        struct hlist_node               node;
        struct list_head                list;
        int                             type;
        struct trace_event_functions    *funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
        TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
        TRACE_TYPE_HANDLED      = 1,
        TRACE_TYPE_UNHANDLED    = 2,    /* Relay to other output functions */
        TRACE_TYPE_NO_CONSUME   = 3     /* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);

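/*
 * Fill in the basic fields shared by all trace entries. The packing of
 * @trace_ctx below implies its layout: bits 0-7 carry the preempt count
 * and bits 16 and up carry the TRACE_FLAG_* bits (flags is a single
 * byte, so only bits 16-23 are kept).
 */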
static inline void tracing_generic_entry_update(struct trace_entry *entry,
                                                unsigned short type,
                                                unsigned int trace_ctx)
{
        entry->preempt_count            = trace_ctx & 0xff;
        entry->pid                      = current->pid;
        entry->type                     = type;
        entry->flags                    = trace_ctx >> 16;
}

unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);

enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF             = 0x01,
        TRACE_FLAG_IRQS_NOSUPPORT       = 0x02,
        TRACE_FLAG_NEED_RESCHED         = 0x04,
        TRACE_FLAG_HARDIRQ              = 0x08,
        TRACE_FLAG_SOFTIRQ              = 0x10,
        TRACE_FLAG_PREEMPT_RESCHED      = 0x20,
        TRACE_FLAG_NMI                  = 0x40,
};

#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
        unsigned int irq_status = irqs_disabled_flags(irqflags) ?
                TRACE_FLAG_IRQS_OFF : 0;
        return tracing_gen_ctx_irq_test(irq_status);
}
static inline unsigned int tracing_gen_ctx(void)
{
        unsigned long irqflags;

        local_save_flags(irqflags);
        return tracing_gen_ctx_flags(irqflags);
}
#else

static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
        return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
}
static inline unsigned int tracing_gen_ctx(void)
{
        return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
}
#endif

static inline unsigned int tracing_gen_ctx_dec(void)
{
        unsigned int trace_ctx;

        trace_ctx = tracing_gen_ctx();
        /*
         * Subtract one from the preemption counter if preemption is enabled,
         * see trace_event_buffer_reserve() for details.
         */
        if (IS_ENABLED(CONFIG_PREEMPTION))
                trace_ctx--;
        return trace_ctx;
}

struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
                                struct trace_event_file *trace_file,
                                int type, unsigned long len,
                                unsigned int trace_ctx);

#define TRACE_RECORD_CMDLINE    BIT(0)
#define TRACE_RECORD_TGID       BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
                                          struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
        TRACE_REG_REGISTER,
        TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
        TRACE_REG_PERF_REGISTER,
        TRACE_REG_PERF_UNREGISTER,
        TRACE_REG_PERF_OPEN,
        TRACE_REG_PERF_CLOSE,
        /*
         * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
         * custom action was taken and the default action is not to be
         * performed.
         */
        TRACE_REG_PERF_ADD,
        TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

#define TRACE_FUNCTION_TYPE ((const char *)~0UL)

struct trace_event_fields {
        const char *type;
        union {
                struct {
                        const char *name;
                        const int  size;
                        const int  align;
                        const int  is_signed;
                        const int  filter_type;
                };
                int (*define_fields)(struct trace_event_call *);
        };
};

struct trace_event_class {
        const char              *system;
        void                    *probe;
#ifdef CONFIG_PERF_EVENTS
        void                    *perf_probe;
#endif
        int                     (*reg)(struct trace_event_call *event,
                                       enum trace_reg type, void *data);
        struct trace_event_fields *fields_array;
        struct list_head        *(*get_fields)(struct trace_event_call *);
        struct list_head        fields;
        int                     (*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
                            enum trace_reg type, void *data);

struct trace_event_buffer {
        struct trace_buffer             *buffer;
        struct ring_buffer_event        *event;
        struct trace_event_file         *trace_file;
        void                            *entry;
        unsigned int                    trace_ctx;
        struct pt_regs                  *regs;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
                                  struct trace_event_file *trace_file,
                                  unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
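
/*
 * A sketch of the reserve/fill/commit sequence used by generated probe
 * functions (the entry type and field are illustrative):
 *
 *        struct trace_event_buffer fbuffer;
 *        struct trace_event_raw_foo *entry;
 *
 *        entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *                                           sizeof(*entry));
 *        if (!entry)
 *                return;
 *        entry->bar = bar;
 *        trace_event_buffer_commit(&fbuffer);
 */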

enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
        TRACE_EVENT_FL_NO_SET_FILTER_BIT,
        TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
        TRACE_EVENT_FL_TRACEPOINT_BIT,
        TRACE_EVENT_FL_KPROBE_BIT,
        TRACE_EVENT_FL_UPROBE_BIT,
};

/*
 * Event flags:
 *  FILTERED      - The event has a filter attached
 *  CAP_ANY       - Any user can enable for perf
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT    - Event is a tracepoint
 *  KPROBE        - Event is a kprobe
 *  UPROBE        - Event is a uprobe
 */
enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
        TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
        TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
        TRACE_EVENT_FL_TRACEPOINT       = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
        TRACE_EVENT_FL_KPROBE           = (1 << TRACE_EVENT_FL_KPROBE_BIT),
        TRACE_EVENT_FL_UPROBE           = (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
        struct list_head        list;
        struct trace_event_class *class;
        union {
                char                    *name;
                /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
                struct tracepoint       *tp;
        };
        struct trace_event      event;
        char                    *print_fmt;
        struct event_filter     *filter;
        void                    *mod;
        void                    *data;

        /* See the TRACE_EVENT_FL_* flags above */
        int                     flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
        int                             perf_refcount;
        struct hlist_head __percpu      *perf_events;
        struct bpf_prog_array __rcu     *prog_array;

        int     (*perf_perm)(struct trace_event_call *,
                             struct perf_event *);
#endif
};

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
        /*
         * This inline function checks whether call->prog_array
         * is valid or not. The function is called in various places,
         * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
         *
         * If this function returns true, and later call->prog_array
         * becomes NULL inside the rcu_read_lock/unlock region,
         * we bail out then. If this function returns false,
         * there is a risk that we might miss a few events if the checking
         * were delayed until inside the rcu_read_lock/unlock region and
         * call->prog_array happened to become non-NULL then.
         *
         * Here, READ_ONCE() is used instead of rcu_access_pointer().
         * rcu_access_pointer() requires the actual definition of
         * "struct bpf_prog_array" while READ_ONCE() only needs
         * a declaration of the same type.
         */
        return !!READ_ONCE(call->prog_array);
}
#endif

static inline const char *
trace_event_name(struct trace_event_call *call)
{
        if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
                return call->tp ? call->tp->name : NULL;
        else
                return call->name;
}

static inline struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}

struct trace_array;
struct trace_subsystem_dir;

enum {
        EVENT_FILE_FL_ENABLED_BIT,
        EVENT_FILE_FL_RECORDED_CMD_BIT,
        EVENT_FILE_FL_RECORDED_TGID_BIT,
        EVENT_FILE_FL_FILTERED_BIT,
        EVENT_FILE_FL_NO_SET_FILTER_BIT,
        EVENT_FILE_FL_SOFT_MODE_BIT,
        EVENT_FILE_FL_SOFT_DISABLED_BIT,
        EVENT_FILE_FL_TRIGGER_MODE_BIT,
        EVENT_FILE_FL_TRIGGER_COND_BIT,
        EVENT_FILE_FL_PID_FILTER_BIT,
        EVENT_FILE_FL_WAS_ENABLED_BIT,
};

extern struct trace_event_file *trace_get_event_file(const char *instance,
                                                     const char *system,
                                                     const char *event);
extern void trace_put_event_file(struct trace_event_file *file);
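
/*
 * A minimal usage sketch (the event names are illustrative): look up an
 * event file in the top trace instance (a NULL @instance selects the top
 * level), use it, then drop the references trace_get_event_file() took:
 *
 *        struct trace_event_file *file;
 *
 *        file = trace_get_event_file(NULL, "sched", "sched_switch");
 *        if (IS_ERR(file))
 *                return PTR_ERR(file);
 *        ...
 *        trace_put_event_file(file);
 */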

#define MAX_DYNEVENT_CMD_LEN    (2048)

enum dynevent_type {
        DYNEVENT_TYPE_SYNTH = 1,
        DYNEVENT_TYPE_KPROBE,
        DYNEVENT_TYPE_NONE,
};

struct dynevent_cmd;

typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);

struct dynevent_cmd {
        struct seq_buf          seq;
        const char              *event_name;
        unsigned int            n_fields;
        enum dynevent_type      type;
        dynevent_create_fn_t    run_command;
        void                    *private_data;
};

extern int dynevent_create(struct dynevent_cmd *cmd);

extern int synth_event_delete(const char *name);

extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
                                 char *buf, int maxlen);

extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
                                       const char *name,
                                       struct module *mod, ...);

#define synth_event_gen_cmd_start(cmd, name, mod, ...)  \
        __synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)

struct synth_field_desc {
        const char *type;
        const char *name;
};

extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
                                           const char *name,
                                           struct module *mod,
                                           struct synth_field_desc *fields,
                                           unsigned int n_fields);
extern int synth_event_create(const char *name,
                              struct synth_field_desc *fields,
                              unsigned int n_fields, struct module *mod);

extern int synth_event_add_field(struct dynevent_cmd *cmd,
                                 const char *type,
                                 const char *name);
extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
                                     const char *type_name);
extern int synth_event_add_fields(struct dynevent_cmd *cmd,
                                  struct synth_field_desc *fields,
                                  unsigned int n_fields);

#define synth_event_gen_cmd_end(cmd)    \
        dynevent_create(cmd)
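
/*
 * A minimal sketch of generating a synthetic event from a module (event
 * and field names are illustrative; see Documentation/trace/events.rst
 * for the full in-kernel API):
 *
 *        struct dynevent_cmd cmd;
 *        char *buf;
 *        int ret;
 *
 *        buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *        synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *        ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
 *                                        "pid_t", "next_pid_field",
 *                                        "u64", "ts_ns");
 *        if (!ret)
 *                ret = synth_event_gen_cmd_end(&cmd);
 */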

struct synth_event;

struct synth_event_trace_state {
        struct trace_event_buffer fbuffer;
        struct synth_trace_event *entry;
        struct trace_buffer *buffer;
        struct synth_event *event;
        unsigned int cur_field;
        unsigned int n_u64;
        bool disabled;
        bool add_next;
        bool add_name;
};

extern int synth_event_trace(struct trace_event_file *file,
                             unsigned int n_vals, ...);
extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
                                   unsigned int n_vals);
extern int synth_event_trace_start(struct trace_event_file *file,
                                   struct synth_event_trace_state *trace_state);
extern int synth_event_add_next_val(u64 val,
                                    struct synth_event_trace_state *trace_state);
extern int synth_event_add_val(const char *field_name, u64 val,
                               struct synth_event_trace_state *trace_state);
extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
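
/*
 * A sketch of the piecewise tracing variant (assuming @file was obtained
 * via trace_get_event_file() for a two-field synthetic event; the values
 * are illustrative):
 *
 *        struct synth_event_trace_state state;
 *        int ret;
 *
 *        ret = synth_event_trace_start(file, &state);
 *        if (ret)
 *                return ret;
 *        synth_event_add_next_val(777, &state);
 *        synth_event_add_next_val(1000000, &state);
 *        ret = synth_event_trace_end(&state);
 *
 * Fields can instead be set by name with synth_event_add_val(), but the
 * two styles should not be mixed within a single trace; synth_event_trace()
 * and synth_event_trace_array() record a complete event in one call.
 */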

extern int kprobe_event_delete(const char *name);

extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
                                  char *buf, int maxlen);

#define kprobe_event_gen_cmd_start(cmd, name, loc, ...)                 \
        __kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)

#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)              \
        __kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)

extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
                                        bool kretprobe,
                                        const char *name,
                                        const char *loc, ...);

#define kprobe_event_add_fields(cmd, ...)       \
        __kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)

#define kprobe_event_add_field(cmd, field)      \
        __kprobe_event_add_fields(cmd, field, NULL)

extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);

#define kprobe_event_gen_cmd_end(cmd)           \
        dynevent_create(cmd)

#define kretprobe_event_gen_cmd_end(cmd)        \
        dynevent_create(cmd)
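
/*
 * A minimal sketch of generating a kprobe event (the probe location and
 * fetch args are illustrative; see Documentation/trace/kprobetrace.rst
 * for the argument syntax):
 *
 *        struct dynevent_cmd cmd;
 *        char *buf;
 *        int ret;
 *
 *        buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *        kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *        ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
 *                                         "do_sys_open",
 *                                         "dfd=%ax", "filename=%dx");
 *        if (!ret)
 *                ret = kprobe_event_gen_cmd_end(&cmd);
 */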

/*
 * Event file flags:
 *  ENABLED       - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED      - The event has a filter attached
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                   tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER    - When set, the event is filtered based on pid
 *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
 */
enum {
        EVENT_FILE_FL_ENABLED           = (1 << EVENT_FILE_FL_ENABLED_BIT),
        EVENT_FILE_FL_RECORDED_CMD      = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
        EVENT_FILE_FL_RECORDED_TGID     = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
        EVENT_FILE_FL_FILTERED          = (1 << EVENT_FILE_FL_FILTERED_BIT),
        EVENT_FILE_FL_NO_SET_FILTER     = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
        EVENT_FILE_FL_SOFT_MODE         = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
        EVENT_FILE_FL_SOFT_DISABLED     = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
        EVENT_FILE_FL_TRIGGER_MODE      = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
        EVENT_FILE_FL_TRIGGER_COND      = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
        EVENT_FILE_FL_PID_FILTER        = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
        EVENT_FILE_FL_WAS_ENABLED       = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};

struct trace_event_file {
        struct list_head                list;
        struct trace_event_call         *event_call;
        struct event_filter __rcu       *filter;
        struct dentry                   *dir;
        struct trace_array              *tr;
        struct trace_subsystem_dir      *system;
        struct list_head                triggers;

        /*
         * 32 bit flags:
         *   bit 0:             enabled
         *   bit 1:             enabled cmd record
         *   bit 2:             enable/disable with the soft disable bit
         *   bit 3:             soft disabled
         *   bit 4:             trigger enabled
         *
         * Note: The bits must be set atomically to prevent races
         * from other writers. Reads of flags do not need to be in
         * sync as they occur in critical sections. But the way flags
         * is currently used, these changes do not affect the code
         * except that when a change is made, it may have a slight
         * delay in propagating the changes to other CPUs due to
         * caching and such. Which is mostly OK ;-)
         */
        unsigned long           flags;
        atomic_t                sm_ref; /* soft-mode reference counter */
        atomic_t                tm_ref; /* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)                                \
        static int __init trace_init_flags_##name(void)                 \
        {                                                               \
                event_##name.flags |= value;                            \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)                          \
        static int perf_perm_##name(struct trace_event_call *tp_event, \
                                    struct perf_event *p_event)         \
        {                                                               \
                return ({ expr; });                                     \
        }                                                               \
        static int __init trace_init_perf_perm_##name(void)             \
        {                                                               \
                event_##name.perf_perm = &perf_perm_##name;             \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_perf_perm_##name);

#define PERF_MAX_TRACE_SIZE     2048

#define MAX_FILTER_STR_VAL      256     /* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
        ETT_NONE                = (0),
        ETT_TRACE_ONOFF         = (1 << 0),
        ETT_SNAPSHOT            = (1 << 1),
        ETT_STACKTRACE          = (1 << 2),
        ETT_EVENT_ENABLE        = (1 << 3),
        ETT_EVENT_HIST          = (1 << 4),
        ETT_HIST_ENABLE         = (1 << 5),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
                    struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
                         enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
        unsigned long eflags = file->flags;

        if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
                if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
                        event_triggers_call(file, NULL, NULL);
                if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
                        return true;
                if (eflags & EVENT_FILE_FL_PID_FILTER)
                        return trace_event_ignore_this_pid(file);
        }
        return false;
}
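
/*
 * A sketch of how generated probe code typically uses the check above
 * (the function and event names are illustrative; the real expansion
 * lives in include/trace/trace_events.h):
 *
 *        static notrace void trace_event_raw_event_foo(void *__data, ...)
 *        {
 *                struct trace_event_file *trace_file = __data;
 *
 *                if (trace_trigger_soft_disabled(trace_file))
 *                        return;
 *                ... reserve, fill and commit the event ...
 *        }
 */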

#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
                            u32 *fd_type, const char **buf,
                            u64 *probe_offset, u64 *probe_addr);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
        return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
{
        return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
        return -EOPNOTSUPP;
}
static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
        return -EOPNOTSUPP;
}
static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
        return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
        return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
                                          u32 *prog_id, u32 *fd_type,
                                          const char **buf, u64 *probe_offset,
                                          u64 *probe_addr)
{
        return -EOPNOTSUPP;
}
#endif

enum {
        FILTER_OTHER = 0,
        FILTER_STATIC_STRING,
        FILTER_DYN_STRING,
        FILTER_PTR_STRING,
        FILTER_TRACE_FN,
        FILTER_COMM,
        FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
                              const char *name, int offset, int size,
                              int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

#define is_signed_type(type)    (((type)(-1)) < (type)1)

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
                const char *event, bool enable);
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to initialize the static variable from fmt when fmt is not
 * a constant, even though the outer if statement would be optimized out.
 */
#define event_trace_printk(ip, fmt, args...)                            \
do {                                                                    \
        __trace_printk_check_format(fmt, ##args);                       \
        tracing_record_cmdline(current);                                \
        if (__builtin_constant_p(fmt)) {                                \
                static const char *trace_printk_fmt                     \
                  __section("__trace_printk_fmt") =                     \
                        __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                        \
                __trace_bprintk(ip, trace_printk_fmt, ##args);          \
        } else                                                          \
                __trace_printk(ip, fmt, ##args);                        \
} while (0)

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
DECLARE_PER_CPU(int, bpf_kprobe_override);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
                               u32 *fd_type, const char **symbol,
                               u64 *probe_offset, u64 *probe_addr,
                               bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int  perf_uprobe_init(struct perf_event *event,
                             unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
                               u32 *fd_type, const char **filename,
                               u64 *probe_offset, bool perf_type_tracepoint);
#endif
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3);
void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                    u64 arg8);
void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
                               struct trace_event_call *call, u64 count,
                               struct pt_regs *regs, struct hlist_head *head,
                               struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
                       u64 count, struct pt_regs *regs, void *head,
                       struct task_struct *task)
{
        perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}
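
/*
 * A sketch of the usual alloc/fill/submit sequence in a perf probe
 * handler (generated code in include/trace/perf.h follows this shape;
 * the size and event type are illustrative):
 *
 *        struct pt_regs *regs;
 *        int rctx;
 *        void *entry;
 *
 *        entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *        if (!entry)
 *                return;
 *        ... fill in the entry's fields ...
 *        perf_trace_buf_submit(entry, size, rctx, event_type, 1, regs,
 *                              head, NULL);
 */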

#endif

#endif /* _LINUX_TRACE_EVENT_H */