/* linux/kernel/trace/trace.h — internal declarations for the ftrace tracing core */
   2#ifndef _LINUX_KERNEL_TRACE_H
   3#define _LINUX_KERNEL_TRACE_H
   4
   5#include <linux/fs.h>
   6#include <linux/atomic.h>
   7#include <linux/sched.h>
   8#include <linux/clocksource.h>
   9#include <linux/ring_buffer.h>
  10#include <linux/mmiotrace.h>
  11#include <linux/tracepoint.h>
  12#include <linux/ftrace.h>
  13#include <linux/hw_breakpoint.h>
  14#include <linux/trace_seq.h>
  15#include <linux/trace_events.h>
  16#include <linux/compiler.h>
  17#include <linux/trace_seq.h>
  18
  19#ifdef CONFIG_FTRACE_SYSCALLS
  20#include <asm/unistd.h>         /* For NR_SYSCALLS           */
  21#include <asm/syscall.h>        /* some archs define it here */
  22#endif
  23
/*
 * Entry type ids stored in trace_entry.type. trace_assign_type()
 * further below maps most of these ids to the entry struct that
 * carries them in the ring buffer.
 */
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,		/* struct ftrace_entry */
	TRACE_CTX,		/* struct ctx_switch_entry */
	TRACE_WAKE,		/* struct ctx_switch_entry (wakeup event) */
	TRACE_STACK,		/* struct stack_entry */
	TRACE_PRINT,		/* struct print_entry */
	TRACE_BPRINT,		/* struct bprint_entry */
	TRACE_MMIO_RW,		/* struct trace_mmiotrace_rw */
	TRACE_MMIO_MAP,		/* struct trace_mmiotrace_map */
	TRACE_BRANCH,		/* struct trace_branch */
	TRACE_GRAPH_RET,	/* struct ftrace_graph_ret_entry */
	TRACE_GRAPH_ENT,	/* struct ftrace_graph_ent_entry */
	TRACE_USER_STACK,	/* struct userstack_entry */
	TRACE_BLK,		/* block layer tracing — handled outside this file */
	TRACE_BPUTS,		/* struct bputs_entry */

	__TRACE_LAST_TYPE,
};
  44
  45
  46#undef __field
  47#define __field(type, item)             type    item;
  48
  49#undef __field_struct
  50#define __field_struct(type, item)      __field(type, item)
  51
  52#undef __field_desc
  53#define __field_desc(type, container, item)
  54
  55#undef __array
  56#define __array(type, item, size)       type    item[size];
  57
  58#undef __array_desc
  59#define __array_desc(type, container, item, size)
  60
  61#undef __dynamic_array
  62#define __dynamic_array(type, item)     type    item[];
  63
  64#undef F_STRUCT
  65#define F_STRUCT(args...)               args
  66
  67#undef FTRACE_ENTRY
  68#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)     \
  69        struct struct_name {                                            \
  70                struct trace_entry      ent;                            \
  71                tstruct                                                 \
  72        }
  73
  74#undef FTRACE_ENTRY_DUP
  75#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
  76
  77#undef FTRACE_ENTRY_REG
  78#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
  79                         filter, regfn) \
  80        FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
  81                     filter)
  82
  83#include "trace_entries.h"
  84
  85/*
  86 * syscalls are special, and need special handling, this is why
  87 * they are not included in trace_entries.h
  88 */
/* Ring buffer entry recorded on syscall entry. */
struct syscall_trace_enter {
	struct trace_entry	ent;	/* common entry header */
	int			nr;	/* syscall number */
	unsigned long		args[];	/* syscall arguments (flexible array) */
};
  94
/* Ring buffer entry recorded on syscall exit. */
struct syscall_trace_exit {
	struct trace_entry	ent;	/* common entry header */
	int			nr;	/* syscall number */
	long			ret;	/* syscall return value */
};
 100
/* Common head of a kprobe event entry; probe-defined fields follow it. */
struct kprobe_trace_entry_head {
	struct trace_entry	ent;	/* common entry header */
	unsigned long		ip;	/* instruction address the probe hit */
};
 105
/* Common head of a kretprobe event entry; probe-defined fields follow it. */
struct kretprobe_trace_entry_head {
	struct trace_entry	ent;	/* common entry header */
	unsigned long		func;	/* address of the probed function */
	unsigned long		ret_ip;	/* return address the kretprobe fired at */
};
 111
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF            - interrupts were disabled
 *  IRQS_NOSUPPORT      - arch does not support irqs_disabled_flags
 *  NEED_RESCHED        - reschedule is requested
 *  HARDIRQ             - inside an interrupt handler
 *  SOFTIRQ             - inside a softirq handler
 *  PREEMPT_RESCHED     - reschedule is requested at a preemption point
 */
 121enum trace_flag_type {
 122        TRACE_FLAG_IRQS_OFF             = 0x01,
 123        TRACE_FLAG_IRQS_NOSUPPORT       = 0x02,
 124        TRACE_FLAG_NEED_RESCHED         = 0x04,
 125        TRACE_FLAG_HARDIRQ              = 0x08,
 126        TRACE_FLAG_SOFTIRQ              = 0x10,
 127        TRACE_FLAG_PREEMPT_RESCHED      = 0x20,
 128};
 129
 130#define TRACE_BUF_SIZE          1024
 131
 132struct trace_array;
 133
 134/*
 135 * The CPU trace array - it consists of thousands of trace entries
 136 * plus some other descriptor data: (for example which task started
 137 * the trace, etc.)
 138 */
struct trace_array_cpu {
	atomic_t		disabled;	/* non-zero: drop events on this CPU */
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;	/* entries recorded on this CPU */
	unsigned long		saved_latency;	/* latency saved at the last max snapshot */
	unsigned long		critical_start;	/* bounds of the latency-critical section */
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	/* scheduling parameters of the task that started the trace */
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;	/* task that started the trace */
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];	/* its command name */

	/* NOTE(review): presumably cached result of filtered_pids lookup — verify */
	bool			ignore_pid;
};
 159
 160struct tracer;
 161struct trace_option_dentry;
 162
/*
 * A ring buffer plus its per-CPU bookkeeping. A trace_array owns one
 * (trace_buffer) or two of these (max_buffer, for snapshots).
 */
struct trace_buffer {
	struct trace_array		*tr;		/* owning trace array */
	struct ring_buffer		*buffer;	/* the backing ring buffer */
	struct trace_array_cpu __percpu *data;		/* per-CPU trace state */
	cycle_t				time_start;	/* assumes: timestamp of last reset — TODO confirm */
	int				cpu;
};
 170
 171#define TRACE_FLAGS_MAX_SIZE            32
 172
/* Ties a tracer to the tracefs option files created for it. */
struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;	/* array of per-option file entries */
};
 177
/* Set of PIDs used for trace filtering (see trace_array.filtered_pids). */
struct trace_pid_list {
	unsigned int			nr_pids;	/* valid entries in pids */
	int				order;		/* presumably page allocation order of pids — verify */
	pid_t				*pids;
};
 183
 184/*
 185 * The trace array - an array of per-CPU trace arrays. This is the
 186 * highest level data structure that individual tracers deal with.
 187 * They have on/off state as well:
 188 */
struct trace_array {
	struct list_head	list;		/* node in ftrace_trace_arrays */
	char			*name;		/* instance name */
	struct trace_buffer	trace_buffer;	/* the live buffer */
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the trace_buffer
	 * When a snapshot is taken, the buffer of the max_buffer is swapped
	 * with the buffer of the trace_buffer and the buffers are reset for
	 * the trace_buffer so the tracing can continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;	/* max_buffer pages are allocated */
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;	/* if set, trace only these PIDs */
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as a arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside the update_max_tr
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;	/* users of enter_syscall_files */
	int			sys_refcount_exit;	/* users of exit_syscall_files */
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;	/* entries in topts */
	struct tracer		*current_trace;	/* tracer currently running on this array */
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;		/* TRACE_ARRAY_FL_* bits */
	raw_spinlock_t		start_lock;
	struct dentry		*dir;		/* tracefs directories for this instance */
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;		/* per-tracer option files */
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;		/* see trace_array_get()/trace_array_put() */
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	/* function tracing enabled */
	int			function_enabled;
#endif
};
 254
 255enum {
 256        TRACE_ARRAY_FL_GLOBAL   = (1 << 0)
 257};
 258
 259extern struct list_head ftrace_trace_arrays;
 260
 261extern struct mutex trace_types_lock;
 262
 263extern int trace_array_get(struct trace_array *tr);
 264extern void trace_array_put(struct trace_array *tr);
 265
 266/*
 267 * The global tracer (top) should be the first trace array added,
 268 * but we check the flag anyway.
 269 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	/* No trace array has been registered yet */
	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	/* The global array is expected at the tail of the list */
	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}
 282
 283#define FTRACE_CMP_TYPE(var, type) \
 284        __builtin_types_compatible_p(typeof(var), type *)
 285
 286#undef IF_ASSIGN
 287#define IF_ASSIGN(var, entry, etype, id)                \
 288        if (FTRACE_CMP_TYPE(var, etype)) {              \
 289                var = (typeof(var))(entry);             \
 290                WARN_ON(id && (entry)->type != id);     \
 291                break;                                  \
 292        }
 293
 294/* Will cause compile errors if type is not found. */
 295extern void __ftrace_bad_type(void);
 296
 297/*
 298 * The trace_assign_type is a verifier that the entry type is
 299 * the same as the type being assigned. To add new types simply
 300 * add a line with the following format:
 301 *
 302 * IF_ASSIGN(var, ent, type, id);
 303 *
 304 *  Where "type" is the trace type that includes the trace_entry
 305 *  as the "ent" item. And "id" is the trace identifier that is
 306 *  used in the trace_type enum.
 307 *
 308 *  If the type can have more than one id, then use zero.
 309 */
 310#define trace_assign_type(var, ent)                                     \
 311        do {                                                            \
 312                IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);     \
 313                IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);        \
 314                IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);   \
 315                IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 316                IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);   \
 317                IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
 318                IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);   \
 319                IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,          \
 320                          TRACE_MMIO_RW);                               \
 321                IF_ASSIGN(var, ent, struct trace_mmiotrace_map,         \
 322                          TRACE_MMIO_MAP);                              \
 323                IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
 324                IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,      \
 325                          TRACE_GRAPH_ENT);             \
 326                IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,      \
 327                          TRACE_GRAPH_RET);             \
 328                __ftrace_bad_type();                                    \
 329        } while (0)
 330
 331/*
 332 * An option specific to a tracer. This is a boolean value.
 333 * The bit is the bit index that sets its value on the
 334 * flags value in struct tracer_flags.
 335 */
 336struct tracer_opt {
 337        const char      *name; /* Will appear on the trace_options file */
 338        u32             bit; /* Mask assigned in val field in tracer_flags */
 339};
 340
 341/*
 342 * The set of specific options for a tracer. Your tracer
 343 * have to set the initial value of the flags val.
 344 */
struct tracer_flags {
	u32			val;	/* current value of all option bits */
	struct tracer_opt	*opts;	/* the available options (see tracer_opt) */
};
 349
 350/* Makes more easy to define a tracer opt */
 351#define TRACER_OPT(s, b)        .name = #s, .bit = b
 352
 353
/* One tracefs file representing a single tracer option bit. */
struct trace_option_dentry {
	struct tracer_opt		*opt;	/* the option this file toggles */
	struct tracer_flags		*flags;	/* flags word the bit lives in */
	struct trace_array		*tr;	/* trace array the file belongs to */
	struct dentry			*entry;	/* the tracefs dentry */
};
 360
/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flag_changed: notified when a global trace flag is about to change
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;		/* next in the list of registered tracers */
	struct tracer_flags	*flags;		/* tracer-private option flags */
	int			enabled;
	int			ref;
	bool			print_max;	/* NOTE(review): appears tied to max_buffer output — confirm */
	bool			allow_instances;	/* tracer may be used in instances */
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;	/* tracer snapshots into max_buffer */
#endif
};
 423
 424
 425/* Only current can touch trace_recursion */
 426
 427/*
 428 * For function tracing recursion:
 429 *  The order of these bits are important.
 430 *
 431 *  When function tracing occurs, the following steps are made:
 432 *   If arch does not support a ftrace feature:
 433 *    call internal function (uses INTERNAL bits) which calls...
 434 *   If callback is registered to the "global" list, the list
 435 *    function is called and recursion checks the GLOBAL bits.
 436 *    then this function calls...
 437 *   The function callback, which can use the FTRACE bits to
 438 *    check for recursion.
 439 *
 * Now if the arch does not support a feature, and it calls
 441 * the global list function which calls the ftrace callback
 442 * all three of these steps will do a recursion protection.
 443 * There's no reason to do one if the previous caller already
 444 * did. The recursion that we are protecting against will
 445 * go through the same steps again.
 446 *
 447 * To prevent the multiple recursion checks, if a recursion
 448 * bit is set that is higher than the MAX bit of the current
 449 * check, then we know that the check was made by the previous
 450 * caller, and we can skip the current check.
 451 */
 452enum {
 453        TRACE_BUFFER_BIT,
 454        TRACE_BUFFER_NMI_BIT,
 455        TRACE_BUFFER_IRQ_BIT,
 456        TRACE_BUFFER_SIRQ_BIT,
 457
 458        /* Start of function recursion bits */
 459        TRACE_FTRACE_BIT,
 460        TRACE_FTRACE_NMI_BIT,
 461        TRACE_FTRACE_IRQ_BIT,
 462        TRACE_FTRACE_SIRQ_BIT,
 463
 464        /* INTERNAL_BITs must be greater than FTRACE_BITs */
 465        TRACE_INTERNAL_BIT,
 466        TRACE_INTERNAL_NMI_BIT,
 467        TRACE_INTERNAL_IRQ_BIT,
 468        TRACE_INTERNAL_SIRQ_BIT,
 469
 470        TRACE_BRANCH_BIT,
 471/*
 472 * Abuse of the trace_recursion.
 473 * As we need a way to maintain state if we are tracing the function
 474 * graph in irq because we want to trace a particular function that
 475 * was called in irq context but we have irq tracing off. Since this
 476 * can only be modified by current, we can reuse trace_recursion.
 477 */
 478        TRACE_IRQ_BIT,
 479};
 480
 481#define trace_recursion_set(bit)        do { (current)->trace_recursion |= (1<<(bit)); } while (0)
 482#define trace_recursion_clear(bit)      do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
 483#define trace_recursion_test(bit)       ((current)->trace_recursion & (1<<(bit)))
 484
 485#define TRACE_CONTEXT_BITS      4
 486
 487#define TRACE_FTRACE_START      TRACE_FTRACE_BIT
 488#define TRACE_FTRACE_MAX        ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
 489
 490#define TRACE_LIST_START        TRACE_INTERNAL_BIT
 491#define TRACE_LIST_MAX          ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 492
 493#define TRACE_CONTEXT_MASK      TRACE_LIST_MAX
 494
 495static __always_inline int trace_get_context_bit(void)
 496{
 497        int bit;
 498
 499        if (in_interrupt()) {
 500                if (in_nmi())
 501                        bit = 0;
 502
 503                else if (in_irq())
 504                        bit = 1;
 505                else
 506                        bit = 2;
 507        } else
 508                bit = 3;
 509
 510        return bit;
 511}
 512
/*
 * Claim the per-context recursion bit in the range beginning at @start.
 * Returns the bit index that was set (pass it to trace_clear_recursion()),
 * 0 when a caller higher in the chain already holds a check covering
 * @max (see the big comment above the recursion bit enum), or -1 when
 * this context is already recursing.
 *
 * Only current ever touches trace_recursion, so plain loads/stores with
 * compiler barriers are sufficient — no atomics needed.
 *
 * NOTE(review): for a range starting at bit 0, a successful claim of
 * bit 0 is indistinguishable from the "previous check" return, and
 * trace_clear_recursion() ignores bit 0 — confirm callers never use
 * start == 0.
 */
static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	/* Already inside this context's tracing — refuse to recurse */
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	/* Keep the bit set before any of the protected tracing work runs */
	barrier();

	return bit;
}
 532
/*
 * Release a recursion bit obtained from trace_test_and_set_recursion().
 * A @bit of 0 means the check was skipped there (a previous caller
 * holds the protection), so there is nothing to clear.
 */
static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	/* Ensure the protected tracing work is done before dropping the bit */
	barrier();
	current->trace_recursion = val;
}
 546
 547static inline struct ring_buffer_iter *
 548trace_buffer_iter(struct trace_iterator *iter, int cpu)
 549{
 550        if (iter->buffer_iter && iter->buffer_iter[cpu])
 551                return iter->buffer_iter[cpu];
 552        return NULL;
 553}
 554
 555int tracer_init(struct tracer *t, struct trace_array *tr);
 556int tracing_is_enabled(void);
 557void tracing_reset(struct trace_buffer *buf, int cpu);
 558void tracing_reset_online_cpus(struct trace_buffer *buf);
 559void tracing_reset_current(int cpu);
 560void tracing_reset_all_online_cpus(void);
 561int tracing_open_generic(struct inode *inode, struct file *filp);
 562bool tracing_is_disabled(void);
 563struct dentry *trace_create_file(const char *name,
 564                                 umode_t mode,
 565                                 struct dentry *parent,
 566                                 void *data,
 567                                 const struct file_operations *fops);
 568
 569struct dentry *tracing_init_dentry(void);
 570
 571struct ring_buffer_event;
 572
 573struct ring_buffer_event *
 574trace_buffer_lock_reserve(struct ring_buffer *buffer,
 575                          int type,
 576                          unsigned long len,
 577                          unsigned long flags,
 578                          int pc);
 579
 580struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 581                                                struct trace_array_cpu *data);
 582
 583struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 584                                          int *ent_cpu, u64 *ent_ts);
 585
 586void __buffer_unlock_commit(struct ring_buffer *buffer,
 587                            struct ring_buffer_event *event);
 588
 589int trace_empty(struct trace_iterator *iter);
 590
 591void *trace_find_next_entry_inc(struct trace_iterator *iter);
 592
 593void trace_init_global_iter(struct trace_iterator *iter);
 594
 595void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 596
 597void trace_function(struct trace_array *tr,
 598                    unsigned long ip,
 599                    unsigned long parent_ip,
 600                    unsigned long flags, int pc);
 601void trace_graph_function(struct trace_array *tr,
 602                    unsigned long ip,
 603                    unsigned long parent_ip,
 604                    unsigned long flags, int pc);
 605void trace_latency_header(struct seq_file *m);
 606void trace_default_header(struct seq_file *m);
 607void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
 608int trace_empty(struct trace_iterator *iter);
 609
 610void trace_graph_return(struct ftrace_graph_ret *trace);
 611int trace_graph_entry(struct ftrace_graph_ent *trace);
 612void set_graph_array(struct trace_array *tr);
 613
 614void tracing_start_cmdline_record(void);
 615void tracing_stop_cmdline_record(void);
 616int register_tracer(struct tracer *type);
 617int is_tracing_stopped(void);
 618
 619loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
 620
 621extern cpumask_var_t __read_mostly tracing_buffer_mask;
 622
 623#define for_each_tracing_cpu(cpu)       \
 624        for_each_cpu(cpu, tracing_buffer_mask)
 625
 626extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 627
 628extern unsigned long tracing_thresh;
 629
 630#ifdef CONFIG_TRACER_MAX_TRACE
 631void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 632void update_max_tr_single(struct trace_array *tr,
 633                          struct task_struct *tsk, int cpu);
 634#endif /* CONFIG_TRACER_MAX_TRACE */
 635
 636#ifdef CONFIG_STACKTRACE
 637void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
 638                            int pc);
 639
 640void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 641                   int pc);
 642#else
/* Stub: user stack recording is a no-op without CONFIG_STACKTRACE. */
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}
 647
/* Stub: kernel stack recording is a no-op without CONFIG_STACKTRACE. */
static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
 652#endif /* CONFIG_STACKTRACE */
 653
 654extern cycle_t ftrace_now(int cpu);
 655
 656extern void trace_find_cmdline(int pid, char comm[]);
 657
 658#ifdef CONFIG_DYNAMIC_FTRACE
 659extern unsigned long ftrace_update_tot_cnt;
 660#endif
 661#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
 662extern int DYN_FTRACE_TEST_NAME(void);
 663#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
 664extern int DYN_FTRACE_TEST_NAME2(void);
 665
 666extern bool ring_buffer_expanded;
 667extern bool tracing_selftest_disabled;
 668
 669#ifdef CONFIG_FTRACE_STARTUP_TEST
 670extern int trace_selftest_startup_function(struct tracer *trace,
 671                                           struct trace_array *tr);
 672extern int trace_selftest_startup_function_graph(struct tracer *trace,
 673                                                 struct trace_array *tr);
 674extern int trace_selftest_startup_irqsoff(struct tracer *trace,
 675                                          struct trace_array *tr);
 676extern int trace_selftest_startup_preemptoff(struct tracer *trace,
 677                                             struct trace_array *tr);
 678extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
 679                                                 struct trace_array *tr);
 680extern int trace_selftest_startup_wakeup(struct tracer *trace,
 681                                         struct trace_array *tr);
 682extern int trace_selftest_startup_nop(struct tracer *trace,
 683                                         struct trace_array *tr);
 684extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 685                                               struct trace_array *tr);
 686extern int trace_selftest_startup_branch(struct tracer *trace,
 687                                         struct trace_array *tr);
 688/*
 689 * Tracer data references selftest functions that only occur
 690 * on boot up. These can be __init functions. Thus, when selftests
 691 * are enabled, then the tracers need to reference __init functions.
 692 */
 693#define __tracer_data           __refdata
 694#else
 695/* Tracers are seldom changed. Optimize when selftests are disabled. */
 696#define __tracer_data           __read_mostly
 697#endif /* CONFIG_FTRACE_STARTUP_TEST */
 698
 699extern void *head_page(struct trace_array_cpu *data);
 700extern unsigned long long ns2usecs(cycle_t nsec);
 701extern int
 702trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
 703extern int
 704trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 705extern int
 706trace_array_vprintk(struct trace_array *tr,
 707                    unsigned long ip, const char *fmt, va_list args);
 708int trace_array_printk(struct trace_array *tr,
 709                       unsigned long ip, const char *fmt, ...);
 710int trace_array_printk_buf(struct ring_buffer *buffer,
 711                           unsigned long ip, const char *fmt, ...);
 712void trace_printk_seq(struct trace_seq *s);
 713enum print_line_t print_trace_line(struct trace_iterator *iter);
 714
 715extern char trace_find_mark(unsigned long long duration);
 716
 717/* Standard output formatting function used for function return traces */
 718#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 719
 720/* Flag options */
 721#define TRACE_GRAPH_PRINT_OVERRUN       0x1
 722#define TRACE_GRAPH_PRINT_CPU           0x2
 723#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
 724#define TRACE_GRAPH_PRINT_PROC          0x8
 725#define TRACE_GRAPH_PRINT_DURATION      0x10
 726#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
 727#define TRACE_GRAPH_PRINT_IRQS          0x40
 728#define TRACE_GRAPH_PRINT_TAIL          0x80
 729#define TRACE_GRAPH_SLEEP_TIME          0x100
 730#define TRACE_GRAPH_GRAPH_TIME          0x200
 731#define TRACE_GRAPH_PRINT_FILL_SHIFT    28
 732#define TRACE_GRAPH_PRINT_FILL_MASK     (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 733
 734extern void ftrace_graph_sleep_time_control(bool enable);
 735extern void ftrace_graph_graph_time_control(bool enable);
 736
 737extern enum print_line_t
 738print_graph_function_flags(struct trace_iterator *iter, u32 flags);
 739extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
 740extern void
 741trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
 742extern void graph_trace_open(struct trace_iterator *iter);
 743extern void graph_trace_close(struct trace_iterator *iter);
 744extern int __trace_graph_entry(struct trace_array *tr,
 745                               struct ftrace_graph_ent *trace,
 746                               unsigned long flags, int pc);
 747extern void __trace_graph_return(struct trace_array *tr,
 748                                 struct ftrace_graph_ret *trace,
 749                                 unsigned long flags, int pc);
 750
 751
 752#ifdef CONFIG_DYNAMIC_FTRACE
 753/* TODO: make this variable */
 754#define FTRACE_GRAPH_MAX_FUNCS          32
 755extern int ftrace_graph_count;
 756extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
 757extern int ftrace_graph_notrace_count;
 758extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
 759
/*
 * Return 1 if @addr should be traced by the function graph tracer:
 * either no filter is set (ftrace_graph_count == 0) or @addr is one of
 * the filtered functions. On a match, TRACE_IRQ_BIT is updated so that
 * a filtered function hit from hard irq context is still traced (see
 * the comment above TRACE_IRQ_BIT in the recursion enum).
 */
static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	/* An empty filter means trace every function */
	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}
 784
 785static inline int ftrace_graph_notrace_addr(unsigned long addr)
 786{
 787        int i;
 788
 789        if (!ftrace_graph_notrace_count)
 790                return 0;
 791
 792        for (i = 0; i < ftrace_graph_notrace_count; i++) {
 793                if (addr == ftrace_graph_notrace_funcs[i])
 794                        return 1;
 795        }
 796
 797        return 0;
 798}
 799#else
/* Without dynamic ftrace there is no filter list: trace every function. */
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
 804
/* Without dynamic ftrace there is no notrace list: exclude nothing. */
static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
 809#endif /* CONFIG_DYNAMIC_FTRACE */
 810#else /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Stub used when the function graph tracer is not configured: report
 * the entry as unhandled so the normal output paths are used instead.
 */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
 816#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 817
 818extern struct list_head ftrace_pids;
 819
 820#ifdef CONFIG_FUNCTION_TRACER
 821extern bool ftrace_filter_param __initdata;
 822static inline int ftrace_trace_task(struct task_struct *task)
 823{
 824        if (list_empty(&ftrace_pids))
 825                return 1;
 826
 827        return test_tsk_trace_trace(task);
 828}
 829extern int ftrace_is_dead(void);
 830int ftrace_create_function_files(struct trace_array *tr,
 831                                 struct dentry *parent);
 832void ftrace_destroy_function_files(struct trace_array *tr);
 833void ftrace_init_global_array_ops(struct trace_array *tr);
 834void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
 835void ftrace_reset_array_ops(struct trace_array *tr);
 836int using_ftrace_ops_list_func(void);
 837#else
/* Stubs used when the function tracer is not configured. */
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
 855#endif /* CONFIG_FUNCTION_TRACER */
 856
 857#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
 858void ftrace_create_filter_files(struct ftrace_ops *ops,
 859                                struct dentry *parent);
 860void ftrace_destroy_filter_files(struct ftrace_ops *ops);
 861#else
 862/*
 863 * The ops parameter passed in is usually undefined.
 864 * This must be a macro.
 865 */
 866#define ftrace_create_filter_files(ops, parent) do { } while (0)
 867#define ftrace_destroy_filter_files(ops) do { } while (0)
 868#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
 869
 870bool ftrace_event_is_function(struct trace_event_call *call);
 871
 872/*
 873 * struct trace_parser - servers for reading the user input separated by spaces
 874 * @cont: set if the input is not complete - no final space char was found
 875 * @buffer: holds the parsed user input
 876 * @idx: user input length
 877 * @size: buffer size
 878 */
 879struct trace_parser {
 880        bool            cont;
 881        char            *buffer;
 882        unsigned        idx;
 883        unsigned        size;
 884};
 885
/* True when the parser buffer currently holds at least one character. */
static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}
 890
/* True when the last read ended mid-token and more input is expected. */
static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}
 895
 896static inline void trace_parser_clear(struct trace_parser *parser)
 897{
 898        parser->cont = false;
 899        parser->idx = 0;
 900}
 901
 902extern int trace_parser_get_init(struct trace_parser *parser, int size);
 903extern void trace_parser_put(struct trace_parser *parser);
 904extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 905        size_t cnt, loff_t *ppos);
 906
 907/*
 908 * Only create function graph options if function graph is configured.
 909 */
 910#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 911# define FGRAPH_FLAGS                                           \
 912                C(DISPLAY_GRAPH,        "display-graph"),
 913#else
 914# define FGRAPH_FLAGS
 915#endif
 916
 917#ifdef CONFIG_BRANCH_TRACER
 918# define BRANCH_FLAGS                                   \
 919                C(BRANCH,               "branch"),
 920#else
 921# define BRANCH_FLAGS
 922#endif
 923
 924#ifdef CONFIG_FUNCTION_TRACER
 925# define FUNCTION_FLAGS                                         \
 926                C(FUNCTION,             "function-trace"),
 927# define FUNCTION_DEFAULT_FLAGS         TRACE_ITER_FUNCTION
 928#else
 929# define FUNCTION_FLAGS
 930# define FUNCTION_DEFAULT_FLAGS         0UL
 931#endif
 932
 933#ifdef CONFIG_STACKTRACE
 934# define STACK_FLAGS                            \
 935                C(STACKTRACE,           "stacktrace"),
 936#else
 937# define STACK_FLAGS
 938#endif
 939
 940/*
 941 * trace_iterator_flags is an enumeration that defines bit
 942 * positions into trace_flags that controls the output.
 943 *
 944 * NOTE: These bits must match the trace_options array in
 945 *       trace.c (this macro guarantees it).
 946 */
 947#define TRACE_FLAGS                                             \
 948                C(PRINT_PARENT,         "print-parent"),        \
 949                C(SYM_OFFSET,           "sym-offset"),          \
 950                C(SYM_ADDR,             "sym-addr"),            \
 951                C(VERBOSE,              "verbose"),             \
 952                C(RAW,                  "raw"),                 \
 953                C(HEX,                  "hex"),                 \
 954                C(BIN,                  "bin"),                 \
 955                C(BLOCK,                "block"),               \
 956                C(PRINTK,               "trace_printk"),        \
 957                C(ANNOTATE,             "annotate"),            \
 958                C(USERSTACKTRACE,       "userstacktrace"),      \
 959                C(SYM_USEROBJ,          "sym-userobj"),         \
 960                C(PRINTK_MSGONLY,       "printk-msg-only"),     \
 961                C(CONTEXT_INFO,         "context-info"),   /* Print pid/cpu/time */ \
 962                C(LATENCY_FMT,          "latency-format"),      \
 963                C(RECORD_CMD,           "record-cmd"),          \
 964                C(OVERWRITE,            "overwrite"),           \
 965                C(STOP_ON_FREE,         "disable_on_free"),     \
 966                C(IRQ_INFO,             "irq-info"),            \
 967                C(MARKERS,              "markers"),             \
 968                FUNCTION_FLAGS                                  \
 969                FGRAPH_FLAGS                                    \
 970                STACK_FLAGS                                     \
 971                BRANCH_FLAGS
 972
 973/*
 974 * By defining C, we can make TRACE_FLAGS a list of bit names
 975 * that will define the bits for the flag masks.
 976 */
 977#undef C
 978#define C(a, b) TRACE_ITER_##a##_BIT
 979
 980enum trace_iterator_bits {
 981        TRACE_FLAGS
 982        /* Make sure we don't go more than we have bits for */
 983        TRACE_ITER_LAST_BIT
 984};
 985
 986/*
 987 * By redefining C, we can make TRACE_FLAGS a list of masks that
 988 * use the bits as defined above.
 989 */
 990#undef C
 991#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
 992
 993enum trace_iterator_flags { TRACE_FLAGS };
 994
 995/*
 996 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 997 * control the output of kernel symbols.
 998 */
 999#define TRACE_ITER_SYM_MASK \
1000        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1001
1002extern struct tracer nop_trace;
1003
1004#ifdef CONFIG_BRANCH_TRACER
1005extern int enable_branch_tracing(struct trace_array *tr);
1006extern void disable_branch_tracing(void);
1007static inline int trace_branch_enable(struct trace_array *tr)
1008{
1009        if (tr->trace_flags & TRACE_ITER_BRANCH)
1010                return enable_branch_tracing(tr);
1011        return 0;
1012}
/* Turn branch tracing off unconditionally. */
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
1018#else
/* Stubs used when the branch tracer is not configured. */
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
1026#endif /* CONFIG_BRANCH_TRACER */
1027
1028/* set ring buffers to default size if not already done so */
1029int tracing_update_buffers(void);
1030
/*
 * Describes one field of a trace event record: the name/type strings
 * shown to user space plus its location inside the binary record.
 */
struct ftrace_event_field {
	struct list_head	link;		/* entry in the event's field list */
	const char		*name;		/* field name */
	const char		*type;		/* C type of the field, as a string */
	int			filter_type;	/* filter class (see filter_assign_type()) */
	int			offset;		/* byte offset within the record */
	int			size;		/* byte size of the field */
	int			is_signed;	/* non-zero for signed fields */
};
1040
/* A compiled event filter: its predicates plus the original source text. */
struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;		/* array of predicates */
	struct filter_pred	*root;		/* root of the predicate tree */
	char			*filter_string;	/* filter text as written by the user */
};
1048
/* A group of trace events sharing a name and an optional common filter. */
struct event_subsystem {
	struct list_head	list;		/* entry in the global subsystem list */
	const char		*name;		/* subsystem name */
	struct event_filter	*filter;	/* subsystem-wide filter, if any */
	int			ref_count;	/* users holding this subsystem */
};
1055
/*
 * Per-instance view of an event_subsystem: links a subsystem to one
 * trace_array and its directory entry in that instance's tracefs tree.
 */
struct trace_subsystem_dir {
	struct list_head		list;		/* entry in the instance's dir list */
	struct event_subsystem		*subsystem;	/* the shared subsystem data */
	struct trace_array		*tr;		/* owning trace instance */
	struct dentry			*entry;		/* tracefs directory dentry */
	int				ref_count;	/* users holding this dir */
	int				nr_events;	/* events registered under this dir */
};
1064
/* Sentinel "no predicate" index value */
#define FILTER_PRED_INVALID	((unsigned short)-1)
/* Flag bits carried in the MSB of a predicate index */
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384
1077
1078struct filter_pred;
1079struct regex;
1080
1081typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1082
1083typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1084
/* Match modes for filter string patterns (see filter_parse_regex()). */
enum regex_type {
	MATCH_FULL = 0,		/* pattern must match the entire string */
	MATCH_FRONT_ONLY,	/* pattern anchored at the start */
	MATCH_MIDDLE_ONLY,	/* pattern may occur anywhere in the string */
	MATCH_END_ONLY,		/* pattern anchored at the end */
};
1091
/* A parsed filter pattern together with the function that matches it. */
struct regex {
	char			pattern[MAX_FILTER_STR_VAL];	/* the pattern text */
	int			len;		/* length of pattern */
	int			field_len;	/* length of the field being matched -- TODO confirm */
	regex_match_func	match;		/* matcher chosen for the regex_type */
};
1098
/*
 * One predicate of an event filter.  Predicates live in an array and
 * reference each other by index; the FILTER_PRED_* flags above are
 * encoded into those index values.
 */
struct filter_pred {
	filter_pred_fn_t	fn;		/* comparison callback for this predicate */
	u64			val;		/* numeric value compared against */
	struct regex		regex;		/* string pattern compared against */
	unsigned short		*ops;		/* presumably per-op data for tree walking -- verify */
	struct ftrace_event_field *field;	/* event field this predicate tests */
	int			offset;		/* byte offset of the field in the record */
	int			not;		/* non-zero to invert the match result */
	int			op;		/* comparison operator code */
	unsigned short		index;		/* this predicate's slot in the preds array */
	unsigned short		parent;		/* index of the parent predicate */
	unsigned short		left;		/* index of the left child */
	unsigned short		right;		/* index of the right child */
};
1113
1114extern enum regex_type
1115filter_parse_regex(char *buff, int len, char **search, int *not);
1116extern void print_event_filter(struct trace_event_file *file,
1117                               struct trace_seq *s);
1118extern int apply_event_filter(struct trace_event_file *file,
1119                              char *filter_string);
1120extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1121                                        char *filter_string);
1122extern void print_subsystem_event_filter(struct event_subsystem *system,
1123                                         struct trace_seq *s);
1124extern int filter_assign_type(const char *type);
1125extern int create_event_filter(struct trace_event_call *call,
1126                               char *filter_str, bool set_str,
1127                               struct event_filter **filterp);
1128extern void free_event_filter(struct event_filter *filter);
1129
1130struct ftrace_event_field *
1131trace_find_event_field(struct trace_event_call *call, char *name);
1132
1133extern void trace_event_enable_cmd_record(bool enable);
1134extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1135extern int event_trace_del_tracer(struct trace_array *tr);
1136
1137extern struct trace_event_file *find_event_file(struct trace_array *tr,
1138                                                const char *system,
1139                                                const char *event);
1140
1141static inline void *event_file_data(struct file *filp)
1142{
1143        return ACCESS_ONCE(file_inode(filp)->i_private);
1144}
1145
1146extern struct mutex event_mutex;
1147extern struct list_head ftrace_events;
1148
1149extern const struct file_operations event_trigger_fops;
1150
1151extern int register_trigger_cmds(void);
1152extern void clear_event_triggers(struct trace_array *tr);
1153
/* Per-trigger instance state, created when a trigger command is set. */
struct event_trigger_data {
	unsigned long			count;		/* remaining invocations (":count" param) -- TODO confirm */
	int				ref;		/* reference count */
	struct event_trigger_ops	*ops;		/* callbacks for this trigger */
	struct event_command		*cmd_ops;	/* the command that created it */
	struct event_filter __rcu	*filter;	/* optional trigger filter (RCU protected) */
	char				*filter_str;	/* filter source text */
	void				*private_data;	/* command-specific data */
	struct list_head		list;		/* entry in the event's trigger list */
};
1164
1165/**
1166 * struct event_trigger_ops - callbacks for trace event triggers
1167 *
1168 * The methods in this structure provide per-event trigger hooks for
1169 * various trigger operations.
1170 *
1171 * All the methods below, except for @init() and @free(), must be
1172 * implemented.
1173 *
1174 * @func: The trigger 'probe' function called when the triggering
1175 *      event occurs.  The data passed into this callback is the data
1176 *      that was supplied to the event_command @reg() function that
1177 *      registered the trigger (see struct event_command).
1178 *
1179 * @init: An optional initialization function called for the trigger
1180 *      when the trigger is registered (via the event_command reg()
1181 *      function).  This can be used to perform per-trigger
1182 *      initialization such as incrementing a per-trigger reference
1183 *      count, for instance.  This is usually implemented by the
1184 *      generic utility function @event_trigger_init() (see
1185 *      trace_event_triggers.c).
1186 *
1187 * @free: An optional de-initialization function called for the
1188 *      trigger when the trigger is unregistered (via the
1189 *      event_command @reg() function).  This can be used to perform
1190 *      per-trigger de-initialization such as decrementing a
1191 *      per-trigger reference count and freeing corresponding trigger
1192 *      data, for instance.  This is usually implemented by the
1193 *      generic utility function @event_trigger_free() (see
1194 *      trace_event_triggers.c).
1195 *
1196 * @print: The callback function invoked to have the trigger print
1197 *      itself.  This is usually implemented by a wrapper function
1198 *      that calls the generic utility function @event_trigger_print()
1199 *      (see trace_event_triggers.c).
1200 */
/* See the kernel-doc comment above for the full contract of each member. */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data);	/* trigger probe */
	int			(*init)(struct event_trigger_ops *ops,		/* optional init */
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,		/* optional teardown */
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,			/* self-description */
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};
1211
1212/**
1213 * struct event_command - callbacks and data members for event commands
1214 *
1215 * Event commands are invoked by users by writing the command name
1216 * into the 'trigger' file associated with a trace event.  The
1217 * parameters associated with a specific invocation of an event
1218 * command are used to create an event trigger instance, which is
1219 * added to the list of trigger instances associated with that trace
1220 * event.  When the event is hit, the set of triggers associated with
1221 * that event is invoked.
1222 *
1223 * The data members in this structure provide per-event command data
1224 * for various event commands.
1225 *
1226 * All the data members below, except for @post_trigger, must be set
1227 * for each event command.
1228 *
1229 * @name: The unique name that identifies the event command.  This is
1230 *      the name used when setting triggers via trigger files.
1231 *
1232 * @trigger_type: A unique id that identifies the event command
1233 *      'type'.  This value has two purposes, the first to ensure that
1234 *      only one trigger of the same type can be set at a given time
1235 *      for a particular event e.g. it doesn't make sense to have both
1236 *      a traceon and traceoff trigger attached to a single event at
1237 *      the same time, so traceon and traceoff have the same type
1238 *      though they have different names.  The @trigger_type value is
1239 *      also used as a bit value for deferring the actual trigger
1240 *      action until after the current event is finished.  Some
1241 *      commands need to do this if they themselves log to the trace
1242 *      buffer (see the @post_trigger() member below).  @trigger_type
1243 *      values are defined by adding new values to the trigger_type
1244 *      enum in include/linux/trace_events.h.
1245 *
1246 * @post_trigger: A flag that says whether or not this command needs
1247 *      to have its action delayed until after the current event has
1248 *      been closed.  Some triggers need to avoid being invoked while
1249 *      an event is currently in the process of being logged, since
1250 *      the trigger may itself log data into the trace buffer.  Thus
1251 *      we make sure the current event is committed before invoking
1252 *      those triggers.  To do that, the trigger invocation is split
1253 *      in two - the first part checks the filter using the current
1254 *      trace record; if a command has the @post_trigger flag set, it
1255 *      sets a bit for itself in the return value, otherwise it
1256 *      directly invokes the trigger.  Once all commands have been
1257 *      either invoked or set their return flag, the current record is
1258 *      either committed or discarded.  At that point, if any commands
1259 *      have deferred their triggers, those commands are finally
1260 *      invoked following the close of the current event.  In other
1261 *      words, if the event_trigger_ops @func() probe implementation
1262 *      itself logs to the trace buffer, this flag should be set,
1263 *      otherwise it can be left unspecified.
1264 *
1265 * All the methods below, except for @set_filter(), must be
1266 * implemented.
1267 *
1268 * @func: The callback function responsible for parsing and
1269 *      registering the trigger written to the 'trigger' file by the
1270 *      user.  It allocates the trigger instance and registers it with
1271 *      the appropriate trace event.  It makes use of the other
1272 *      event_command callback functions to orchestrate this, and is
1273 *      usually implemented by the generic utility function
1274 *      @event_trigger_callback() (see trace_event_triggers.c).
1275 *
1276 * @reg: Adds the trigger to the list of triggers associated with the
1277 *      event, and enables the event trigger itself, after
1278 *      initializing it (via the event_trigger_ops @init() function).
1279 *      This is also where commands can use the @trigger_type value to
1280 *      make the decision as to whether or not multiple instances of
1281 *      the trigger should be allowed.  This is usually implemented by
1282 *      the generic utility function @register_trigger() (see
1283 *      trace_event_triggers.c).
1284 *
1285 * @unreg: Removes the trigger from the list of triggers associated
1286 *      with the event, and disables the event trigger itself, after
1287 *      initializing it (via the event_trigger_ops @free() function).
1288 *      This is usually implemented by the generic utility function
1289 *      @unregister_trigger() (see trace_event_triggers.c).
1290 *
1291 * @set_filter: An optional function called to parse and set a filter
1292 *      for the trigger.  If no @set_filter() method is set for the
1293 *      event command, filters set by the user for the command will be
1294 *      ignored.  This is usually implemented by the generic utility
1295 *      function @set_trigger_filter() (see trace_event_triggers.c).
1296 *
1297 * @get_trigger_ops: The callback function invoked to retrieve the
1298 *      event_trigger_ops implementation associated with the command.
1299 */
/* See the kernel-doc comment above for the full contract of each member. */
struct event_command {
	struct list_head	list;		/* entry in the registered command list */
	char			*name;		/* command name written to 'trigger' files */
	enum event_trigger_type trigger_type;	/* unique type id / deferral bit */
	bool			post_trigger;	/* defer action until event is committed */
	int			(*func)(struct event_command *cmd_ops,	/* parse and register */
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,			/* add + enable trigger */
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,			/* remove + disable trigger */
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,		/* optional filter parser */
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
1321
1322extern int trace_event_enable_disable(struct trace_event_file *file,
1323                                      int enable, int soft_disable);
1324extern int tracing_alloc_snapshot(void);
1325
1326extern const char *__start___trace_bprintk_fmt[];
1327extern const char *__stop___trace_bprintk_fmt[];
1328
1329extern const char *__start___tracepoint_str[];
1330extern const char *__stop___tracepoint_str[];
1331
1332void trace_printk_control(bool enabled);
1333void trace_printk_init_buffers(void);
1334void trace_printk_start_comm(void);
1335int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1336int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1337
1338/*
1339 * Normal trace_printk() and friends allocates special buffers
1340 * to do the manipulation, as well as saves the print formats
1341 * into sections to display. But the trace infrastructure wants
1342 * to use these without the added overhead at the price of being
1343 * a bit slower (used mainly for warnings, where we don't care
1344 * about performance). The internal_trace_puts() is for such
1345 * a purpose.
1346 */
1347#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1348
1349#undef FTRACE_ENTRY
1350#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)     \
1351        extern struct trace_event_call                                  \
1352        __aligned(4) event_##call;
1353#undef FTRACE_ENTRY_DUP
1354#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
1355        FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1356                     filter)
1357#include "trace_entries.h"
1358
1359#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1360int perf_ftrace_event_register(struct trace_event_call *call,
1361                               enum trace_reg type, void *data);
1362#else
1363#define perf_ftrace_event_register NULL
1364#endif
1365
1366#ifdef CONFIG_FTRACE_SYSCALLS
1367void init_ftrace_syscalls(void);
1368#else
1369static inline void init_ftrace_syscalls(void) { }
1370#endif
1371
1372#ifdef CONFIG_EVENT_TRACING
1373void trace_event_init(void);
1374void trace_event_enum_update(struct trace_enum_map **map, int len);
1375#else
1376static inline void __init trace_event_init(void) { }
1377static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
1378#endif
1379
1380extern struct trace_iterator *tracepoint_print_iter;
1381
1382#endif /* _LINUX_KERNEL_TRACE_H */
1383