linux/kernel/trace/trace.h
// SPDX-License-Identifier: GPL-2.0

#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/trace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>
#include <linux/glob.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>
#include <linux/ctype.h>
#include <linux/once_lite.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>         /* For NR_SYSCALLS           */
#include <asm/syscall.h>        /* some archs define it here */
#endif

enum trace_type {
        __TRACE_FIRST_TYPE = 0,

        TRACE_FN,
        TRACE_CTX,
        TRACE_WAKE,
        TRACE_STACK,
        TRACE_PRINT,
        TRACE_BPRINT,
        TRACE_MMIO_RW,
        TRACE_MMIO_MAP,
        TRACE_BRANCH,
        TRACE_GRAPH_RET,
        TRACE_GRAPH_ENT,
        TRACE_USER_STACK,
        TRACE_BLK,
        TRACE_BPUTS,
        TRACE_HWLAT,
        TRACE_OSNOISE,
        TRACE_TIMERLAT,
        TRACE_RAW_DATA,
        TRACE_FUNC_REPEATS,

        __TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)             type    item;

#undef __field_fn
#define __field_fn(type, item)          type    item;

#undef __field_struct
#define __field_struct(type, item)      __field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __field_packed
#define __field_packed(type, container, item)

#undef __array
#define __array(type, item, size)       type    item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)     type    item[];

#undef F_STRUCT
#define F_STRUCT(args...)               args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)             \
        struct struct_name {                                            \
                struct trace_entry      ent;                            \
                tstruct                                                 \
        }

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn)  \
        FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)      \
        FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed

#include "trace_entries.h"

/* Use this for memory failure errors */
#define MEM_FAIL(condition, fmt, ...)                                   \
        DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__)
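
/*
 * Example usage (an illustrative sketch, not code from this file): the
 * macro evaluates to @condition, so it can gate an error path while
 * printing the message only once:
 *
 *      buf = kzalloc(size, GFP_KERNEL);
 *      if (MEM_FAIL(!buf, "failed to allocate trace buffer\n"))
 *              return -ENOMEM;
 */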

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
        struct trace_entry      ent;
        int                     nr;
        unsigned long           args[];
};

struct syscall_trace_exit {
        struct trace_entry      ent;
        int                     nr;
        long                    ret;
};

struct kprobe_trace_entry_head {
        struct trace_entry      ent;
        unsigned long           ip;
};

struct eprobe_trace_entry_head {
        struct trace_entry      ent;
        unsigned int            type;
};

struct kretprobe_trace_entry_head {
        struct trace_entry      ent;
        unsigned long           func;
        unsigned long           ret_ip;
};

#define TRACE_BUF_SIZE          1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace, etc.).
 */
struct trace_array_cpu {
        atomic_t                disabled;
        void                    *buffer_page;   /* ring buffer spare */

        unsigned long           entries;
        unsigned long           saved_latency;
        unsigned long           critical_start;
        unsigned long           critical_end;
        unsigned long           critical_sequence;
        unsigned long           nice;
        unsigned long           policy;
        unsigned long           rt_priority;
        unsigned long           skipped_entries;
        u64                     preempt_timestamp;
        pid_t                   pid;
        kuid_t                  uid;
        char                    comm[TASK_COMM_LEN];

#ifdef CONFIG_FUNCTION_TRACER
        int                     ftrace_ignore_pid;
#endif
        bool                    ignore_pid;
};

struct tracer;
struct trace_option_dentry;

struct array_buffer {
        struct trace_array              *tr;
        struct trace_buffer             *buffer;
        struct trace_array_cpu __percpu *data;
        u64                             time_start;
        int                             cpu;
};

#define TRACE_FLAGS_MAX_SIZE            32

struct trace_options {
        struct tracer                   *tracer;
        struct trace_option_dentry      *topts;
};

struct trace_pid_list {
        int                             pid_max;
        unsigned long                   *pids;
};

enum {
        TRACE_PIDS              = BIT(0),
        TRACE_NO_PIDS           = BIT(1),
};

static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
                                    struct trace_pid_list *no_pid_list)
{
        /* Return true if the pid list in type has pids */
        return ((type & TRACE_PIDS) && pid_list) ||
                ((type & TRACE_NO_PIDS) && no_pid_list);
}

static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
                                         struct trace_pid_list *no_pid_list)
{
        /*
         * Turning off what is in @type, return true if the "other"
         * pid list still has pids in it.
         */
        return (!(type & TRACE_PIDS) && pid_list) ||
                (!(type & TRACE_NO_PIDS) && no_pid_list);
}
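
/*
 * For example (illustrative): with only a "no pids" list installed,
 * pid_type_enabled(TRACE_PIDS, pid_list, no_pid_list) is false, while
 * still_need_pid_events(TRACE_PIDS, pid_list, no_pid_list) is true,
 * as the events used for PID tracking are still needed by the other
 * list even after TRACE_PIDS is turned off.
 */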

typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function.  That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array.  Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_cond_snapshot_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_cond_snapshot_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *      callback function is invoked with the tr->max_lock held.  The
 *      update() implementation signals whether or not to actually
 *      take the snapshot, by returning 'true' if so, 'false' if no
 *      snapshot should be taken.  Because the max_lock is held for
 *      the duration of update(), the implementation is safe to
 *      directly retrieve and save any implementation data it needs
 *      to in association with the snapshot.
 */
struct cond_snapshot {
        void                            *cond_data;
        cond_update_fn_t                update;
};
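
/*
 * Illustrative sketch (all names hypothetical): an update() callback
 * that only allows the snapshot when a measured value exceeds a
 * threshold stashed in cond_data by the enable call:
 *
 *      static bool threshold_update(struct trace_array *tr, void *cond_data)
 *      {
 *              unsigned long *threshold = cond_data;
 *
 *              return my_read_latency() > *threshold;
 *      }
 *
 * Here my_read_latency() is a made-up helper. The callback would be
 * registered together with &my_threshold via the enable function
 * mentioned above, and tracing_snapshot_cond(tr, &my_threshold) would
 * then snapshot only when the test passes.
 */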

/*
 * struct trace_func_repeats - used to keep track of the consecutive
 * (on the same CPU) calls of a single function.
 */
struct trace_func_repeats {
        unsigned long   ip;
        unsigned long   parent_ip;
        unsigned long   count;
        u64             ts_last_call;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * It has on/off state as well:
 */
struct trace_array {
        struct list_head        list;
        char                    *name;
        struct array_buffer     array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
        /*
         * The max_buffer is used to snapshot the trace when a maximum
         * latency is reached, or when the user initiates a snapshot.
         * Some tracers will use this to store a maximum trace while
         * they continue examining live traces.
         *
         * The buffers for the max_buffer are set up the same as the array_buffer.
         * When a snapshot is taken, the buffer of the max_buffer is swapped
         * with the buffer of the array_buffer and the buffers are reset for
         * the array_buffer so the tracing can continue.
         */
        struct array_buffer     max_buffer;
        bool                    allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
        || defined(CONFIG_OSNOISE_TRACER)
        unsigned long           max_latency;
#ifdef CONFIG_FSNOTIFY
        struct dentry           *d_max_latency;
        struct work_struct      fsnotify_work;
        struct irq_work         fsnotify_irqwork;
#endif
#endif
        struct trace_pid_list   __rcu *filtered_pids;
        struct trace_pid_list   __rcu *filtered_no_pids;
        /*
         * max_lock is used to protect the swapping of buffers
         * when taking a max snapshot. The buffers themselves are
         * protected by per_cpu spinlocks. But the action of the swap
         * needs its own lock.
         *
         * This is defined as an arch_spinlock_t in order to help
         * with performance when lockdep debugging is enabled.
         *
         * It is also used in other places outside of update_max_tr(),
         * so it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
         */
        arch_spinlock_t         max_lock;
        int                     buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
        int                     sys_refcount_enter;
        int                     sys_refcount_exit;
        struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
        struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
        int                     stop_count;
        int                     clock_id;
        int                     nr_topts;
        bool                    clear_trace;
        int                     buffer_percent;
        unsigned int            n_err_log_entries;
        struct tracer           *current_trace;
        unsigned int            trace_flags;
        unsigned char           trace_flags_index[TRACE_FLAGS_MAX_SIZE];
        unsigned int            flags;
        raw_spinlock_t          start_lock;
        struct list_head        err_log;
        struct dentry           *dir;
        struct dentry           *options;
        struct dentry           *percpu_dir;
        struct dentry           *event_dir;
        struct trace_options    *topts;
        struct list_head        systems;
        struct list_head        events;
        struct trace_event_file *trace_marker_file;
        cpumask_var_t           tracing_cpumask; /* only trace on set CPUs */
        int                     ref;
        int                     trace_ref;
#ifdef CONFIG_FUNCTION_TRACER
        struct ftrace_ops       *ops;
        struct trace_pid_list   __rcu *function_pids;
        struct trace_pid_list   __rcu *function_no_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
        /* All of these are protected by the ftrace_lock */
        struct list_head        func_probes;
        struct list_head        mod_trace;
        struct list_head        mod_notrace;
#endif
        /* function tracing enabled */
        int                     function_enabled;
#endif
        int                     no_filter_buffering_ref;
        struct list_head        hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
        struct cond_snapshot    *cond_snapshot;
#endif
        struct trace_func_repeats       __percpu *last_func_repeats;
};

enum {
        TRACE_ARRAY_FL_GLOBAL   = (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);

extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe);
extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
        struct trace_array *tr;

        if (list_empty(&ftrace_trace_arrays))
                return NULL;

        tr = list_entry(ftrace_trace_arrays.prev,
                        typeof(*tr), list);
        WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
        return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
        __builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)                        \
        if (FTRACE_CMP_TYPE(var, etype)) {                      \
                var = (typeof(var))(entry);                     \
                WARN_ON(id != 0 && (entry)->type != id);        \
                break;                                          \
        }

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 *  Where "type" is the trace type that includes the trace_entry
 *  as the "ent" item. And "id" is the trace identifier that is
 *  used in the trace_type enum.
 *
 *  If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)                                     \
        do {                                                            \
                IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);     \
                IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);        \
                IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);   \
                IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
                IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);   \
                IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
                IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);   \
                IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);   \
                IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
                IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
                IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
                IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,          \
                          TRACE_MMIO_RW);                               \
                IF_ASSIGN(var, ent, struct trace_mmiotrace_map,         \
                          TRACE_MMIO_MAP);                              \
                IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
                IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,      \
                          TRACE_GRAPH_ENT);             \
                IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,      \
                          TRACE_GRAPH_RET);             \
                IF_ASSIGN(var, ent, struct func_repeats_entry,          \
                          TRACE_FUNC_REPEATS);                          \
                __ftrace_bad_type();                                    \
        } while (0)
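
/*
 * Usage sketch (mirroring how the output code uses this macro): downcast
 * a generic trace_entry to its specific event structure; the WARN_ON()
 * in IF_ASSIGN() fires if the entry's type id doesn't match:
 *
 *      struct print_entry *field;
 *
 *      trace_assign_type(field, iter->ent);
 *      trace_seq_printf(&iter->seq, "%s", field->buf);
 */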

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit mask that sets its value in the
 * flags value of struct tracer_flags.
 */
struct tracer_opt {
        const char      *name; /* Will appear on the trace_options file */
        u32             bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
        u32                     val;
        struct tracer_opt       *opts;
        struct tracer           *trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)        .name = #s, .bit = b
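
/*
 * Example definition (an illustrative sketch; the names are hypothetical).
 * The opts array must end with an empty terminating entry:
 *
 *      static struct tracer_opt my_opts[] = {
 *              { TRACER_OPT(my_option, 0x1) },
 *              { }
 *      };
 *
 *      static struct tracer_flags my_tracer_flags = {
 *              .val  = 0,
 *              .opts = my_opts,
 *      };
 */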


struct trace_option_dentry {
        struct tracer_opt               *opt;
        struct tracer_flags             *flags;
        struct trace_array              *tr;
        struct dentry                   *entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
        const char              *name;
        int                     (*init)(struct trace_array *tr);
        void                    (*reset)(struct trace_array *tr);
        void                    (*start)(struct trace_array *tr);
        void                    (*stop)(struct trace_array *tr);
        int                     (*update_thresh)(struct trace_array *tr);
        void                    (*open)(struct trace_iterator *iter);
        void                    (*pipe_open)(struct trace_iterator *iter);
        void                    (*close)(struct trace_iterator *iter);
        void                    (*pipe_close)(struct trace_iterator *iter);
        ssize_t                 (*read)(struct trace_iterator *iter,
                                        struct file *filp, char __user *ubuf,
                                        size_t cnt, loff_t *ppos);
        ssize_t                 (*splice_read)(struct trace_iterator *iter,
                                               struct file *filp,
                                               loff_t *ppos,
                                               struct pipe_inode_info *pipe,
                                               size_t len,
                                               unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
        int                     (*selftest)(struct tracer *trace,
                                            struct trace_array *tr);
#endif
        void                    (*print_header)(struct seq_file *m);
        enum print_line_t       (*print_line)(struct trace_iterator *iter);
        /* If you handled the flag setting, return 0 */
        int                     (*set_flag)(struct trace_array *tr,
                                            u32 old_flags, u32 bit, int set);
        /* Return 0 if OK with change, else return non-zero */
        int                     (*flag_changed)(struct trace_array *tr,
                                                u32 mask, int set);
        struct tracer           *next;
        struct tracer_flags     *flags;
        int                     enabled;
        bool                    print_max;
        bool                    allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
        bool                    use_max_tr;
#endif
        /* True if tracer cannot be enabled in kernel param */
        bool                    noboot;
};
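
/*
 * Minimal registration sketch (hypothetical tracer; register_tracer()
 * and __tracer_data are declared later in this header):
 *
 *      static int my_tracer_init(struct trace_array *tr) { return 0; }
 *      static void my_tracer_reset(struct trace_array *tr) { }
 *
 *      static struct tracer my_tracer __tracer_data = {
 *              .name   = "my_tracer",
 *              .init   = my_tracer_init,
 *              .reset  = my_tracer_reset,
 *      };
 *
 *      static __init int init_my_tracer(void)
 *      {
 *              return register_tracer(&my_tracer);
 *      }
 *      core_initcall(init_my_tracer);
 */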

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
        return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
int tracing_open_generic_tr(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
bool tracer_tracing_is_on(struct trace_array *tr);
void tracer_tracing_on(struct trace_array *tr);
void tracer_tracing_off(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
                                 umode_t mode,
                                 struct dentry *parent,
                                 void *data,
                                 const struct file_operations *fops);

int tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct trace_buffer *buffer,
                          int type,
                          unsigned long len,
                          unsigned int trace_ctx);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
                                                struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                          int *ent_cpu, u64 *ent_ts);

void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
                                        struct ring_buffer_event *event);

bool trace_is_tracepoint_string(const char *str);
const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
                         va_list ap);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
unsigned long trace_total_entries(struct trace_array *tr);

void trace_function(struct trace_array *tr,
                    unsigned long ip,
                    unsigned long parent_ip,
                    unsigned int trace_ctx);
void trace_graph_function(struct trace_array *tr,
                    unsigned long ip,
                    unsigned long parent_ip,
                    unsigned int trace_ctx);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_start_tgid_record(void);
void tracing_stop_tgid_record(void);

int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)       \
        for_each_cpu(cpu, tracing_buffer_mask)
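
/*
 * Example (a sketch): walk every CPU that tracing covers, e.g. to sum
 * per-CPU entry counts into a local "total" variable:
 *
 *      int cpu;
 *
 *      for_each_tracing_cpu(cpu)
 *              total += trace_total_entries_cpu(tr, cpu);
 */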

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
                             pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
                            struct trace_pid_list *filtered_no_pids,
                            struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
                                  struct task_struct *self,
                                  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
                    struct trace_pid_list **new_pid_list,
                    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
                   void *cond_data);
void update_max_tr_single(struct trace_array *tr,
                          struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
        || defined(CONFIG_OSNOISE_TRACER)) && defined(CONFIG_FSNOTIFY)
#define LATENCY_FS_NOTIFY
#endif

#ifdef LATENCY_FS_NOTIFY
void latency_fsnotify(struct trace_array *tr);
#else
static inline void latency_fsnotify(struct trace_array *tr) { }
#endif

#ifdef CONFIG_STACKTRACE
void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip);
#else
static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
                                 int skip)
{
}
#endif /* CONFIG_STACKTRACE */

void trace_last_func_repeats(struct trace_array *tr,
                             struct trace_func_repeats *last_info,
                             unsigned int trace_ctx);

extern u64 ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
extern unsigned long ftrace_number_of_pages;
extern unsigned long ftrace_number_of_groups;
void ftrace_init_trace_array(struct trace_array *tr);
#else
static inline void ftrace_init_trace_array(struct trace_array *tr) { }
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern void __init disable_tracing_selftest(const char *reason);

extern int trace_selftest_startup_function(struct tracer *trace,
                                           struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
                                                 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
                                          struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
                                             struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
                                                 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
                                         struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
                                         struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
                                         struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data           __refdata
#else
static inline void __init disable_tracing_selftest(const char *reason)
{
}
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data           __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(u64 nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
                    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk_buf(struct trace_buffer *buffer,
                           unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

struct ftrace_hash;

struct ftrace_mod_load {
        struct list_head        list;
        char                    *func;
        char                    *module;
        int                      enable;
};

enum {
        FTRACE_HASH_FL_MOD      = (1 << 0),
};

struct ftrace_hash {
        unsigned long           size_bits;
        struct hlist_head       *buckets;
        unsigned long           count;
        unsigned long           flags;
        struct rcu_head         rcu;
};

struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);

static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
        return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
#define TRACE_GRAPH_PRINT_REL_TIME      0x40
#define TRACE_GRAPH_PRINT_IRQS          0x80
#define TRACE_GRAPH_PRINT_TAIL          0x100
#define TRACE_GRAPH_SLEEP_TIME          0x200
#define TRACE_GRAPH_GRAPH_TIME          0x400
#define TRACE_GRAPH_PRINT_FILL_SHIFT    28
#define TRACE_GRAPH_PRINT_FILL_MASK     (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);

#ifdef CONFIG_FUNCTION_PROFILER
extern void ftrace_graph_graph_time_control(bool enable);
#else
static inline void ftrace_graph_graph_time_control(bool enable) { }
#endif

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
                               struct ftrace_graph_ent *trace,
                               unsigned int trace_ctx);
extern void __trace_graph_return(struct trace_array *tr,
                                 struct ftrace_graph_ret *trace,
                                 unsigned int trace_ctx);

#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;

static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
        unsigned long addr = trace->func;
        int ret = 0;
        struct ftrace_hash *hash;

        preempt_disable_notrace();

        /*
         * Have to open code "rcu_dereference_sched()" because the
         * function graph tracer can be called when RCU is not
         * "watching".
         * Protected with schedule_on_each_cpu(ftrace_sync)
         */
        hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());

        if (ftrace_hash_empty(hash)) {
                ret = 1;
                goto out;
        }

        if (ftrace_lookup_ip(hash, addr)) {

                /*
                 * This needs to be cleared on the return functions
                 * when the depth is zero.
                 */
                trace_recursion_set(TRACE_GRAPH_BIT);
                trace_recursion_set_depth(trace->depth);

                /*
                 * If no irqs are to be traced, but a set_graph_function
                 * is set, and called by an interrupt handler, we still
                 * want to trace it.
                 */
                if (in_irq())
                        trace_recursion_set(TRACE_IRQ_BIT);
                else
                        trace_recursion_clear(TRACE_IRQ_BIT);
                ret = 1;
        }

out:
        preempt_enable_notrace();
        return ret;
}

static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{
        if (trace_recursion_test(TRACE_GRAPH_BIT) &&
            trace->depth == trace_recursion_depth())
                trace_recursion_clear(TRACE_GRAPH_BIT);
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
        int ret = 0;
        struct ftrace_hash *notrace_hash;

        preempt_disable_notrace();

        /*
         * Have to open code "rcu_dereference_sched()" because the
         * function graph tracer can be called when RCU is not
         * "watching".
         * Protected with schedule_on_each_cpu(ftrace_sync)
         */
        notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
                                                 !preemptible());

        if (ftrace_lookup_ip(notrace_hash, addr))
                ret = 1;

        preempt_enable_notrace();
        return ret;
}
#else
static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
        return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
        return 0;
}
static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
{ }
#endif /* CONFIG_DYNAMIC_FTRACE */

extern unsigned int fgraph_max_depth;

static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
        /* Trace it when it is nested in an enabled function, or is one itself. */
        return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
                 ftrace_graph_addr(trace)) ||
                (trace->depth < 0) ||
                (fgraph_max_depth && trace->depth >= fgraph_max_depth);
}

#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER

#define FTRACE_PID_IGNORE       -1
#define FTRACE_PID_TRACE        -2

struct ftrace_func_command {
        struct list_head        list;
        char                    *name;
        int                     (*func)(struct trace_array *tr,
                                        struct ftrace_hash *hash,
                                        char *func, char *cmd,
                                        char *params, int enable);
};
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
        return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
                FTRACE_PID_IGNORE;
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
                                 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
int ftrace_allocate_ftrace_ops(struct trace_array *tr);
void ftrace_free_ftrace_ops(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
                                  struct dentry *d_tracer);
void ftrace_clear_pids(struct trace_array *tr);
int init_function_trace(void);
void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
        return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
                             struct dentry *parent)
{
        return 0;
}
static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
        return 0;
}
static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { }
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_clear_pids(struct trace_array *tr) { }
static inline int init_function_trace(void) { return 0; }
static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)

struct ftrace_probe_ops {
        void                    (*func)(unsigned long ip,
                                        unsigned long parent_ip,
                                        struct trace_array *tr,
                                        struct ftrace_probe_ops *ops,
                                        void *data);
        int                     (*init)(struct ftrace_probe_ops *ops,
                                        struct trace_array *tr,
                                        unsigned long ip, void *init_data,
                                        void **data);
        void                    (*free)(struct ftrace_probe_ops *ops,
                                        struct trace_array *tr,
                                        unsigned long ip, void *data);
        int                     (*print)(struct seq_file *m,
                                         unsigned long ip,
                                         struct ftrace_probe_ops *ops,
                                         void *data);
};

struct ftrace_func_mapper;
typedef int (*ftrace_mapper_func)(void *data);

struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
                                           unsigned long ip);
int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
                               unsigned long ip, void *data);
void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
                                   unsigned long ip);
void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
                             ftrace_mapper_func free_func);

extern int
register_ftrace_function_probe(char *glob, struct trace_array *tr,
                               struct ftrace_probe_ops *ops, void *data);
extern int
unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
                                      struct ftrace_probe_ops *ops);
extern void clear_ftrace_function_probes(struct trace_array *tr);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

void ftrace_create_filter_files(struct ftrace_ops *ops,
                                struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);

extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                             int len, int reset);
extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                              int len, int reset);
#else
struct ftrace_func_command;

static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
        return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
        return -EINVAL;
}
static inline void clear_ftrace_function_probes(struct trace_array *tr)
{
}

/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
        bool            cont;
        char            *buffer;
        unsigned        idx;
        unsigned        size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
        return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
        return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
        parser->cont = false;
        parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos);
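
/*
 * Typical read-side loop (a sketch of the idiom used by filter write
 * handlers): pull one space-separated word at a time from a user
 * buffer. After a complete word is loaded, parser.buffer holds it
 * NUL-terminated; handle_word() below is a hypothetical consumer:
 *
 *      read = trace_get_user(&parser, ubuf, cnt, ppos);
 *      if (read >= 0 && trace_parser_loaded(&parser) &&
 *          !trace_parser_cont(&parser)) {
 *              err = handle_word(parser.buffer);
 *              trace_parser_clear(&parser);
 *      }
 */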

/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS                                           \
                C(DISPLAY_GRAPH,        "display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS                                   \
                C(BRANCH,               "branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS                                         \
                C(FUNCTION,             "function-trace"),      \
                C(FUNC_FORK,            "function-fork"),
# define FUNCTION_DEFAULT_FLAGS         TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS         0UL
# define TRACE_ITER_FUNC_FORK           0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS                            \
                C(STACKTRACE,           "stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS                                             \
                C(PRINT_PARENT,         "print-parent"),        \
                C(SYM_OFFSET,           "sym-offset"),          \
                C(SYM_ADDR,             "sym-addr"),            \
                C(VERBOSE,              "verbose"),             \
                C(RAW,                  "raw"),                 \
                C(HEX,                  "hex"),                 \
                C(BIN,                  "bin"),                 \
                C(BLOCK,                "block"),               \
                C(PRINTK,               "trace_printk"),        \
                C(ANNOTATE,             "annotate"),            \
                C(USERSTACKTRACE,       "userstacktrace"),      \
                C(SYM_USEROBJ,          "sym-userobj"),         \
                C(PRINTK_MSGONLY,       "printk-msg-only"),     \
                C(CONTEXT_INFO,         "context-info"),   /* Print pid/cpu/time */ \
                C(LATENCY_FMT,          "latency-format"),      \
                C(RECORD_CMD,           "record-cmd"),          \
                C(RECORD_TGID,          "record-tgid"),         \
                C(OVERWRITE,            "overwrite"),           \
                C(STOP_ON_FREE,         "disable_on_free"),     \
                C(IRQ_INFO,             "irq-info"),            \
                C(MARKERS,              "markers"),             \
                C(EVENT_FORK,           "event-fork"),          \
                C(PAUSE_ON_TRACE,       "pause-on-trace"),      \
                C(HASH_PTR,             "hash-ptr"),    /* Print hashed pointer */ \
                FUNCTION_FLAGS                                  \
                FGRAPH_FLAGS                                    \
                STACK_FLAGS                                     \
                BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
        TRACE_FLAGS
        /* Make sure we don't go more than we have bits for */
        TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
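
/*
 * For example, the entry C(PRINT_PARENT, "print-parent") expands to
 * TRACE_ITER_PRINT_PARENT_BIT in the first enum and to
 * TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT) in the
 * second, while "print-parent" names the option in the trace_options
 * file.
 */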
1230
1231/*
1232 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1233 * control the output of kernel symbols.
1234 */
1235#define TRACE_ITER_SYM_MASK \
1236        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1237
1238extern struct tracer nop_trace;
1239
1240#ifdef CONFIG_BRANCH_TRACER
1241extern int enable_branch_tracing(struct trace_array *tr);
1242extern void disable_branch_tracing(void);
1243static inline int trace_branch_enable(struct trace_array *tr)
1244{
1245        if (tr->trace_flags & TRACE_ITER_BRANCH)
1246                return enable_branch_tracing(tr);
1247        return 0;
1248}
1249static inline void trace_branch_disable(void)
1250{
1251        /* due to races, always disable */
1252        disable_branch_tracing();
1253}
1254#else
1255static inline int trace_branch_enable(struct trace_array *tr)
1256{
1257        return 0;
1258}
1259static inline void trace_branch_disable(void)
1260{
1261}
1262#endif /* CONFIG_BRANCH_TRACER */
1263
1264/* set ring buffers to default size if not already done so */
1265int tracing_update_buffers(void);
1266
1267struct ftrace_event_field {
1268        struct list_head        link;
1269        const char              *name;
1270        const char              *type;
1271        int                     filter_type;
1272        int                     offset;
1273        int                     size;
1274        int                     is_signed;
1275};
1276
1277struct prog_entry;
1278
1279struct event_filter {
1280        struct prog_entry __rcu *prog;
1281        char                    *filter_string;
1282};
1283
1284struct event_subsystem {
1285        struct list_head        list;
1286        const char              *name;
1287        struct event_filter     *filter;
1288        int                     ref_count;
1289};
1290
1291struct trace_subsystem_dir {
1292        struct list_head                list;
1293        struct event_subsystem          *subsystem;
1294        struct trace_array              *tr;
1295        struct dentry                   *entry;
1296        int                             ref_count;
1297        int                             nr_events;
1298};
1299
1300extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1301                                     struct trace_buffer *buffer,
1302                                     struct ring_buffer_event *event);
1303
1304void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1305                                     struct trace_buffer *buffer,
1306                                     struct ring_buffer_event *event,
1307                                     unsigned int trcace_ctx,
1308                                     struct pt_regs *regs);
1309
1310static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1311                                              struct trace_buffer *buffer,
1312                                              struct ring_buffer_event *event,
1313                                              unsigned int trace_ctx)
1314{
1315        trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL);
1316}
1317
1318DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1319DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1320void trace_buffered_event_disable(void);
1321void trace_buffered_event_enable(void);
1322
1323static inline void
1324__trace_event_discard_commit(struct trace_buffer *buffer,
1325                             struct ring_buffer_event *event)
1326{
1327        if (this_cpu_read(trace_buffered_event) == event) {
1328                /* Simply release the temp buffer */
1329                this_cpu_dec(trace_buffered_event_cnt);
1330                return;
1331        }
1332        ring_buffer_discard_commit(buffer, event);
1333}
1334
1335/*
1336 * Helper function for event_trigger_unlock_commit{_regs}().
1337 * If there are event triggers attached to this event that requires
1338 * filtering against its fields, then they will be called as the
1339 * entry already holds the field information of the current event.
1340 *
1341 * It also checks if the event should be discarded or not.
1342 * It is to be discarded if the event is soft disabled and the
1343 * event was only recorded to process triggers, or if the event
1344 * filter is active and this event did not match the filters.
1345 *
1346 * Returns true if the event is discarded, false otherwise.
1347 */
1348static inline bool
1349__event_trigger_test_discard(struct trace_event_file *file,
1350                             struct trace_buffer *buffer,
1351                             struct ring_buffer_event *event,
1352                             void *entry,
1353                             enum event_trigger_type *tt)
1354{
1355        unsigned long eflags = file->flags;
1356
1357        if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1358                *tt = event_triggers_call(file, buffer, entry, event);
1359
1360        if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1361            (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1362             !filter_match_preds(file->filter, entry))) {
1363                __trace_event_discard_commit(buffer, event);
1364                return true;
1365        }
1366
1367        return false;
1368}
1369
1370/**
1371 * event_trigger_unlock_commit - handle triggers and finish event commit
1372 * @file: The file pointer associated with the event
1373 * @buffer: The ring buffer that the event is being written to
1374 * @event: The event meta data in the ring buffer
1375 * @entry: The event itself
1376 * @trace_ctx: The tracing context flags.
1377 *
1378 * This is a helper function to handle triggers that require data
1379 * from the event itself. It also tests the event against filters and
1380 * checks whether the event is soft disabled and should be discarded.
1381 */
1382static inline void
1383event_trigger_unlock_commit(struct trace_event_file *file,
1384                            struct trace_buffer *buffer,
1385                            struct ring_buffer_event *event,
1386                            void *entry, unsigned int trace_ctx)
1387{
1388        enum event_trigger_type tt = ETT_NONE;
1389
1390        if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1391                trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx);
1392
1393        if (tt)
1394                event_triggers_post_call(file, tt);
1395}
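/*
 * Usage sketch (illustrative only; the reserve/fill steps are
 * abbreviated and the surrounding names are assumptions): a typical
 * event probe reserves an event, fills in the entry, then lets the
 * helper above run triggers and filters before the final commit:
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, file,
 *						call->event.type,
 *						sizeof(*entry), trace_ctx);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->some_field = ...;
 *	event_trigger_unlock_commit(file, buffer, event, entry, trace_ctx);
 */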
1396
1397#define FILTER_PRED_INVALID     ((unsigned short)-1)
1398#define FILTER_PRED_IS_RIGHT    (1 << 15)
1399#define FILTER_PRED_FOLD        (1 << 15)
1400
1401/*
1402 * The maximum number of preds is limited by the size of an unsigned
1403 * short minus two flag bits at the MSBs: one bit is shared by the
1404 * IS_RIGHT and FOLD flags, and the other is reserved.
1405 *
1406 * 2^14 preds is way more than enough.
1407 */
1408#define MAX_FILTER_PRED         16384
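/*
 * Sketch of the resulting layout (illustrative; the variable names
 * are made up): bit 15 carries the IS_RIGHT/FOLD flag and bits 0-13
 * carry the pred index:
 *
 *	unsigned short index = pred_idx | FILTER_PRED_IS_RIGHT;
 *	bool is_right = index & FILTER_PRED_IS_RIGHT;
 *	unsigned short idx = index & (MAX_FILTER_PRED - 1);
 */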
1409
1410struct filter_pred;
1411struct regex;
1412
1413typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1414
1415typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1416
1417enum regex_type {
1418        MATCH_FULL = 0,
1419        MATCH_FRONT_ONLY,
1420        MATCH_MIDDLE_ONLY,
1421        MATCH_END_ONLY,
1422        MATCH_GLOB,
1423        MATCH_INDEX,
1424};
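/*
 * For reference, filter_parse_regex() (declared below) maps the
 * user's search string onto these types, roughly:
 *
 *	"foo"	-> MATCH_FULL
 *	"foo*"	-> MATCH_FRONT_ONLY
 *	"*foo*"	-> MATCH_MIDDLE_ONLY
 *	"*foo"	-> MATCH_END_ONLY
 */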
1425
1426struct regex {
1427        char                    pattern[MAX_FILTER_STR_VAL];
1428        int                     len;
1429        int                     field_len;
1430        regex_match_func        match;
1431};
1432
1433struct filter_pred {
1434        filter_pred_fn_t        fn;
1435        u64                     val;
1436        struct regex            regex;
1437        unsigned short          *ops;
1438        struct ftrace_event_field *field;
1439        int                     offset;
1440        int                     not;
1441        int                     op;
1442};
1443
1444static inline bool is_string_field(struct ftrace_event_field *field)
1445{
1446        return field->filter_type == FILTER_DYN_STRING ||
1447               field->filter_type == FILTER_STATIC_STRING ||
1448               field->filter_type == FILTER_PTR_STRING ||
1449               field->filter_type == FILTER_COMM;
1450}
1451
1452static inline bool is_function_field(struct ftrace_event_field *field)
1453{
1454        return field->filter_type == FILTER_TRACE_FN;
1455}
1456
1457extern enum regex_type
1458filter_parse_regex(char *buff, int len, char **search, int *not);
1459extern void print_event_filter(struct trace_event_file *file,
1460                               struct trace_seq *s);
1461extern int apply_event_filter(struct trace_event_file *file,
1462                              char *filter_string);
1463extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1464                                        char *filter_string);
1465extern void print_subsystem_event_filter(struct event_subsystem *system,
1466                                         struct trace_seq *s);
1467extern int filter_assign_type(const char *type);
1468extern int create_event_filter(struct trace_array *tr,
1469                               struct trace_event_call *call,
1470                               char *filter_str, bool set_str,
1471                               struct event_filter **filterp);
1472extern void free_event_filter(struct event_filter *filter);
1473
1474struct ftrace_event_field *
1475trace_find_event_field(struct trace_event_call *call, char *name);
1476
1477extern void trace_event_enable_cmd_record(bool enable);
1478extern void trace_event_enable_tgid_record(bool enable);
1479
1480extern int event_trace_init(void);
1481extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1482extern int event_trace_del_tracer(struct trace_array *tr);
1483extern void __trace_early_add_events(struct trace_array *tr);
1484
1485extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1486                                                  const char *system,
1487                                                  const char *event);
1488extern struct trace_event_file *find_event_file(struct trace_array *tr,
1489                                                const char *system,
1490                                                const char *event);
1491
1492static inline void *event_file_data(struct file *filp)
1493{
1494        return READ_ONCE(file_inode(filp)->i_private);
1495}
1496
1497extern struct mutex event_mutex;
1498extern struct list_head ftrace_events;
1499
1500extern const struct file_operations event_trigger_fops;
1501extern const struct file_operations event_hist_fops;
1502extern const struct file_operations event_hist_debug_fops;
1503extern const struct file_operations event_inject_fops;
1504
1505#ifdef CONFIG_HIST_TRIGGERS
1506extern int register_trigger_hist_cmd(void);
1507extern int register_trigger_hist_enable_disable_cmds(void);
1508#else
1509static inline int register_trigger_hist_cmd(void) { return 0; }
1510static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1511#endif
1512
1513extern int register_trigger_cmds(void);
1514extern void clear_event_triggers(struct trace_array *tr);
1515
1516enum {
1517        EVENT_TRIGGER_FL_PROBE          = BIT(0),
1518};
1519
1520struct event_trigger_data {
1521        unsigned long                   count;
1522        int                             ref;
1523        int                             flags;
1524        struct event_trigger_ops        *ops;
1525        struct event_command            *cmd_ops;
1526        struct event_filter __rcu       *filter;
1527        char                            *filter_str;
1528        void                            *private_data;
1529        bool                            paused;
1530        bool                            paused_tmp;
1531        struct list_head                list;
1532        char                            *name;
1533        struct list_head                named_list;
1534        struct event_trigger_data       *named_data;
1535};
1536
1537/* Avoid typos */
1538#define ENABLE_EVENT_STR        "enable_event"
1539#define DISABLE_EVENT_STR       "disable_event"
1540#define ENABLE_HIST_STR         "enable_hist"
1541#define DISABLE_HIST_STR        "disable_hist"
1542
1543struct enable_trigger_data {
1544        struct trace_event_file         *file;
1545        bool                            enable;
1546        bool                            hist;
1547};
1548
1549extern int event_enable_trigger_print(struct seq_file *m,
1550                                      struct event_trigger_ops *ops,
1551                                      struct event_trigger_data *data);
1552extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1553                                      struct event_trigger_data *data);
1554extern int event_enable_trigger_func(struct event_command *cmd_ops,
1555                                     struct trace_event_file *file,
1556                                     char *glob, char *cmd, char *param);
1557extern int event_enable_register_trigger(char *glob,
1558                                         struct event_trigger_ops *ops,
1559                                         struct event_trigger_data *data,
1560                                         struct trace_event_file *file);
1561extern void event_enable_unregister_trigger(char *glob,
1562                                            struct event_trigger_ops *ops,
1563                                            struct event_trigger_data *test,
1564                                            struct trace_event_file *file);
1565extern void trigger_data_free(struct event_trigger_data *data);
1566extern int event_trigger_init(struct event_trigger_ops *ops,
1567                              struct event_trigger_data *data);
1568extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1569                                              int trigger_enable);
1570extern void update_cond_flag(struct trace_event_file *file);
1571extern int set_trigger_filter(char *filter_str,
1572                              struct event_trigger_data *trigger_data,
1573                              struct trace_event_file *file);
1574extern struct event_trigger_data *find_named_trigger(const char *name);
1575extern bool is_named_trigger(struct event_trigger_data *test);
1576extern int save_named_trigger(const char *name,
1577                              struct event_trigger_data *data);
1578extern void del_named_trigger(struct event_trigger_data *data);
1579extern void pause_named_trigger(struct event_trigger_data *data);
1580extern void unpause_named_trigger(struct event_trigger_data *data);
1581extern void set_named_trigger_data(struct event_trigger_data *data,
1582                                   struct event_trigger_data *named_data);
1583extern struct event_trigger_data *
1584get_named_trigger_data(struct event_trigger_data *data);
1585extern int register_event_command(struct event_command *cmd);
1586extern int unregister_event_command(struct event_command *cmd);
1588
1589/**
1590 * struct event_trigger_ops - callbacks for trace event triggers
1591 *
1592 * The methods in this structure provide per-event trigger hooks for
1593 * various trigger operations.
1594 *
1595 * All the methods below, except for @init() and @free(), must be
1596 * implemented.
1597 *
1598 * @func: The trigger 'probe' function called when the triggering
1599 *      event occurs.  The data passed into this callback is the data
1600 *      that was supplied to the event_command @reg() function that
1601 *      registered the trigger (see struct event_command) along with
1602 *      the trace record, rec.
1603 *
1604 * @init: An optional initialization function called for the trigger
1605 *      when the trigger is registered (via the event_command reg()
1606 *      function).  This can be used to perform per-trigger
1607 *      initialization such as incrementing a per-trigger reference
1608 *      count, for instance.  This is usually implemented by the
1609 *      generic utility function @event_trigger_init() (see
1610 *      trace_events_trigger.c).
1611 *
1612 * @free: An optional de-initialization function called for the
1613 *      trigger when the trigger is unregistered (via the
1614 *      event_command @reg() function).  This can be used to perform
1615 *      per-trigger de-initialization such as decrementing a
1616 *      per-trigger reference count and freeing corresponding trigger
1617 *      data, for instance.  This is usually implemented by the
1618 *      generic utility function @event_trigger_free() (see
1619 *      trace_events_trigger.c).
1620 *
1621 * @print: The callback function invoked to have the trigger print
1622 *      itself.  This is usually implemented by a wrapper function
1623 *      that calls the generic utility function @event_trigger_print()
1624 *      (see trace_events_trigger.c).
1625 */
1626struct event_trigger_ops {
1627        void                    (*func)(struct event_trigger_data *data,
1628                                        struct trace_buffer *buffer, void *rec,
1629                                        struct ring_buffer_event *rbe);
1630        int                     (*init)(struct event_trigger_ops *ops,
1631                                        struct event_trigger_data *data);
1632        void                    (*free)(struct event_trigger_ops *ops,
1633                                        struct event_trigger_data *data);
1634        int                     (*print)(struct seq_file *m,
1635                                         struct event_trigger_ops *ops,
1636                                         struct event_trigger_data *data);
1637};
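/*
 * Illustrative sketch (the foo_* names are hypothetical): a trigger
 * usually points the optional @init/@free hooks at the generic
 * helpers named in the kerneldoc above:
 *
 *	static struct event_trigger_ops foo_trigger_ops = {
 *		.func	= foo_trigger,
 *		.print	= foo_trigger_print,
 *		.init	= event_trigger_init,
 *		.free	= event_trigger_free,
 *	};
 */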
1638
1639/**
1640 * struct event_command - callbacks and data members for event commands
1641 *
1642 * Event commands are invoked by users by writing the command name
1643 * into the 'trigger' file associated with a trace event.  The
1644 * parameters associated with a specific invocation of an event
1645 * command are used to create an event trigger instance, which is
1646 * added to the list of trigger instances associated with that trace
1647 * event.  When the event is hit, the set of triggers associated with
1648 * that event is invoked.
1649 *
1650 * The data members in this structure provide per-event command data
1651 * for various event commands.
1652 *
1653 * All the data members below, except for @post_trigger, must be set
1654 * for each event command.
1655 *
1656 * @name: The unique name that identifies the event command.  This is
1657 *      the name used when setting triggers via trigger files.
1658 *
1659 * @trigger_type: A unique id that identifies the event command
1660 *      'type'.  This value has two purposes: the first is to ensure that
1661 *      only one trigger of the same type can be set at a given time
1662 *      for a particular event, e.g. it doesn't make sense to have both
1663 *      a traceon and traceoff trigger attached to a single event at
1664 *      the same time, so traceon and traceoff have the same type
1665 *      though they have different names.  The @trigger_type value is
1666 *      also used as a bit value for deferring the actual trigger
1667 *      action until after the current event is finished.  Some
1668 *      commands need to do this if they themselves log to the trace
1669 *      buffer (see the @post_trigger() member below).  @trigger_type
1670 *      values are defined by adding new values to the trigger_type
1671 *      enum in include/linux/trace_events.h.
1672 *
1673 * @flags: See the enum event_command_flags below.
1674 *
1675 * All the methods below, except for @set_filter() and @unreg_all(),
1676 * must be implemented.
1677 *
1678 * @func: The callback function responsible for parsing and
1679 *      registering the trigger written to the 'trigger' file by the
1680 *      user.  It allocates the trigger instance and registers it with
1681 *      the appropriate trace event.  It makes use of the other
1682 *      event_command callback functions to orchestrate this, and is
1683 *      usually implemented by the generic utility function
1684 *      @event_trigger_callback() (see trace_events_trigger.c).
1685 *
1686 * @reg: Adds the trigger to the list of triggers associated with the
1687 *      event, and enables the event trigger itself, after
1688 *      initializing it (via the event_trigger_ops @init() function).
1689 *      This is also where commands can use the @trigger_type value to
1690 *      make the decision as to whether or not multiple instances of
1691 *      the trigger should be allowed.  This is usually implemented by
1692 *      the generic utility function @register_trigger() (see
1693 *      trace_events_trigger.c).
1694 *
1695 * @unreg: Removes the trigger from the list of triggers associated
1696 *      with the event, and disables the event trigger itself, after
1697 *      de-initializing it (via the event_trigger_ops @free() function).
1698 *      This is usually implemented by the generic utility function
1699 *      @unregister_trigger() (see trace_events_trigger.c).
1700 *
1701 * @unreg_all: An optional function called to remove all the triggers
1702 *      from the list of triggers associated with the event.  Called
1703 *      when a trigger file is opened in truncate mode.
1704 *
1705 * @set_filter: An optional function called to parse and set a filter
1706 *      for the trigger.  If no @set_filter() method is set for the
1707 *      event command, filters set by the user for the command will be
1708 *      ignored.  This is usually implemented by the generic utility
1709 *      function @set_trigger_filter() (see trace_events_trigger.c).
1710 *
1711 * @get_trigger_ops: The callback function invoked to retrieve the
1712 *      event_trigger_ops implementation associated with the command.
1713 */
1714struct event_command {
1715        struct list_head        list;
1716        char                    *name;
1717        enum event_trigger_type trigger_type;
1718        int                     flags;
1719        int                     (*func)(struct event_command *cmd_ops,
1720                                        struct trace_event_file *file,
1721                                        char *glob, char *cmd, char *params);
1722        int                     (*reg)(char *glob,
1723                                       struct event_trigger_ops *ops,
1724                                       struct event_trigger_data *data,
1725                                       struct trace_event_file *file);
1726        void                    (*unreg)(char *glob,
1727                                         struct event_trigger_ops *ops,
1728                                         struct event_trigger_data *data,
1729                                         struct trace_event_file *file);
1730        void                    (*unreg_all)(struct trace_event_file *file);
1731        int                     (*set_filter)(char *filter_str,
1732                                              struct event_trigger_data *data,
1733                                              struct trace_event_file *file);
1734        struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1735};
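/*
 * Illustrative sketch (a hypothetical "foo" command; the generic
 * helpers named in the kerneldoc above live in trace_events_trigger.c
 * and are not all visible from this header):
 *
 *	static struct event_command trigger_foo_cmd = {
 *		.name			= "foo",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.set_filter		= set_trigger_filter,
 *		.get_trigger_ops	= foo_get_trigger_ops,
 *	};
 *
 *	ret = register_event_command(&trigger_foo_cmd);
 */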
1736
1737/**
1738 * enum event_command_flags - flags for struct event_command
1739 *
1740 * @POST_TRIGGER: A flag that says whether or not this command needs
1741 *      to have its action delayed until after the current event has
1742 *      been closed.  Some triggers need to avoid being invoked while
1743 *      an event is currently in the process of being logged, since
1744 *      the trigger may itself log data into the trace buffer.  Thus
1745 *      we make sure the current event is committed before invoking
1746 *      those triggers.  To do that, the trigger invocation is split
1747 *      in two - the first part checks the filter using the current
1748 *      trace record; if a command has the @post_trigger flag set, it
1749 *      sets a bit for itself in the return value, otherwise it
1750 *      directly invokes the trigger.  Once all commands have been
1751 *      either invoked or set their return flag, the current record is
1752 *      either committed or discarded.  At that point, if any commands
1753 *      have deferred their triggers, those commands are finally
1754 *      invoked following the close of the current event.  In other
1755 *      words, if the event_trigger_ops @func() probe implementation
1756 *      itself logs to the trace buffer, this flag should be set,
1757 *      otherwise it can be left unspecified.
1758 *
1759 * @NEEDS_REC: A flag that says whether or not this command needs
1760 *      access to the trace record in order to perform its function,
1761 *      regardless of whether or not it has a filter associated with
1762 *      it (filters make a trigger require access to the trace record
1763 *      but are not always present).
1764 */
1765enum event_command_flags {
1766        EVENT_CMD_FL_POST_TRIGGER       = 1,
1767        EVENT_CMD_FL_NEEDS_REC          = 2,
1768};
1769
1770static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1771{
1772        return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1773}
1774
1775static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1776{
1777        return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1778}
1779
1780extern int trace_event_enable_disable(struct trace_event_file *file,
1781                                      int enable, int soft_disable);
1782extern int tracing_alloc_snapshot(void);
1783extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1784extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1785
1786extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1787extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1788
1789extern const char *__start___trace_bprintk_fmt[];
1790extern const char *__stop___trace_bprintk_fmt[];
1791
1792extern const char *__start___tracepoint_str[];
1793extern const char *__stop___tracepoint_str[];
1794
1795void trace_printk_control(bool enabled);
1796void trace_printk_start_comm(void);
1797int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1798int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1799
1800/* Used from boot time tracer */
1801extern int trace_set_options(struct trace_array *tr, char *option);
1802extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
1803extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
1804                                          unsigned long size, int cpu_id);
1805extern int tracing_set_cpumask(struct trace_array *tr,
1806                                cpumask_var_t tracing_cpumask_new);
1807
1808
1809#define MAX_EVENT_NAME_LEN      64
1810
1811extern ssize_t trace_parse_run_command(struct file *file,
1812                const char __user *buffer, size_t count, loff_t *ppos,
1813                int (*createfn)(const char *));
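/*
 * Usage sketch (illustrative; the foo_* names are invented): dynamic
 * event files such as kprobe_events feed each line the user writes
 * through a creation callback via this helper:
 *
 *	static int create_foo_event(const char *raw_command) { ... }
 *
 *	static ssize_t foo_write(struct file *file, const char __user *ubuf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		return trace_parse_run_command(file, ubuf, count, ppos,
 *					       create_foo_event);
 *	}
 */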
1814
1815extern unsigned int err_pos(char *cmd, const char *str);
1816extern void tracing_log_err(struct trace_array *tr,
1817                            const char *loc, const char *cmd,
1818                            const char **errs, u8 type, u8 pos);
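/*
 * Typical pattern (illustrative; the foo_errs array and its index are
 * assumptions): err_pos() locates the offending token within the
 * command so the caret in the tracefs error_log file points at it:
 *
 *	static const char *foo_errs[] = { "Invalid argument", ... };
 *
 *	tracing_log_err(tr, "trigger", cmd, foo_errs,
 *			FOO_ERR_INVALID, err_pos(cmd, bad_tok));
 */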
1819
1820/*
1821 * Normal trace_printk() and friends allocate special buffers
1822 * to do the manipulation, and save the print formats into
1823 * sections for display. But the trace infrastructure wants to
1824 * use these without the added overhead, at the price of being
1825 * a bit slower (this is used mainly for warnings, where we
1826 * don't care about performance). internal_trace_puts() exists
1827 * for such a purpose.
1828 */
1829#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
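/* e.g. (illustrative): internal_trace_puts("*** snapshot disabled ***\n"); */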
1830
1831#undef FTRACE_ENTRY
1832#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)     \
1833        extern struct trace_event_call                                  \
1834        __aligned(4) event_##call;
1835#undef FTRACE_ENTRY_DUP
1836#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \
1837        FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1838#undef FTRACE_ENTRY_PACKED
1839#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
1840        FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
1841
1842#include "trace_entries.h"
1843
1844#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1845int perf_ftrace_event_register(struct trace_event_call *call,
1846                               enum trace_reg type, void *data);
1847#else
1848#define perf_ftrace_event_register NULL
1849#endif
1850
1851#ifdef CONFIG_FTRACE_SYSCALLS
1852void init_ftrace_syscalls(void);
1853const char *get_syscall_name(int syscall);
1854#else
1855static inline void init_ftrace_syscalls(void) { }
1856static inline const char *get_syscall_name(int syscall)
1857{
1858        return NULL;
1859}
1860#endif
1861
1862#ifdef CONFIG_EVENT_TRACING
1863void trace_event_init(void);
1864void trace_event_eval_update(struct trace_eval_map **map, int len);
1865/* Used from boot time tracer */
1866extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
1867extern int trigger_process_regex(struct trace_event_file *file, char *buff);
1868#else
1869static inline void __init trace_event_init(void) { }
1870static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1871#endif
1872
1873#ifdef CONFIG_TRACER_SNAPSHOT
1874void tracing_snapshot_instance(struct trace_array *tr);
1875int tracing_alloc_snapshot_instance(struct trace_array *tr);
1876#else
1877static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1878static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1879{
1880        return 0;
1881}
1882#endif
1883
1884#ifdef CONFIG_PREEMPT_TRACER
1885void tracer_preempt_on(unsigned long a0, unsigned long a1);
1886void tracer_preempt_off(unsigned long a0, unsigned long a1);
1887#else
1888static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
1889static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
1890#endif
1891#ifdef CONFIG_IRQSOFF_TRACER
1892void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
1893void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
1894#else
1895static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
1896static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
1897#endif
1898
1899extern struct trace_iterator *tracepoint_print_iter;
1900
1901/*
1902 * Reset the state of the trace_iterator so that it can read consumed data.
1903 * Normally, the trace_iterator is used for reading data without consuming
1904 * it, and must retain state.
1905 */
1906static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
1907{
1908        const size_t offset = offsetof(struct trace_iterator, seq);
1909
1910        /*
1911         * Keep gcc from complaining about overwriting more than just one
1912         * member in the structure.
1913         */
1914        memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
1915
1916        iter->pos = -1;
1917}
1918
1919/* Check the name is good for event/group/fields */
1920static inline bool is_good_name(const char *name)
1921{
1922        if (!isalpha(*name) && *name != '_')
1923                return false;
1924        while (*++name != '\0') {
1925                if (!isalpha(*name) && !isdigit(*name) && *name != '_')
1926                        return false;
1927        }
1928        return true;
1929}
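/*
 * e.g. (illustrative): is_good_name("sys_enter_openat") is true;
 * is_good_name("1bad") and is_good_name("bad.name") are false.
 */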
1930
1931/* Convert certain expected symbols into '_' when generating event names */
1932static inline void sanitize_event_name(char *name)
1933{
1934        while (*name++ != '\0')
1935                if (*name == ':' || *name == '.')
1936                        *name = '_';
1937}
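/*
 * e.g. (illustrative): this rewrites "p:myprobe.func" in place to
 * "p_myprobe_func"; note that the first character is never converted.
 */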
1938
1939/*
1940 * This is a generic way to read and write a u64 value from a file in tracefs.
1941 *
1942 * The value is stored in the variable pointed to by *val. The value needs
1943 * to be at least *min and at most *max. Writes are protected by an
1944 * existing *lock.
1945 */
1946struct trace_min_max_param {
1947        struct mutex    *lock;
1948        u64             *val;
1949        u64             *min;
1950        u64             *max;
1951};
1952
1953#define U64_STR_SIZE            24      /* 20 digits max, plus '\n' and '\0' */
1954
1955extern const struct file_operations trace_min_max_fops;
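/*
 * Usage sketch (illustrative; the my_* names and the mode/parent are
 * invented): expose a clamped u64 knob in tracefs with the fops
 * declared above:
 *
 *	static u64 my_val;
 *	static u64 my_min = 1, my_max = 1000000;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", 0640, parent, &my_param,
 *			  &trace_min_max_fops);
 */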
1956
1957#endif /* _LINUX_KERNEL_TRACE_H */
1958