linux/include/linux/ftrace_event.h
#ifndef _LINUX_FTRACE_EVENT_H
#define _LINUX_FTRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;

struct trace_print_flags {
        unsigned long           mask;
        const char              *name;
};

struct trace_print_flags_u64 {
        unsigned long long      mask;
        const char              *name;
};

const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
                                   unsigned long flags,
                                   const struct trace_print_flags *flag_array);

const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                                     const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
                                         unsigned long long val,
                                         const struct trace_print_flags_u64
                                                                 *symbol_array);
#endif

const char *ftrace_print_hex_seq(struct trace_seq *p,
                                 const unsigned char *buf, int len);
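
/*
 * These helpers back the __print_flags(), __print_symbolic() and
 * __print_hex() macros used in a TRACE_EVENT() print format.
 * Illustrative use (a sketch; the event and field names are hypothetical,
 * not part of this header):
 *
 *      TP_printk("state=%s",
 *                __print_symbolic(__entry->state,
 *                                 { 1, "RUNNING" },
 *                                 { 2, "SLEEPING" }))
 *
 * which expands (roughly) to a call to ftrace_print_symbols_seq() with a
 * trace_print_flags array built from the { value, name } pairs.
 */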

struct trace_iterator;
struct trace_event;

int ftrace_raw_output_prep(struct trace_iterator *iter,
                           struct trace_event *event);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
        unsigned short          type;
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
};

#define FTRACE_MAX_EVENT                                                \
        ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
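
/*
 * Since trace_entry::type is an unsigned short (16 bits), this works out
 * to (1 << 16) - 1 = 65535 possible event type values.
 */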

/*
 * Trace iterator - used by printout routines that present trace
 * results to users and that might sleep, etc.:
 */
struct trace_iterator {
        struct trace_array      *tr;
        struct tracer           *trace;
        struct trace_buffer     *trace_buffer;
        void                    *private;
        int                     cpu_file;
        struct mutex            mutex;
        struct ring_buffer_iter **buffer_iter;
        unsigned long           iter_flags;

        /* trace_seq for __print_flags() and __print_symbolic() etc. */
        struct trace_seq        tmp_seq;

        cpumask_var_t           started;

        /* true when the current open file is a snapshot */
        bool                    snapshot;

        /* The below is zeroed out in pipe_read */
        struct trace_seq        seq;
        struct trace_entry      *ent;
        unsigned long           lost_events;
        int                     leftover;
        int                     ent_size;
        int                     cpu;
        u64                     ts;

        loff_t                  pos;
        long                    idx;

        /* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
        TRACE_FILE_LAT_FMT      = 1,
        TRACE_FILE_ANNOTATE     = 2,
        TRACE_FILE_TIME_IN_NS   = 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
                                      int flags, struct trace_event *event);

struct trace_event_functions {
        trace_print_func        trace;
        trace_print_func        raw;
        trace_print_func        hex;
        trace_print_func        binary;
};

struct trace_event {
        struct hlist_node               node;
        struct list_head                list;
        int                             type;
        struct trace_event_functions    *funcs;
};

extern int register_ftrace_event(struct trace_event *event);
extern int unregister_ftrace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
        TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
        TRACE_TYPE_HANDLED      = 1,
        TRACE_TYPE_UNHANDLED    = 2,    /* Relay to other output functions */
        TRACE_TYPE_NO_CONSUME   = 3     /* Handled but ask to not consume */
};

void tracing_generic_entry_update(struct trace_entry *entry,
                                  unsigned long flags,
                                  int pc);
struct ftrace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
                                struct ftrace_event_file *ftrace_file,
                                int type, unsigned long len,
                                unsigned long flags, int pc);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
                                  int type, unsigned long len,
                                  unsigned long flags, int pc);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
                                        struct ring_buffer_event *event,
                                        unsigned long flags, int pc);
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
                                struct ring_buffer_event *event,
                                unsigned long flags, int pc);
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
                                     struct ring_buffer_event *event,
                                     unsigned long flags, int pc,
                                     struct pt_regs *regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                         struct ring_buffer_event *event);

void tracing_record_cmdline(struct task_struct *tsk);

int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
        TRACE_REG_REGISTER,
        TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
        TRACE_REG_PERF_REGISTER,
        TRACE_REG_PERF_UNREGISTER,
        TRACE_REG_PERF_OPEN,
        TRACE_REG_PERF_CLOSE,
        TRACE_REG_PERF_ADD,
        TRACE_REG_PERF_DEL,
#endif
};

struct ftrace_event_call;

struct ftrace_event_class {
        char                    *system;
        void                    *probe;
#ifdef CONFIG_PERF_EVENTS
        void                    *perf_probe;
#endif
        int                     (*reg)(struct ftrace_event_call *event,
                                       enum trace_reg type, void *data);
        int                     (*define_fields)(struct ftrace_event_call *);
        struct list_head        *(*get_fields)(struct ftrace_event_call *);
        struct list_head        fields;
        int                     (*raw_init)(struct ftrace_event_call *);
};

extern int ftrace_event_reg(struct ftrace_event_call *event,
                            enum trace_reg type, void *data);

int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
                        char *fmt, ...);

int ftrace_event_define_field(struct ftrace_event_call *call,
                              char *type, int len, char *item, int offset,
                              int field_size, int sign, int filter);

struct ftrace_event_buffer {
        struct ring_buffer              *buffer;
        struct ring_buffer_event        *event;
        struct ftrace_event_file        *ftrace_file;
        void                            *entry;
        unsigned long                   flags;
        int                             pc;
};

void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
                                  struct ftrace_event_file *ftrace_file,
                                  unsigned long len);

void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);

enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
        TRACE_EVENT_FL_NO_SET_FILTER_BIT,
        TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
        TRACE_EVENT_FL_WAS_ENABLED_BIT,
        TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
        TRACE_EVENT_FL_TRACEPOINT_BIT,
};

/*
 * Event flags:
 *  FILTERED      - The event has a filter attached
 *  CAP_ANY       - Any user can enable for perf
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
 *  WAS_ENABLED   - Set and stays set when an event was ever enabled
 *                    (used for module unloading; if a module event is enabled,
 *                     it is best to clear the buffers that used it).
 *  USE_CALL_FILTER - For ftrace internal events, don't use file filter
 *  TRACEPOINT    - Event is a tracepoint
 */
enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
        TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
        TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
        TRACE_EVENT_FL_WAS_ENABLED      = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
        TRACE_EVENT_FL_USE_CALL_FILTER  = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
        TRACE_EVENT_FL_TRACEPOINT       = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
};

struct ftrace_event_call {
        struct list_head        list;
        struct ftrace_event_class *class;
        union {
                char                    *name;
                /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
                struct tracepoint       *tp;
        };
        struct trace_event      event;
        const char              *print_fmt;
        struct event_filter     *filter;
        struct list_head        *files;
        void                    *mod;
        void                    *data;
        /*
         *   bit 0:             filter_active
         *   bit 1:             allow trace by non root (cap any)
         *   bit 2:             failed to apply filter
         *   bit 3:             ftrace internal event (do not enable)
         *   bit 4:             Event was enabled by module
         *   bit 5:             use call filter rather than file filter
         *   bit 6:             Event is a tracepoint
         */
        int                     flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
        int                             perf_refcount;
        struct hlist_head __percpu      *perf_events;

        int     (*perf_perm)(struct ftrace_event_call *,
                             struct perf_event *);
#endif
};

static inline const char *
ftrace_event_name(struct ftrace_event_call *call)
{
        if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
                return call->tp ? call->tp->name : NULL;
        else
                return call->name;
}

struct trace_array;
struct ftrace_subsystem_dir;

enum {
        FTRACE_EVENT_FL_ENABLED_BIT,
        FTRACE_EVENT_FL_RECORDED_CMD_BIT,
        FTRACE_EVENT_FL_FILTERED_BIT,
        FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
        FTRACE_EVENT_FL_SOFT_MODE_BIT,
        FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
        FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
        FTRACE_EVENT_FL_TRIGGER_COND_BIT,
};

/*
 * Ftrace event file flags:
 *  ENABLED       - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  FILTERED      - The event has a filter attached
 *  NO_SET_FILTER - Set when the filter has an error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                   tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 */
enum {
        FTRACE_EVENT_FL_ENABLED         = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
        FTRACE_EVENT_FL_RECORDED_CMD    = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
        FTRACE_EVENT_FL_FILTERED        = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
        FTRACE_EVENT_FL_NO_SET_FILTER   = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
        FTRACE_EVENT_FL_SOFT_MODE       = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
        FTRACE_EVENT_FL_SOFT_DISABLED   = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
        FTRACE_EVENT_FL_TRIGGER_MODE    = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
        FTRACE_EVENT_FL_TRIGGER_COND    = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
};

struct ftrace_event_file {
        struct list_head                list;
        struct ftrace_event_call        *event_call;
        struct event_filter             *filter;
        struct dentry                   *dir;
        struct trace_array              *tr;
        struct ftrace_subsystem_dir     *system;
        struct list_head                triggers;

        /*
         * 32 bit flags:
         *   bit 0:             enabled
         *   bit 1:             enabled cmd record
         *   bit 2:             filter attached
         *   bit 3:             filter failed to apply (no set filter)
         *   bit 4:             soft mode (enable/disable via the soft disable bit)
         *   bit 5:             soft disabled
         *   bit 6:             trigger mode enabled
         *   bit 7:             one or more triggers have a filter (trigger cond)
         *
         * Note: The bits must be set atomically to prevent races
         * from other writers. Reads of flags do not need to be in
         * sync as they occur in critical sections. But the way flags
         * is currently used, these changes do not affect the code
         * except that when a change is made, it may have a slight
         * delay in propagating the changes to other CPUs due to
         * caching and such, which is mostly OK ;-)
         */
        unsigned long           flags;
        atomic_t                sm_ref; /* soft-mode reference counter */
        atomic_t                tm_ref; /* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)                                \
        static int __init trace_init_flags_##name(void)                 \
        {                                                               \
                event_##name.flags |= value;                            \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)                          \
        static int perf_perm_##name(struct ftrace_event_call *tp_event, \
                                    struct perf_event *p_event)         \
        {                                                               \
                return ({ expr; });                                     \
        }                                                               \
        static int __init trace_init_perf_perm_##name(void)             \
        {                                                               \
                event_##name.perf_perm = &perf_perm_##name;             \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_perf_perm_##name);

#define PERF_MAX_TRACE_SIZE     2048

#define MAX_FILTER_STR_VAL      256     /* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
        ETT_NONE                = (0),
        ETT_TRACE_ONOFF         = (1 << 0),
        ETT_SNAPSHOT            = (1 << 1),
        ETT_STACKTRACE          = (1 << 2),
        ETT_EVENT_ENABLE        = (1 << 3),
};

extern void destroy_preds(struct ftrace_event_file *file);
extern void destroy_call_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct event_filter *filter, void *rec);

extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
                                struct ring_buffer *buffer,
                                struct ring_buffer_event *event);
extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
                                     struct ring_buffer *buffer,
                                     struct ring_buffer_event *event);
extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
                                                   void *rec);
extern void event_triggers_post_call(struct ftrace_event_file *file,
                                     enum event_trigger_type tt);

/**
 * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
{
        unsigned long eflags = file->flags;

        if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
                if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
                        event_triggers_call(file, NULL);
                if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
                        return true;
        }
        return false;
}
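
/*
 * Illustrative use (a sketch, not part of this header; probe_foo() is a
 * hypothetical probe). A generated trace-event probe would typically bail
 * out early when the event is soft disabled and no conditional triggers
 * need the field data:
 *
 *      static void probe_foo(void *data)
 *      {
 *              struct ftrace_event_file *ftrace_file = data;
 *
 *              if (ftrace_trigger_soft_disabled(ftrace_file))
 *                      return;
 *              // ... reserve, fill and commit the event ...
 *      }
 */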

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct ftrace_event_file *file,
                             struct ring_buffer *buffer,
                             struct ring_buffer_event *event,
                             void *entry,
                             enum event_trigger_type *tt)
{
        unsigned long eflags = file->flags;

        if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
                *tt = event_triggers_call(file, entry);

        if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
                ring_buffer_discard_commit(buffer, event);
        else if (!filter_check_discard(file, entry, buffer, event))
                return false;

        return true;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * checks whether the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct ftrace_event_file *file,
                            struct ring_buffer *buffer,
                            struct ring_buffer_event *event,
                            void *entry, unsigned long irq_flags, int pc)
{
        enum event_trigger_type tt = ETT_NONE;

        if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
                trace_buffer_unlock_commit(buffer, event, irq_flags, pc);

        if (tt)
                event_triggers_post_call(file, tt);
}
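
/*
 * Illustrative flow (a sketch, not part of this header; 'foo_entry' and the
 * field assignment are hypothetical). Generated trace-event code pairs a
 * ring-buffer reserve with this helper roughly as follows:
 *
 *      struct ring_buffer *buffer;
 *      struct ring_buffer_event *event;
 *      struct foo_entry *entry;
 *      unsigned long irq_flags;
 *      int pc;
 *
 *      local_save_flags(irq_flags);
 *      pc = preempt_count();
 *      event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *                                              event_type, sizeof(*entry),
 *                                              irq_flags, pc);
 *      if (!event)
 *              return;
 *      entry = ring_buffer_event_data(event);
 *      entry->field = value;
 *      event_trigger_unlock_commit(ftrace_file, buffer, event, entry,
 *                                  irq_flags, pc);
 */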

/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * checks whether the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
                                 struct ring_buffer *buffer,
                                 struct ring_buffer_event *event,
                                 void *entry, unsigned long irq_flags, int pc,
                                 struct pt_regs *regs)
{
        enum event_trigger_type tt = ETT_NONE;

        if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
                trace_buffer_unlock_commit_regs(buffer, event,
                                                irq_flags, pc, regs);

        if (tt)
                event_triggers_post_call(file, tt);
}

enum {
        FILTER_OTHER = 0,
        FILTER_STATIC_STRING,
        FILTER_DYN_STRING,
        FILTER_PTR_STRING,
        FILTER_TRACE_FN,
};

extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
                              const char *name, int offset, int size,
                              int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
extern int trace_remove_event_call(struct ftrace_event_call *call);

#define is_signed_type(type)    (((type)(-1)) < (type)1)
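
/*
 * For example, is_signed_type(int) is true since (int)-1 < (int)1, while
 * is_signed_type(unsigned long) is false because (unsigned long)-1 wraps
 * to ULONG_MAX.
 */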

int trace_set_clr_event(const char *system, const char *event, int set);

/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant, even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)                            \
do {                                                                    \
        __trace_printk_check_format(fmt, ##args);                       \
        tracing_record_cmdline(current);                                \
        if (__builtin_constant_p(fmt)) {                                \
                static const char *trace_printk_fmt                     \
                  __attribute__((section("__trace_printk_fmt"))) =      \
                        __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                        \
                __trace_bprintk(ip, trace_printk_fmt, ##args);          \
        } else                                                          \
                __trace_printk(ip, fmt, ##args);                        \
} while (0)

/**
 * tracepoint_string - register a constant persistent string with the trace system
 * @str - a constant persistent string that will be referenced in tracepoints
 *
 * If constant strings are being used in tracepoints, it is faster and
 * more efficient to just save the pointer to the string and reference
 * that with a printf "%s" instead of saving the string in the ring buffer
 * and wasting space and time.
 *
 * The problem with the above approach is that userspace tools that read
 * the binary output of the trace buffers do not have access to the string.
 * Instead they just show the address of the string, which is not very
 * useful to users.
 *
 * With tracepoint_string(), the string will be registered with the tracing
 * system and exported to userspace via the debugfs/tracing/printk_formats
 * file that maps the string address to the string text. This way userspace
 * tools that read the binary buffers have a way to map the pointers to
 * the ASCII strings they represent.
 *
 * The @str used must be a constant string and persistent, as it would not
 * make sense to show a string that no longer exists. But it is still fine
 * to be used with modules, because when modules are unloaded, if they
 * had tracepoints, the ring buffers are cleared too. As long as the string
 * does not change during the life of the module, it is fine to use
 * tracepoint_string() within a module.
 */
#define tracepoint_string(str)                                          \
        ({                                                              \
                static const char *___tp_str __tracepoint_string = str; \
                ___tp_str;                                              \
        })
#define __tracepoint_string     __attribute__((section("__tracepoint_str")))
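
/*
 * Illustrative use (a sketch; trace_foo_msg() is a hypothetical tracepoint
 * that records a string pointer and prints it with "%s"):
 *
 *      static const char *start_msg;
 *
 *      void foo_init(void)
 *      {
 *              start_msg = tracepoint_string("foo: starting up");
 *      }
 *
 *      void foo_run(void)
 *      {
 *              trace_foo_msg(start_msg);
 *      }
 */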

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
                                    struct pt_regs *regs, int *rctxp);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
                      u64 count, struct pt_regs *regs, void *head,
                      struct task_struct *task)
{
        perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
}
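
/*
 * Illustrative flow (a sketch, not part of this header; the entry layout
 * and the addr/count values are hypothetical). A perf probe generated for
 * a trace event uses the prepare/submit pair roughly like this:
 *
 *      struct foo_entry *entry;
 *      struct hlist_head *head;
 *      struct pt_regs *regs;
 *      int rctx;
 *
 *      entry = perf_trace_buf_prepare(sizeof(*entry),
 *                                     event_call->event.type, regs, &rctx);
 *      if (!entry)
 *              return;
 *      entry->field = value;
 *      head = this_cpu_ptr(event_call->perf_events);
 *      perf_trace_buf_submit(entry, sizeof(*entry), rctx, 0, 1, regs,
 *                            head, NULL);
 */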
#endif

#endif /* _LINUX_FTRACE_EVENT_H */