linux/include/linux/ftrace.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * Ftrace header.  For implementation details beyond the random comments
   4 * scattered below, see: Documentation/trace/ftrace-design.rst
   5 */
   6
   7#ifndef _LINUX_FTRACE_H
   8#define _LINUX_FTRACE_H
   9
  10#include <linux/trace_clock.h>
  11#include <linux/kallsyms.h>
  12#include <linux/linkage.h>
  13#include <linux/bitops.h>
  14#include <linux/ptrace.h>
  15#include <linux/ktime.h>
  16#include <linux/sched.h>
  17#include <linux/types.h>
  18#include <linux/init.h>
  19#include <linux/fs.h>
  20
  21#include <asm/ftrace.h>
  22
  23/*
  24 * If the arch supports passing the variable contents of
  25 * function_trace_op as the third parameter back from the
  26 * mcount call, then the arch should define this as 1.
  27 */
  28#ifndef ARCH_SUPPORTS_FTRACE_OPS
  29#define ARCH_SUPPORTS_FTRACE_OPS 0
  30#endif
  31
  32/*
  33 * If the arch's mcount caller does not support all of ftrace's
  34 * features, then it must call an indirect function that
  35 * does. Or at least does enough to prevent any unwelcome side effects.
  36 */
  37#if !ARCH_SUPPORTS_FTRACE_OPS
  38# define FTRACE_FORCE_LIST_FUNC 1
  39#else
  40# define FTRACE_FORCE_LIST_FUNC 0
  41#endif
  42
  43/* Main tracing buffer and events set up */
  44#ifdef CONFIG_TRACING
  45void trace_init(void);
  46void early_trace_init(void);
  47#else
  48static inline void trace_init(void) { }
  49static inline void early_trace_init(void) { }
  50#endif
  51
  52struct module;
  53struct ftrace_hash;
  54
  55#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
  56        defined(CONFIG_DYNAMIC_FTRACE)
  57const char *
  58ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
  59                   unsigned long *off, char **modname, char *sym);
  60int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
  61                           char *type, char *name,
  62                           char *module_name, int *exported);
  63#else
  64static inline const char *
  65ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
  66                   unsigned long *off, char **modname, char *sym)
  67{
  68        return NULL;
  69}
  70static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
  71                                         char *type, char *name,
  72                                         char *module_name, int *exported)
  73{
  74        return -1;
  75}
  76#endif
  77
  78
  79#ifdef CONFIG_FUNCTION_TRACER
  80
  81extern int ftrace_enabled;
  82extern int
  83ftrace_enable_sysctl(struct ctl_table *table, int write,
  84                     void __user *buffer, size_t *lenp,
  85                     loff_t *ppos);
  86
  87struct ftrace_ops;
  88
  89typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  90                              struct ftrace_ops *op, struct pt_regs *regs);
  91
  92ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  93
  94/*
  95 * FTRACE_OPS_FL_* bits denote the state of the ftrace_ops struct and are
  96 * set in the flags member.
  97 * SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
  98 * IPMODIFY are attribute flags which can only be set before
  99 * registering the ftrace_ops, and cannot be modified while registered.
 100 * Changing those attribute flags after registering the ftrace_ops will
 101 * cause unexpected results.
 102 *
 103 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 104 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 105 *           allocated ftrace_ops which need special care
 106 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 107 *            and passed to the callback. If this flag is set, but the
 108 *            architecture does not support passing regs
 109 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 110 *            ftrace_ops will fail to register, unless the next flag
 111 *            is set.
 112 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 113 *            handler can handle an arch that does not save regs
 114 *            (the handler tests if regs == NULL), then it can set
 115 *            this flag instead. It will not fail registering the ftrace_ops
 116 *            but, the regs field will be NULL if the arch does not support
 117 *            passing regs to the handler.
 118 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 119 *            get set upon registering the ftrace_ops, if the arch supports it.
 120 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 121 *            that the callback has its own recursion protection. If it does
 122 *            not set this, then the ftrace infrastructure will add recursion
 123 *            protection for the caller.
 124 * STUB   - The ftrace_ops is just a placeholder.
 125 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 126 *            register_ftrace_function() is called, it will initialize the ops)
 127 * DELETED - The ops are being deleted, do not let them be registered again.
 128 * ADDING  - The ops is in the process of being added.
 129 * REMOVING - The ops is in the process of being removed.
 130 * MODIFYING - The ops is in the process of changing its filter functions.
 131 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 132 *            The arch specific code sets this flag when it allocated a
 133 *            trampoline. This lets the arch know that it can update the
 134 *            trampoline in case the callback function changes.
 135 *            The ftrace_ops trampoline can be set by the ftrace users, and
 136 *            in such cases the arch must not modify it. Only the arch ftrace
 137 *            core code should set this flag.
 138 * IPMODIFY - The ops can modify the IP register. This can only be set with
 139 *            SAVE_REGS. If another ops with this flag set is already registered
 140 *            for any of the functions that this ops will be registered for, then
 141 *            this ops will fail to register or set_filter_ip.
 142 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 143 * RCU     - Set when the ops can only be called when RCU is watching.
 144 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 145 */
 146enum {
 147        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
 148        FTRACE_OPS_FL_DYNAMIC                   = 1 << 1,
 149        FTRACE_OPS_FL_SAVE_REGS                 = 1 << 2,
 150        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 3,
 151        FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 4,
 152        FTRACE_OPS_FL_STUB                      = 1 << 5,
 153        FTRACE_OPS_FL_INITIALIZED               = 1 << 6,
 154        FTRACE_OPS_FL_DELETED                   = 1 << 7,
 155        FTRACE_OPS_FL_ADDING                    = 1 << 8,
 156        FTRACE_OPS_FL_REMOVING                  = 1 << 9,
 157        FTRACE_OPS_FL_MODIFYING                 = 1 << 10,
 158        FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 11,
 159        FTRACE_OPS_FL_IPMODIFY                  = 1 << 12,
 160        FTRACE_OPS_FL_PID                       = 1 << 13,
 161        FTRACE_OPS_FL_RCU                       = 1 << 14,
 162        FTRACE_OPS_FL_TRACE_ARRAY               = 1 << 15,
 163};
 164
 165#ifdef CONFIG_DYNAMIC_FTRACE
 166/* The hashes used to know which functions the callbacks trace */
 167struct ftrace_ops_hash {
 168        struct ftrace_hash __rcu        *notrace_hash;
 169        struct ftrace_hash __rcu        *filter_hash;
 170        struct mutex                    regex_lock;
 171};
 172
 173void ftrace_free_init_mem(void);
 174void ftrace_free_mem(struct module *mod, void *start, void *end);
 175#else
 176static inline void ftrace_free_init_mem(void) { }
 177static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
 178#endif
 179
 180/*
 181 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 182 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 183 * core data, the unregistering of it will perform a scheduling on all CPUs
 184 * to make sure that there are no more users. Depending on the load of the
 185 * system that may take a bit of time.
 186 *
 187 * Any private data added must not be freed while it may still be in use. If
 188 * private data is added to a ftrace_ops that is in core code, the user of the
 189 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 190 */
 191struct ftrace_ops {
 192        ftrace_func_t                   func;
 193        struct ftrace_ops __rcu         *next;
 194        unsigned long                   flags;
 195        void                            *private;
 196        ftrace_func_t                   saved_func;
 197#ifdef CONFIG_DYNAMIC_FTRACE
 198        struct ftrace_ops_hash          local_hash;
 199        struct ftrace_ops_hash          *func_hash;
 200        struct ftrace_ops_hash          old_hash;
 201        unsigned long                   trampoline;
 202        unsigned long                   trampoline_size;
 203#endif
 204};
 205
 206/*
 207 * Type of the current tracing.
 208 */
 209enum ftrace_tracing_type_t {
 210        FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
 211        FTRACE_TYPE_RETURN,     /* Hook the return of the function */
 212};
 213
 214/* Current tracing type, default is FTRACE_TYPE_ENTER */
 215extern enum ftrace_tracing_type_t ftrace_tracing_type;
 216
 217/*
 218 * The ftrace_ops must be static and should also
 219 * be read_mostly.  These functions do modify read_mostly variables
 220 * so use them sparingly. Never free a ftrace_ops or modify the
 221 * next pointer after it has been registered. Even after unregistering
 222 * it, the next pointer may still be used internally.
 223 */
 224int register_ftrace_function(struct ftrace_ops *ops);
 225int unregister_ftrace_function(struct ftrace_ops *ops);
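
/*
 * Example (illustrative sketch only; my_callback and my_ops are made-up
 * names, not part of this header): a callback invoked at every traced
 * function.  The ops must stay allocated and untouched for as long as it
 * is registered:
 *
 *        static void my_callback(unsigned long ip, unsigned long parent_ip,
 *                                struct ftrace_ops *op, struct pt_regs *regs)
 *        {
 *                ... regs is NULL if the arch cannot or was not asked to save them ...
 *        }
 *
 *        static struct ftrace_ops my_ops = {
 *                .func  = my_callback,
 *                .flags = FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *        };
 *
 *        register_ftrace_function(&my_ops);
 *        ...
 *        unregister_ftrace_function(&my_ops);
 */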
 226
 227extern void ftrace_stub(unsigned long a0, unsigned long a1,
 228                        struct ftrace_ops *op, struct pt_regs *regs);
 229
 230#else /* !CONFIG_FUNCTION_TRACER */
 231/*
 232 * (un)register_ftrace_function must be a macro since the ops parameter
 233 * must not be evaluated.
 234 */
 235#define register_ftrace_function(ops) ({ 0; })
 236#define unregister_ftrace_function(ops) ({ 0; })
 237static inline void ftrace_kill(void) { }
 238static inline void ftrace_free_init_mem(void) { }
 239static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
 240#endif /* CONFIG_FUNCTION_TRACER */
 241
 242#ifdef CONFIG_STACK_TRACER
 243
 244#define STACK_TRACE_ENTRIES 500
 245
 246struct stack_trace;
 247
 248extern unsigned stack_trace_index[];
 249extern struct stack_trace stack_trace_max;
 250extern unsigned long stack_trace_max_size;
 251extern arch_spinlock_t stack_trace_max_lock;
 252
 253extern int stack_tracer_enabled;
 254void stack_trace_print(void);
 255int
 256stack_trace_sysctl(struct ctl_table *table, int write,
 257                   void __user *buffer, size_t *lenp,
 258                   loff_t *ppos);
 259
 260/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
 261DECLARE_PER_CPU(int, disable_stack_tracer);
 262
 263/**
 264 * stack_tracer_disable - temporarily disable the stack tracer
 265 *
 266 * There are a few locations (namely in RCU) where stack tracing
 267 * cannot be executed. This function is used to disable stack
 268 * tracing during those critical sections.
 269 *
 270 * This function must be called with preemption or interrupts
 271 * disabled and stack_tracer_enable() must be called shortly after
 272 * while preemption or interrupts are still disabled.
 273 */
 274static inline void stack_tracer_disable(void)
 275{
 276        /* Preemption or interrupts must be disabled */
 277        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
 278                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
 279        this_cpu_inc(disable_stack_tracer);
 280}
 281
 282/**
 283 * stack_tracer_enable - re-enable the stack tracer
 284 *
 285 * After stack_tracer_disable() is called, stack_tracer_enable()
 286 * must be called shortly afterward.
 287 */
 288static inline void stack_tracer_enable(void)
 289{
 290        if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
 291                WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
 292        this_cpu_dec(disable_stack_tracer);
 293}
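
/*
 * Usage sketch (illustrative only): the pair must bracket a region that
 * already runs with preemption or interrupts disabled, for example:
 *
 *        preempt_disable_notrace();
 *        stack_tracer_disable();
 *        ... code that must not be stack traced ...
 *        stack_tracer_enable();
 *        preempt_enable_notrace();
 */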
 294#else
 295static inline void stack_tracer_disable(void) { }
 296static inline void stack_tracer_enable(void) { }
 297#endif
 298
 299#ifdef CONFIG_DYNAMIC_FTRACE
 300
 301int ftrace_arch_code_modify_prepare(void);
 302int ftrace_arch_code_modify_post_process(void);
 303
 304struct dyn_ftrace;
 305
 306enum ftrace_bug_type {
 307        FTRACE_BUG_UNKNOWN,
 308        FTRACE_BUG_INIT,
 309        FTRACE_BUG_NOP,
 310        FTRACE_BUG_CALL,
 311        FTRACE_BUG_UPDATE,
 312};
 313extern enum ftrace_bug_type ftrace_bug_type;
 314
 315/*
 316 * Archs can set this to point to a variable that holds the value that was
 317 * expected at the call site before calling ftrace_bug().
 318 */
 319extern const void *ftrace_expected;
 320
 321void ftrace_bug(int err, struct dyn_ftrace *rec);
 322
 323struct seq_file;
 324
 325extern int ftrace_text_reserved(const void *start, const void *end);
 326
 327struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
 328
 329bool is_ftrace_trampoline(unsigned long addr);
 330
 331/*
 332 * The dyn_ftrace record's flags field is split into two parts.
 333 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 334 * the number of callbacks that have registered the function that
 335 * the dyn_ftrace descriptor represents.
 336 *
 337 * The second part is a mask:
 338 *  ENABLED - the function is being traced
 339 *  REGS    - the record wants the function to save regs
 340 *  REGS_EN - the function is set up to save regs.
 341 *  IPMODIFY - the record allows for the IP address to be changed.
 342 *  DISABLED - the record is not ready to be touched yet
 343 *
 344 * When a new ftrace_ops is registered and wants a function to save
 345 * pt_regs, the rec->flags REGS bit is set. When the function has been
 346 * set up to save regs, the REGS_EN flag is set. Once a function
 347 * starts saving regs it will do so until all ftrace_ops are removed
 348 * from tracing that function.
 349 */
 350enum {
 351        FTRACE_FL_ENABLED       = (1UL << 31),
 352        FTRACE_FL_REGS          = (1UL << 30),
 353        FTRACE_FL_REGS_EN       = (1UL << 29),
 354        FTRACE_FL_TRAMP         = (1UL << 28),
 355        FTRACE_FL_TRAMP_EN      = (1UL << 27),
 356        FTRACE_FL_IPMODIFY      = (1UL << 26),
 357        FTRACE_FL_DISABLED      = (1UL << 25),
 358};
 359
 360#define FTRACE_REF_MAX_SHIFT    25
 361#define FTRACE_FL_BITS          7
 362#define FTRACE_FL_MASKED_BITS   ((1UL << FTRACE_FL_BITS) - 1)
 363#define FTRACE_FL_MASK          (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
 364#define FTRACE_REF_MAX          ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
 365
 366#define ftrace_rec_count(rec)   ((rec)->flags & ~FTRACE_FL_MASK)
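
/*
 * Illustration (sketch only; rec is any struct dyn_ftrace pointer): the
 * counter and the flag bits share the flags word, so the two parts are
 * read separately:
 *
 *        if (ftrace_rec_count(rec) && (rec->flags & FTRACE_FL_ENABLED))
 *                ... at least one ops traces this site and it is patched in ...
 */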
 367
 368struct dyn_ftrace {
 369        unsigned long           ip; /* address of mcount call-site */
 370        unsigned long           flags;
 371        struct dyn_arch_ftrace  arch;
 372};
 373
 374int ftrace_force_update(void);
 375int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
 376                         int remove, int reset);
 377int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
 378                       int len, int reset);
 379int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
 380                        int len, int reset);
 381void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
 382void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
 383void ftrace_free_filter(struct ftrace_ops *ops);
 384void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
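
/*
 * Example sketch (my_ops and the chosen function are assumptions made for
 * illustration): filters are normally set up before the ops is registered,
 * e.g. to trace only schedule():
 *
 *        ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 1);
 *        register_ftrace_function(&my_ops);
 *
 * or, for a single known address:
 *
 *        ftrace_set_filter_ip(&my_ops, (unsigned long)schedule, 0, 0);
 */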
 385
 386enum {
 387        FTRACE_UPDATE_CALLS             = (1 << 0),
 388        FTRACE_DISABLE_CALLS            = (1 << 1),
 389        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
 390        FTRACE_START_FUNC_RET           = (1 << 3),
 391        FTRACE_STOP_FUNC_RET            = (1 << 4),
 392        FTRACE_MAY_SLEEP                = (1 << 5),
 393};
 394
 395/*
 396 * The FTRACE_UPDATE_* enum is used to pass information back
 397 * from the ftrace_update_record() and ftrace_test_record()
 398 * functions. These are called by the code update routines
 399 * to find out what is to be done for a given function.
 400 *
 401 *  IGNORE           - The function is already what we want it to be
 402 *  MAKE_CALL        - Start tracing the function
 403 *  MODIFY_CALL      - The call site needs to call a different trampoline
 404 *  MAKE_NOP         - Stop tracing the function
 405 */
 406enum {
 407        FTRACE_UPDATE_IGNORE,
 408        FTRACE_UPDATE_MAKE_CALL,
 409        FTRACE_UPDATE_MODIFY_CALL,
 410        FTRACE_UPDATE_MAKE_NOP,
 411};
 412
 413enum {
 414        FTRACE_ITER_FILTER      = (1 << 0),
 415        FTRACE_ITER_NOTRACE     = (1 << 1),
 416        FTRACE_ITER_PRINTALL    = (1 << 2),
 417        FTRACE_ITER_DO_PROBES   = (1 << 3),
 418        FTRACE_ITER_PROBE       = (1 << 4),
 419        FTRACE_ITER_MOD         = (1 << 5),
 420        FTRACE_ITER_ENABLED     = (1 << 6),
 421};
 422
 423void arch_ftrace_update_code(int command);
 424void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
 425void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
 426void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
 427
 428struct ftrace_rec_iter;
 429
 430struct ftrace_rec_iter *ftrace_rec_iter_start(void);
 431struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
 432struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
 433
 434#define for_ftrace_rec_iter(iter)               \
 435        for (iter = ftrace_rec_iter_start();    \
 436             iter;                              \
 437             iter = ftrace_rec_iter_next(iter))
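
/*
 * Usage sketch (illustrative; typically run from arch code reached via
 * arch_ftrace_update_code(), while the core keeps the records stable):
 *
 *        struct ftrace_rec_iter *iter;
 *        struct dyn_ftrace *rec;
 *
 *        for_ftrace_rec_iter(iter) {
 *                rec = ftrace_rec_iter_record(iter);
 *                ... inspect or patch the call site at rec->ip ...
 *        }
 */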
 438
 439
 440int ftrace_update_record(struct dyn_ftrace *rec, int enable);
 441int ftrace_test_record(struct dyn_ftrace *rec, int enable);
 442void ftrace_run_stop_machine(int command);
 443unsigned long ftrace_location(unsigned long ip);
 444unsigned long ftrace_location_range(unsigned long start, unsigned long end);
 445unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
 446unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
 447
 448extern ftrace_func_t ftrace_trace_function;
 449
 450int ftrace_regex_open(struct ftrace_ops *ops, int flag,
 451                  struct inode *inode, struct file *file);
 452ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
 453                            size_t cnt, loff_t *ppos);
 454ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
 455                             size_t cnt, loff_t *ppos);
 456int ftrace_regex_release(struct inode *inode, struct file *file);
 457
 458void __init
 459ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
 460
 461/* defined in arch */
 462extern int ftrace_ip_converted(unsigned long ip);
 463extern int ftrace_dyn_arch_init(void);
 464extern void ftrace_replace_code(int enable);
 465extern int ftrace_update_ftrace_func(ftrace_func_t func);
 466extern void ftrace_caller(void);
 467extern void ftrace_regs_caller(void);
 468extern void ftrace_call(void);
 469extern void ftrace_regs_call(void);
 470extern void mcount_call(void);
 471
 472void ftrace_modify_all_code(int command);
 473
 474#ifndef FTRACE_ADDR
 475#define FTRACE_ADDR ((unsigned long)ftrace_caller)
 476#endif
 477
 478#ifndef FTRACE_GRAPH_ADDR
 479#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
 480#endif
 481
 482#ifndef FTRACE_REGS_ADDR
 483#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 484# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
 485#else
 486# define FTRACE_REGS_ADDR FTRACE_ADDR
 487#endif
 488#endif
 489
 490/*
 491 * If an arch would like functions that are only traced
 492 * by the function graph tracer to jump directly to its own
 493 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 494 * to be that address to jump to.
 495 */
 496#ifndef FTRACE_GRAPH_TRAMP_ADDR
 497#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
 498#endif
 499
 500#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 501extern void ftrace_graph_caller(void);
 502extern int ftrace_enable_ftrace_graph_caller(void);
 503extern int ftrace_disable_ftrace_graph_caller(void);
 504#else
 505static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
 506static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
 507#endif
 508
 509/**
 510 * ftrace_make_nop - convert code into nop
 511 * @mod: module structure if called by module load initialization
 512 * @rec: the mcount call site record
 513 * @addr: the address that the call site should be calling
 514 *
 515 * This is a very sensitive operation and great care needs
 516 * to be taken by the arch.  The operation should carefully
 517 * read the location, check to see if what is read is indeed
 518 * what we expect it to be, and then on success of the compare,
 519 * it should write to the location.
 520 *
 521 * The code segment at @rec->ip should be a caller to @addr
 522 *
 523 * Return must be:
 524 *  0 on success
 525 *  -EFAULT on error reading the location
 526 *  -EINVAL on a failed compare of the contents
 527 *  -EPERM  on error writing to the location
 528 * Any other value will be considered a failure.
 529 */
 530extern int ftrace_make_nop(struct module *mod,
 531                           struct dyn_ftrace *rec, unsigned long addr);
 532
 533/**
 534 * ftrace_make_call - convert a nop call site into a call to addr
 535 * @rec: the mcount call site record
 536 * @addr: the address that the call site should call
 537 *
 538 * This is a very sensitive operation and great care needs
 539 * to be taken by the arch.  The operation should carefully
 540 * read the location, check to see if what is read is indeed
 541 * what we expect it to be, and then on success of the compare,
 542 * it should write to the location.
 543 *
 544 * The code segment at @rec->ip should be a nop
 545 *
 546 * Return must be:
 547 *  0 on success
 548 *  -EFAULT on error reading the location
 549 *  -EINVAL on a failed compare of the contents
 550 *  -EPERM  on error writing to the location
 551 * Any other value will be considered a failure.
 552 */
 553extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
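
/*
 * Shape of an arch implementation of the two helpers above (illustrative
 * pseudocode; old_insn, new_insn and the final patching step stand in for
 * arch-local helpers and are not defined here):
 *
 *        old_insn = instruction the site is expected to contain now;
 *        new_insn = instruction to install (nop or call);
 *
 *        if (probe_kernel_read(cur_insn, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *                return -EFAULT;
 *        if (memcmp(cur_insn, old_insn, MCOUNT_INSN_SIZE))
 *                return -EINVAL;
 *        if (patching new_insn into the site fails)
 *                return -EPERM;
 *        return 0;
 */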
 554
 555#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 556/**
 557 * ftrace_modify_call - convert from one addr to another (no nop)
 558 * @rec: the mcount call site record
 559 * @old_addr: the address expected to be currently called to
 560 * @addr: the address to change to
 561 *
 562 * This is a very sensitive operation and great care needs
 563 * to be taken by the arch.  The operation should carefully
 564 * read the location, check to see if what is read is indeed
 565 * what we expect it to be, and then on success of the compare,
 566 * it should write to the location.
 567 *
 568 * The code segment at @rec->ip should be a caller to @old_addr
 569 *
 570 * Return must be:
 571 *  0 on success
 572 *  -EFAULT on error reading the location
 573 *  -EINVAL on a failed compare of the contents
 574 *  -EPERM  on error writing to the location
 575 * Any other value will be considered a failure.
 576 */
 577extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 578                              unsigned long addr);
 579#else
 580/* Should never be called */
 581static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 582                                     unsigned long addr)
 583{
 584        return -EINVAL;
 585}
 586#endif
 587
 588/* May be defined in arch */
 589extern int ftrace_arch_read_dyn_info(char *buf, int size);
 590
 591extern int skip_trace(unsigned long ip);
 592extern void ftrace_module_init(struct module *mod);
 593extern void ftrace_module_enable(struct module *mod);
 594extern void ftrace_release_mod(struct module *mod);
 595
 596extern void ftrace_disable_daemon(void);
 597extern void ftrace_enable_daemon(void);
 598#else /* CONFIG_DYNAMIC_FTRACE */
 599static inline int skip_trace(unsigned long ip) { return 0; }
 600static inline int ftrace_force_update(void) { return 0; }
 601static inline void ftrace_disable_daemon(void) { }
 602static inline void ftrace_enable_daemon(void) { }
 603static inline void ftrace_module_init(struct module *mod) { }
 604static inline void ftrace_module_enable(struct module *mod) { }
 605static inline void ftrace_release_mod(struct module *mod) { }
 606static inline int ftrace_text_reserved(const void *start, const void *end)
 607{
 608        return 0;
 609}
 610static inline unsigned long ftrace_location(unsigned long ip)
 611{
 612        return 0;
 613}
 614
 615/*
 616 * Again, users of functions that take a ftrace_ops may not
 617 * have them defined when ftrace is not enabled, but these
 618 * functions may still be called. Use macros instead of inlines.
 619 */
 620#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
 621#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
 622#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
 623#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
 624#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
 625#define ftrace_free_filter(ops) do { } while (0)
 626#define ftrace_ops_set_global_filter(ops) do { } while (0)
 627
 628static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
 629                            size_t cnt, loff_t *ppos) { return -ENODEV; }
 630static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
 631                             size_t cnt, loff_t *ppos) { return -ENODEV; }
 632static inline int
 633ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
 634
 635static inline bool is_ftrace_trampoline(unsigned long addr)
 636{
 637        return false;
 638}
 639#endif /* CONFIG_DYNAMIC_FTRACE */
 640
 641/* totally disable ftrace - cannot be re-enabled after this */
 642void ftrace_kill(void);
 643
 644static inline void tracer_disable(void)
 645{
 646#ifdef CONFIG_FUNCTION_TRACER
 647        ftrace_enabled = 0;
 648#endif
 649}
 650
 651/*
 652 * Ftrace disable/restore without lock. Some synchronization mechanism
 653 * must be used to prevent ftrace_enabled from being changed between
 654 * disable/restore.
 655 */
 656static inline int __ftrace_enabled_save(void)
 657{
 658#ifdef CONFIG_FUNCTION_TRACER
 659        int saved_ftrace_enabled = ftrace_enabled;
 660        ftrace_enabled = 0;
 661        return saved_ftrace_enabled;
 662#else
 663        return 0;
 664#endif
 665}
 666
 667static inline void __ftrace_enabled_restore(int enabled)
 668{
 669#ifdef CONFIG_FUNCTION_TRACER
 670        ftrace_enabled = enabled;
 671#endif
 672}
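
/*
 * Usage sketch (my_lock is an assumption; the caller provides whatever
 * serialization keeps ftrace_enabled stable across the pair):
 *
 *        mutex_lock(&my_lock);
 *        saved = __ftrace_enabled_save();
 *        ... region that must run with function tracing disabled ...
 *        __ftrace_enabled_restore(saved);
 *        mutex_unlock(&my_lock);
 */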
 673
 674/* All archs should have this, but we define it for consistency */
 675#ifndef ftrace_return_address0
 676# define ftrace_return_address0 __builtin_return_address(0)
 677#endif
 678
 679/* Archs may use other ways for ADDR1 and beyond */
 680#ifndef ftrace_return_address
 681# ifdef CONFIG_FRAME_POINTER
 682#  define ftrace_return_address(n) __builtin_return_address(n)
 683# else
 684#  define ftrace_return_address(n) 0UL
 685# endif
 686#endif
 687
 688#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
 689#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
 690#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
 691#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
 692#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
 693#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
 694#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
 695
 696static inline unsigned long get_lock_parent_ip(void)
 697{
 698        unsigned long addr = CALLER_ADDR0;
 699
 700        if (!in_lock_functions(addr))
 701                return addr;
 702        addr = CALLER_ADDR1;
 703        if (!in_lock_functions(addr))
 704                return addr;
 705        return CALLER_ADDR2;
 706}
 707
 708#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
 709  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
 710  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 711#else
 712/*
 713 * Use defines instead of static inlines because some arches will make code out
 714 * of the CALLER_ADDR macros, when we really want these to be a real nop.
 715 */
 716# define trace_preempt_on(a0, a1) do { } while (0)
 717# define trace_preempt_off(a0, a1) do { } while (0)
 718#endif
 719
 720#ifdef CONFIG_FTRACE_MCOUNT_RECORD
 721extern void ftrace_init(void);
 722#else
 723static inline void ftrace_init(void) { }
 724#endif
 725
 726/*
 727 * Structure that defines an entry function trace.
 728 * It's already packed but the attribute "packed" is needed
 729 * to remove extra padding at the end.
 730 */
 731struct ftrace_graph_ent {
 732        unsigned long func; /* Current function */
 733        int depth;
 734} __packed;
 735
 736/*
 737 * Structure that defines a return function trace.
 738 * It's already packed but the attribute "packed" is needed
 739 * to remove extra padding at the end.
 740 */
 741struct ftrace_graph_ret {
 742        unsigned long func; /* Current function */
 743        /* Number of functions that overran the depth limit for current task */
 744        unsigned long overrun;
 745        unsigned long long calltime;
 746        unsigned long long rettime;
 747        int depth;
 748} __packed;
 749
 750/* Type of the callback handlers for the function graph tracer */
 751typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
 752typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
 753
 754#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 755
 756struct fgraph_ops {
 757        trace_func_graph_ent_t          entryfunc;
 758        trace_func_graph_ret_t          retfunc;
 759};
 760
 761/*
 762 * Stack of return addresses for functions
 763 * of a thread.
 764 * Used in struct thread_info
 765 */
 766struct ftrace_ret_stack {
 767        unsigned long ret;
 768        unsigned long func;
 769        unsigned long long calltime;
 770#ifdef CONFIG_FUNCTION_PROFILER
 771        unsigned long long subtime;
 772#endif
 773#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 774        unsigned long fp;
 775#endif
 776#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 777        unsigned long *retp;
 778#endif
 779};
 780
 781/*
 782 * Primary handler of a function return.
 783 * It relies on ftrace_return_to_handler.
 784 * Defined in entry_32/64.S
 785 */
 786extern void return_to_handler(void);
 787
 788extern int
 789function_graph_enter(unsigned long ret, unsigned long func,
 790                     unsigned long frame_pointer, unsigned long *retp);
 791
 792struct ftrace_ret_stack *
 793ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
 794
 795unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 796                                    unsigned long ret, unsigned long *retp);
 797
 798/*
 799 * Sometimes we don't want to trace a function with the function
 800 * graph tracer, but we still want it to be traced by the usual function
 801 * tracer if the function graph tracer is not configured.
 802 */
 803#define __notrace_funcgraph             notrace
 804
 805#define FTRACE_RETFUNC_DEPTH 50
 806#define FTRACE_RETSTACK_ALLOC_SIZE 32
 807
 808extern int register_ftrace_graph(struct fgraph_ops *ops);
 809extern void unregister_ftrace_graph(struct fgraph_ops *ops);
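
/*
 * Example sketch (handler and ops names are made up for illustration):
 * a function graph client supplies an entry handler, which returns
 * nonzero to trace the function, and a return handler:
 *
 *        static int my_entry(struct ftrace_graph_ent *trace)
 *        {
 *                return 1;
 *        }
 *
 *        static void my_return(struct ftrace_graph_ret *trace)
 *        {
 *                ... trace->rettime - trace->calltime is the duration ...
 *        }
 *
 *        static struct fgraph_ops my_gops = {
 *                .entryfunc = my_entry,
 *                .retfunc   = my_return,
 *        };
 *
 *        register_ftrace_graph(&my_gops);
 *        ...
 *        unregister_ftrace_graph(&my_gops);
 */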
 810
 811extern bool ftrace_graph_is_dead(void);
 812extern void ftrace_graph_stop(void);
 813
 814/* The current handlers in use */
 815extern trace_func_graph_ret_t ftrace_graph_return;
 816extern trace_func_graph_ent_t ftrace_graph_entry;
 817
 818extern void ftrace_graph_init_task(struct task_struct *t);
 819extern void ftrace_graph_exit_task(struct task_struct *t);
 820extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
 821
 822static inline void pause_graph_tracing(void)
 823{
 824        atomic_inc(&current->tracing_graph_pause);
 825}
 826
 827static inline void unpause_graph_tracing(void)
 828{
 829        atomic_dec(&current->tracing_graph_pause);
 830}
 831#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
 832
 833#define __notrace_funcgraph
 834
 835static inline void ftrace_graph_init_task(struct task_struct *t) { }
 836static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 837static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
 838
 839/* Define as macros as fgraph_ops may not be defined */
 840#define register_ftrace_graph(ops) ({ -1; })
 841#define unregister_ftrace_graph(ops) do { } while (0)
 842
 843static inline unsigned long
 844ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
 845                      unsigned long *retp)
 846{
 847        return ret;
 848}
 849
 850static inline void pause_graph_tracing(void) { }
 851static inline void unpause_graph_tracing(void) { }
 852#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 853
 854#ifdef CONFIG_TRACING
 855
 856/* flags for current->trace */
 857enum {
 858        TSK_TRACE_FL_TRACE_BIT  = 0,
 859        TSK_TRACE_FL_GRAPH_BIT  = 1,
 860};
 861enum {
 862        TSK_TRACE_FL_TRACE      = 1 << TSK_TRACE_FL_TRACE_BIT,
 863        TSK_TRACE_FL_GRAPH      = 1 << TSK_TRACE_FL_GRAPH_BIT,
 864};
 865
 866static inline void set_tsk_trace_trace(struct task_struct *tsk)
 867{
 868        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
 869}
 870
 871static inline void clear_tsk_trace_trace(struct task_struct *tsk)
 872{
 873        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
 874}
 875
 876static inline int test_tsk_trace_trace(struct task_struct *tsk)
 877{
 878        return tsk->trace & TSK_TRACE_FL_TRACE;
 879}
 880
 881static inline void set_tsk_trace_graph(struct task_struct *tsk)
 882{
 883        set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
 884}
 885
 886static inline void clear_tsk_trace_graph(struct task_struct *tsk)
 887{
 888        clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
 889}
 890
 891static inline int test_tsk_trace_graph(struct task_struct *tsk)
 892{
 893        return tsk->trace & TSK_TRACE_FL_GRAPH;
 894}
 895
 896enum ftrace_dump_mode;
 897
 898extern enum ftrace_dump_mode ftrace_dump_on_oops;
 899extern int tracepoint_printk;
 900
 901extern void disable_trace_on_warning(void);
 902extern int __disable_trace_on_warning;
 903
 904int tracepoint_printk_sysctl(struct ctl_table *table, int write,
 905                             void __user *buffer, size_t *lenp,
 906                             loff_t *ppos);
 907
 908#else /* CONFIG_TRACING */
 909static inline void  disable_trace_on_warning(void) { }
 910#endif /* CONFIG_TRACING */
 911
 912#ifdef CONFIG_FTRACE_SYSCALLS
 913
 914unsigned long arch_syscall_addr(int nr);
 915
 916#endif /* CONFIG_FTRACE_SYSCALLS */
 917
 918#endif /* _LINUX_FTRACE_H */
 919