linux/include/linux/ftrace.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does, or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are attribute flags which can be set only before
 * registering the ftrace_ops, and can not be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote a dynamically
 *           allocated ftrace_ops which needs special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called on it, the ops is initialized)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated for this ops.
 *            The arch-specific code sets this flag when it allocates a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            registering this ops (or calling set_filter_ip on it) will fail.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION_SAFE		= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hashes used to determine which functions a callback traces */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If an ftrace_ops is dynamically allocated and is
 * not part of kernel core data, unregistering it will schedule on all
 * CPUs to make sure there are no more users. Depending on the load of
 * the system, that may take a bit of time.
 *
 * Any private data attached must likewise not be freed while still in
 * use; if private data is attached to an ftrace_ops that is part of
 * core code, the user of the ftrace_ops must perform a
 * schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

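/*
 * The two macros above are used together. A minimal sketch of the
 * traversal idiom (illustrative only; ip, parent_ip and regs stand in
 * for whatever the caller has in hand):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */
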
/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also be marked read_mostly.
 * These functions do modify read_mostly variables, so use them
 * sparingly. Never free an ftrace_ops or modify the next pointer after
 * it has been registered. Even after unregistering it, the next pointer
 * may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

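/*
 * Registration sketch (illustrative only; my_callback and my_ops are
 * hypothetical names, not part of this API): a statically allocated
 * ftrace_ops whose callback runs at every traced function entry.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		(called for every traced function; keep it fast)
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */
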
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
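
/*
 * Direct-call sketch (illustrative; my_func, my_tramp and my_new_tramp
 * are hypothetical names): attach an assembly trampoline directly to
 * one function's mcount/fentry call site, then swap or detach it.
 *
 *	register_ftrace_direct((unsigned long)my_func,
 *			       (unsigned long)my_tramp);
 *	modify_ftrace_direct((unsigned long)my_func,
 *			     (unsigned long)my_tramp,
 *			     (unsigned long)my_new_tramp);
 *	unregister_ftrace_direct((unsigned long)my_func,
 *				 (unsigned long)my_new_tramp);
 */
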
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, on x86 it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
						 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled, and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
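
/*
 * Pairing sketch (illustrative): both calls must happen in the same
 * preemption- or interrupt-disabled region.
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	... section that must not be stack traced ...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */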
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, bits 0 through FTRACE_REF_MAX_SHIFT-1, is a counter
 * of the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED   - the function is being traced
 *  REGS      - the record wants the function to save regs
 *  REGS_EN   - the function is set up to save regs
 *  TRAMP     - the record can use a dedicated ops trampoline
 *  TRAMP_EN  - the call site is set up to use the trampoline
 *  IPMODIFY  - the record allows for the IP address to be changed
 *  DISABLED  - the record is not ready to be touched yet
 *  DIRECT    - there is a direct function to call
 *  DIRECT_EN - the call site is set up to make the direct call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT	23
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)

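/*
 * Worked example (values from the definitions above): with
 * FTRACE_REF_MAX_SHIFT == 23, the low 23 bits of rec->flags count up
 * to FTRACE_REF_MAX (8388607) registered callbacks and are read with
 * ftrace_rec_count(), while bits 23-31 hold the FTRACE_FL_* state.
 */
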
struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);

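/*
 * Filtering sketch (illustrative; my_ops is hypothetical): restrict a
 * callback to a single function before registering it. The buf/len
 * pair takes a glob pattern; a nonzero reset clears any previous
 * filter first.
 *
 *	unsigned char pat[] = "vfs_read";
 *
 *	ftrace_set_filter(&my_ops, pat, sizeof(pat) - 1, 1);
 *	register_ftrace_function(&my_ops);
 */
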
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Change what the call site calls (e.g. to start
 *                     or stop saving regs, or to switch trampolines)
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
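
/*
 * Iteration sketch (illustrative): arch code can walk every dyn_ftrace
 * record, e.g. while kernel text is temporarily writable.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... inspect or patch rec->ip ...
 *	}
 */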

int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		  struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);


/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

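/*
 * The read/compare/write pattern the kernel-doc above describes, as a
 * rough arch-side sketch (illustrative only; ftrace_expected_code(),
 * ftrace_poke() and new_code are hypothetical, not part of this API):
 *
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *	if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;
 *	if (memcmp(cur, ftrace_expected_code(rec), MCOUNT_INSN_SIZE))
 *		return -EINVAL;
 *	if (ftrace_poke((void *)rec->ip, new_code, MCOUNT_INSN_SIZE))
 *		return -EPERM;
 *	return 0;
 */
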
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that take ftrace_ops may not have them
 * defined when ftrace is not enabled, but these functions may still be
 * called. Use macros instead of inlines so the ops argument is never
 * evaluated.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable and restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}

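/*
 * Pairing sketch (illustrative): save and restore around a region where
 * function tracing must stay off, with the caller providing whatever
 * serialization is needed against other writers of ftrace_enabled.
 *
 *	int saved = __ftrace_enabled_save();
 *	... region with tracing disabled ...
 *	__ftrace_enabled_restore(saved);
 */
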
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will
 * generate code for the CALLER_ADDR arguments, when we really want
 * these to be real nops.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION "__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	unsigned long long calltime;
	unsigned long long rettime;
	int depth;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t		entryfunc;
	trace_func_graph_ret_t		retfunc;
};

/*
 * Stack of return addresses for functions of a thread.
 * Used in struct task_struct.
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);

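/*
 * Registration sketch (illustrative; my_entry, my_return and my_gops
 * are hypothetical names): hook both function entry and return.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	(nonzero means "trace this function")
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */
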
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */