linux/include/linux/kprobes.h
#ifndef _LINUX_KPROBES_H
#define _LINUX_KPROBES_H
/*
 *  Kernel Probes (KProbes)
 *  include/linux/kprobes.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes suggestions from
 *              Rusty Russell).
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2005-May     Hien Nguyen <hien@us.ibm.com> and Jim Keniston
 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *              <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/compiler.h>     /* for __kprobes */
#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#ifdef CONFIG_KPROBES
#include <asm/kprobes.h>

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE       0x00000001
#define KPROBE_HIT_SS           0x00000002
#define KPROBE_REENTER          0x00000004
#define KPROBE_HIT_SSDONE       0x00000008

#else /* CONFIG_KPROBES */
typedef int kprobe_opcode_t;
struct arch_specific_insn {
        int dummy;
};
#endif /* CONFIG_KPROBES */

struct kprobe;
struct pt_regs;
struct kretprobe;
struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
                                       unsigned long flags);
typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
                                       int trapnr);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
                                    struct pt_regs *);

struct kprobe {
        struct hlist_node hlist;

        /* list of kprobes for multi-handler support */
        struct list_head list;

        /* count the number of times this probe was temporarily disarmed */
        unsigned long nmissed;

        /* location of the probe point */
        kprobe_opcode_t *addr;

        /* Allow user to indicate symbol name of the probe point */
        const char *symbol_name;

        /* Offset into the symbol */
        unsigned int offset;

        /* Called before addr is executed. */
        kprobe_pre_handler_t pre_handler;

        /* Called after addr is executed, unless... */
        kprobe_post_handler_t post_handler;

        /*
         * ... called if executing addr causes a fault (e.g. a page fault).
         * Return 1 if it handled the fault, otherwise the kernel will see it.
         */
        kprobe_fault_handler_t fault_handler;

        /*
         * ... called if a breakpoint trap occurs in a probe handler.
         * Return 1 if it handled the break, otherwise the kernel will see it.
         */
        kprobe_break_handler_t break_handler;

        /* Saved opcode (which has been replaced with breakpoint) */
        kprobe_opcode_t opcode;

        /* copy of the original instruction */
        struct arch_specific_insn ainsn;

        /*
         * Indicates various status flags.
         * Protected by kprobe_mutex after this kprobe is registered.
         */
        u32 flags;
};
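
/*
 * Usage sketch (illustrative only, not part of this header): a minimal
 * kprobe that hooks a function by symbol name from a module.  The probed
 * symbol "do_sys_open" and the names my_pre/my_kp are assumptions made
 * for the example; returning 0 from the pre-handler lets the original
 * instruction execute normally.
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %s at %p\n", p->symbol_name, p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "do_sys_open",
 *		.pre_handler = my_pre,
 *	};
 *
 * Register with register_kprobe(&my_kp) (e.g. from module init) and tear
 * down with unregister_kprobe(&my_kp) from module exit.
 */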

/* Kprobe status flags */
#define KPROBE_FLAG_GONE        1 /* breakpoint has already gone */
#define KPROBE_FLAG_DISABLED    2 /* probe is temporarily disabled */
#define KPROBE_FLAG_OPTIMIZED   4 /*
                                   * probe is really optimized.
                                   * NOTE:
                                   * this flag is only for optimized_kprobe.
                                   */
#define KPROBE_FLAG_FTRACE      8 /* probe is using ftrace */

/* Has this kprobe gone? */
static inline int kprobe_gone(struct kprobe *p)
{
        return p->flags & KPROBE_FLAG_GONE;
}

/* Is this kprobe disabled? */
static inline int kprobe_disabled(struct kprobe *p)
{
        return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}

/* Is this kprobe really running the optimized path? */
static inline int kprobe_optimized(struct kprobe *p)
{
        return p->flags & KPROBE_FLAG_OPTIMIZED;
}

/* Does this kprobe use ftrace? */
static inline int kprobe_ftrace(struct kprobe *p)
{
        return p->flags & KPROBE_FLAG_FTRACE;
}

/*
 * Special probe type that uses setjmp-longjmp type tricks to resume
 * execution at a specified entry with a matching prototype corresponding
 * to the probed function - a trick to enable arguments to become
 * accessible seamlessly by probe handling logic.
 * Note:
 * Because of the way compilers allocate stack space for local variables
 * etc. upfront, regardless of sub-scopes within a function, this mirroring
 * principle currently works only for probes placed on function entry points.
 */
struct jprobe {
        struct kprobe kp;
        void *entry;    /* probe handling code to jump to */
};

/* For backward compatibility with old code using JPROBE_ENTRY() */
#define JPROBE_ENTRY(handler)   (handler)
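
/*
 * Usage sketch (illustrative only): a jprobe handler mirrors the probed
 * function's prototype so the arguments arrive in the handler unchanged,
 * and it must finish with jprobe_return().  The probed function
 * my_driver_ioctl() and its prototype are assumptions for the example.
 *
 *	static long my_jhandler(struct file *filp, unsigned int cmd,
 *				unsigned long arg)
 *	{
 *		pr_info("ioctl cmd=0x%x arg=0x%lx\n", cmd, arg);
 *		jprobe_return();
 *		return 0;	(never reached)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= my_jhandler,
 *		.kp.symbol_name	= "my_driver_ioctl",
 *	};
 *
 * register_jprobe(&my_jp) arms the probe; unregister_jprobe(&my_jp)
 * removes it.
 */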
 173
 174/*
 175 * Function-return probe -
 176 * Note:
 177 * User needs to provide a handler function, and initialize maxactive.
 178 * maxactive - The maximum number of instances of the probed function that
 179 * can be active concurrently.
 180 * nmissed - tracks the number of times the probed function's return was
 181 * ignored, due to maxactive being too low.
 182 *
 183 */
 184struct kretprobe {
 185        struct kprobe kp;
 186        kretprobe_handler_t handler;
 187        kretprobe_handler_t entry_handler;
 188        int maxactive;
 189        int nmissed;
 190        size_t data_size;
 191        struct hlist_head free_instances;
 192        raw_spinlock_t lock;
 193};
 194
 195struct kretprobe_instance {
 196        struct hlist_node hlist;
 197        struct kretprobe *rp;
 198        kprobe_opcode_t *ret_addr;
 199        struct task_struct *task;
 200        char data[0];
 201};
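
/*
 * Usage sketch (illustrative only): a kretprobe that measures how long a
 * function takes.  Per-call scratch space of data_size bytes is carved out
 * of each kretprobe_instance's data[]; the entry_handler fills it in and
 * the return handler consumes it.  The probed symbol "do_fork" and
 * struct my_data are assumptions for the example.
 *
 *	struct my_data { ktime_t entry_time; };
 *
 *	static int my_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		((struct my_data *)ri->data)->entry_time = ktime_get();
 *		return 0;
 *	}
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		s64 delta = ktime_to_ns(ktime_sub(ktime_get(),
 *				((struct my_data *)ri->data)->entry_time));
 *		pr_info("returned %ld after %lld ns\n",
 *			regs_return_value(regs), delta);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.entry_handler	= my_entry,
 *		.handler	= my_ret,
 *		.data_size	= sizeof(struct my_data),
 *		.maxactive	= 20,
 *	};
 */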

struct kretprobe_blackpoint {
        const char *name;
        void *addr;
};

struct kprobe_blacklist_entry {
        struct list_head list;
        unsigned long start_addr;
        unsigned long end_addr;
};

#ifdef CONFIG_KPROBES
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/*
 * For #ifdef avoidance:
 */
static inline int kprobes_built_in(void)
{
        return 1;
}

#ifdef CONFIG_KRETPROBES
extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                   struct pt_regs *regs);
extern int arch_trampoline_kprobe(struct kprobe *p);
#else /* CONFIG_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                          struct pt_regs *regs)
{
}
static inline int arch_trampoline_kprobe(struct kprobe *p)
{
        return 0;
}
#endif /* CONFIG_KRETPROBES */

extern struct kretprobe_blackpoint kretprobe_blacklist[];

static inline void kretprobe_assert(struct kretprobe_instance *ri,
        unsigned long orig_ret_address, unsigned long trampoline_address)
{
        if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
                printk("kretprobe BUG!: Processing kretprobe %p @ %p\n",
                                ri->rp, ri->rp->kp.addr);
                BUG();
        }
}

#ifdef CONFIG_KPROBES_SANITY_TEST
extern int init_test_probes(void);
#else
static inline int init_test_probes(void)
{
        return 0;
}
#endif /* CONFIG_KPROBES_SANITY_TEST */

extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);

extern bool within_kprobe_blacklist(unsigned long addr);

struct kprobe_insn_cache {
        struct mutex mutex;
        void *(*alloc)(void);   /* allocate insn page */
        void (*free)(void *);   /* free insn page */
        struct list_head pages; /* list of kprobe_insn_page */
        size_t insn_size;       /* size of instruction slot */
        int nr_garbage;
};

extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
extern void __free_insn_slot(struct kprobe_insn_cache *c,
                             kprobe_opcode_t *slot, int dirty);

#define DEFINE_INSN_CACHE_OPS(__name)                                   \
extern struct kprobe_insn_cache kprobe_##__name##_slots;                \
                                                                        \
static inline kprobe_opcode_t *get_##__name##_slot(void)                \
{                                                                       \
        return __get_insn_slot(&kprobe_##__name##_slots);               \
}                                                                       \
                                                                        \
static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
{                                                                       \
        __free_insn_slot(&kprobe_##__name##_slots, slot, dirty);        \
}                                                                       \

DEFINE_INSN_CACHE_OPS(insn);
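
/*
 * For reference (illustrative expansion): DEFINE_INSN_CACHE_OPS(insn)
 * above generates roughly the following helpers around the shared
 * kprobe_insn_slots cache:
 *
 *	extern struct kprobe_insn_cache kprobe_insn_slots;
 *
 *	static inline kprobe_opcode_t *get_insn_slot(void)
 *	{
 *		return __get_insn_slot(&kprobe_insn_slots);
 *	}
 *
 *	static inline void free_insn_slot(kprobe_opcode_t *slot, int dirty)
 *	{
 *		__free_insn_slot(&kprobe_insn_slots, slot, dirty);
 *	}
 */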

#ifdef CONFIG_OPTPROBES
/*
 * Internal structure for direct jump optimized probe
 */
struct optimized_kprobe {
        struct kprobe kp;
        struct list_head list;  /* list for optimizing queue */
        struct arch_optimized_insn optinsn;
};

/* Architecture dependent functions for direct jump optimization */
extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
                                         struct kprobe *orig);
extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
extern void arch_optimize_kprobes(struct list_head *oplist);
extern void arch_unoptimize_kprobes(struct list_head *oplist,
                                    struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
                                        unsigned long addr);

extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);

DEFINE_INSN_CACHE_OPS(optinsn);

#ifdef CONFIG_SYSCTL
extern int sysctl_kprobes_optimization;
extern int proc_kprobes_optimization_handler(struct ctl_table *table,
                                             int write, void __user *buffer,
                                             size_t *length, loff_t *ppos);
#endif

#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *ops, struct pt_regs *regs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
#endif

int arch_check_ftrace_location(struct kprobe *p);

/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
void kretprobe_hash_lock(struct task_struct *tsk,
                         struct hlist_head **head, unsigned long *flags);
void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags);
struct hlist_head *kretprobe_inst_table_head(struct task_struct *tsk);

/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
        return (__this_cpu_read(current_kprobe));
}

static inline void reset_current_kprobe(void)
{
        __this_cpu_write(current_kprobe, NULL);
}

static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
{
        return this_cpu_ptr(&kprobe_ctlblk);
}

int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
int longjmp_break_handler(struct kprobe *, struct pt_regs *);
int register_jprobe(struct jprobe *p);
void unregister_jprobe(struct jprobe *p);
int register_jprobes(struct jprobe **jps, int num);
void unregister_jprobes(struct jprobe **jps, int num);
void jprobe_return(void);
unsigned long arch_deref_entry_point(void *);

int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);

void kprobe_flush_task(struct task_struct *tk);
void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);

int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);

void dump_kprobe(struct kprobe *kp);
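
/*
 * Usage sketch (illustrative only): registration can fail (for example
 * when the target symbol is blacklisted or cannot be resolved), so
 * callers are expected to check the return value.  The module wrapper,
 * the pr_err() message and my_kp (from the earlier sketch) are
 * assumptions:
 *
 *	static int __init my_probe_init(void)
 *	{
 *		int ret = register_kprobe(&my_kp);
 *
 *		if (ret < 0) {
 *			pr_err("register_kprobe failed: %d\n", ret);
 *			return ret;
 *		}
 *		return 0;
 *	}
 *
 *	static void __exit my_probe_exit(void)
 *	{
 *		unregister_kprobe(&my_kp);
 *	}
 */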

#else /* !CONFIG_KPROBES: */

static inline int kprobes_built_in(void)
{
        return 0;
}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        return 0;
}
static inline struct kprobe *get_kprobe(void *addr)
{
        return NULL;
}
static inline struct kprobe *kprobe_running(void)
{
        return NULL;
}
static inline int register_kprobe(struct kprobe *p)
{
        return -ENOSYS;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
        return -ENOSYS;
}
static inline void unregister_kprobe(struct kprobe *p)
{
}
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
static inline int register_jprobe(struct jprobe *p)
{
        return -ENOSYS;
}
static inline int register_jprobes(struct jprobe **jps, int num)
{
        return -ENOSYS;
}
static inline void unregister_jprobe(struct jprobe *p)
{
}
static inline void unregister_jprobes(struct jprobe **jps, int num)
{
}
static inline void jprobe_return(void)
{
}
static inline int register_kretprobe(struct kretprobe *rp)
{
        return -ENOSYS;
}
static inline int register_kretprobes(struct kretprobe **rps, int num)
{
        return -ENOSYS;
}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
}
static inline void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
static inline int disable_kprobe(struct kprobe *kp)
{
        return -ENOSYS;
}
static inline int enable_kprobe(struct kprobe *kp)
{
        return -ENOSYS;
}
#endif /* CONFIG_KPROBES */
static inline int disable_kretprobe(struct kretprobe *rp)
{
        return disable_kprobe(&rp->kp);
}
static inline int enable_kretprobe(struct kretprobe *rp)
{
        return enable_kprobe(&rp->kp);
}
static inline int disable_jprobe(struct jprobe *jp)
{
        return disable_kprobe(&jp->kp);
}
static inline int enable_jprobe(struct jprobe *jp)
{
        return enable_kprobe(&jp->kp);
}
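
/*
 * Usage sketch (illustrative only): a registered probe can be parked and
 * re-armed without a full unregister/register cycle, e.g. to trace only
 * while some hypothetical module flag "tracing_on" is set:
 *
 *	if (tracing_on)
 *		ret = enable_kretprobe(&my_rp);
 *	else
 *		ret = disable_kretprobe(&my_rp);
 *
 * A disabled probe keeps its entry in the kprobe hash table but its
 * breakpoint is typically disarmed, so the probed code runs at full speed.
 */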

#ifdef CONFIG_KPROBES
/*
 * Blacklist generating macro. Specify functions which are not probed
 * by using this macro.
 */
#define __NOKPROBE_SYMBOL(fname)                        \
static unsigned long __used                             \
        __attribute__((section("_kprobe_blacklist")))   \
        _kbl_addr_##fname = (unsigned long)fname;
#define NOKPROBE_SYMBOL(fname)  __NOKPROBE_SYMBOL(fname)
#else
#define NOKPROBE_SYMBOL(fname)
#endif
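
/*
 * Usage sketch (illustrative only): code that must never be probed (for
 * example a hypothetical helper that runs inside the kprobe trap path)
 * records its address in the _kprobe_blacklist section like this:
 *
 *	static int my_trap_helper(struct pt_regs *regs)
 *	{
 *		...
 *	}
 *	NOKPROBE_SYMBOL(my_trap_helper);
 *
 * register_kprobe() then refuses to place a probe on that function.
 */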

#endif /* _LINUX_KPROBES_H */