linux/kernel/kprobes.c
   1/*
   2 *  Kernel Probes (KProbes)
   3 *  kernel/kprobes.c
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; either version 2 of the License, or
   8 * (at your option) any later version.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  18 *
  19 * Copyright (C) IBM Corporation, 2002, 2004
  20 *
  21 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
  22 *              Probes initial implementation (includes suggestions from
  23 *              Rusty Russell).
  24 * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
  25 *              hlists and exceptions notifier as suggested by Andi Kleen.
  26 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  27 *              interface to access function arguments.
  28 * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  29 *              exceptions notifier to be first on the priority list.
  30 * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
  31 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
  32 *              <prasanna@in.ibm.com> added function-return probes.
  33 */
  34#include <linux/kprobes.h>
  35#include <linux/hash.h>
  36#include <linux/init.h>
  37#include <linux/slab.h>
  38#include <linux/stddef.h>
  39#include <linux/export.h>
  40#include <linux/moduleloader.h>
  41#include <linux/kallsyms.h>
  42#include <linux/freezer.h>
  43#include <linux/seq_file.h>
  44#include <linux/debugfs.h>
  45#include <linux/sysctl.h>
  46#include <linux/kdebug.h>
  47#include <linux/memory.h>
  48#include <linux/ftrace.h>
  49#include <linux/cpu.h>
  50#include <linux/jump_label.h>
  51
  52#include <asm/sections.h>
  53#include <asm/cacheflush.h>
  54#include <asm/errno.h>
  55#include <linux/uaccess.h>
  56
  57#define KPROBE_HASH_BITS 6
  58#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
  59
  60
  61static int kprobes_initialized;
  62static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
  63static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
  64
  65/* NOTE: change this value only with kprobe_mutex held */
  66static bool kprobes_all_disarmed;
  67
  68/* This protects kprobe_table and optimizing_list */
  69static DEFINE_MUTEX(kprobe_mutex);
  70static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
  71static struct {
  72        raw_spinlock_t lock ____cacheline_aligned_in_smp;
  73} kretprobe_table_locks[KPROBE_TABLE_SIZE];
  74
  75kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
  76                                        unsigned int __unused)
  77{
  78        return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
  79}
  80
  81static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
  82{
  83        return &(kretprobe_table_locks[hash].lock);
  84}
  85
  86/* Blacklist -- list of struct kprobe_blacklist_entry */
  87static LIST_HEAD(kprobe_blacklist);
  88
  89#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
  90/*
  91 * kprobe->ainsn.insn points to the copy of the instruction to be
  92 * single-stepped. x86_64, POWER4 and above have no-exec support and
  93 * stepping on the instruction on a vmalloced/kmalloced/data page
  94 * is a recipe for disaster
  95 */
  96struct kprobe_insn_page {
  97        struct list_head list;
  98        kprobe_opcode_t *insns;         /* Page of instruction slots */
  99        struct kprobe_insn_cache *cache;
 100        int nused;
 101        int ngarbage;
 102        char slot_used[];
 103};
 104
 105#define KPROBE_INSN_PAGE_SIZE(slots)                    \
 106        (offsetof(struct kprobe_insn_page, slot_used) + \
 107         (sizeof(char) * (slots)))
 108
 109static int slots_per_page(struct kprobe_insn_cache *c)
 110{
 111        return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
 112}
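/*
 * For a concrete sense of the slot arithmetic above, a stand-alone
 * user-space sketch follows. The values assumed (PAGE_SIZE 4096,
 * one-byte kprobe_opcode_t, MAX_INSN_SIZE 16) match x86 and are for
 * illustration only; other architectures differ.
 */
#include <stdio.h>
#include <stddef.h>

typedef unsigned char opcode_t;		/* x86: one-byte opcodes */

struct insn_page_hdr {			/* mirrors struct kprobe_insn_page */
	void *prev, *next;		/* stand-in for struct list_head */
	opcode_t *insns;
	void *cache;
	int nused;
	int ngarbage;
	char slot_used[];		/* one byte of state per slot */
};

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long insn_size = 16;	/* MAX_INSN_SIZE on x86 */
	unsigned long slots = page_size / (insn_size * sizeof(opcode_t));

	/* 4096 / 16 = 256 slots; the header carries one slot_used byte each */
	printf("slots per page: %lu\n", slots);
	printf("header size:    %lu bytes\n",
	       (unsigned long)(offsetof(struct insn_page_hdr, slot_used) + slots));
	return 0;
}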
 113
 114enum kprobe_slot_state {
 115        SLOT_CLEAN = 0,
 116        SLOT_DIRTY = 1,
 117        SLOT_USED = 2,
 118};
 119
 120static void *alloc_insn_page(void)
 121{
 122        return module_alloc(PAGE_SIZE);
 123}
 124
 125void __weak free_insn_page(void *page)
 126{
 127        module_memfree(page);
 128}
 129
 130struct kprobe_insn_cache kprobe_insn_slots = {
 131        .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
 132        .alloc = alloc_insn_page,
 133        .free = free_insn_page,
 134        .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
 135        .insn_size = MAX_INSN_SIZE,
 136        .nr_garbage = 0,
 137};
 138static int collect_garbage_slots(struct kprobe_insn_cache *c);
 139
 140/**
 141 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 142 * We allocate an executable page if there's no room on existing ones.
 143 */
 144kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 145{
 146        struct kprobe_insn_page *kip;
 147        kprobe_opcode_t *slot = NULL;
 148
 149        /* Since the slot array is not protected by rcu, we need a mutex */
 150        mutex_lock(&c->mutex);
 151 retry:
 152        rcu_read_lock();
 153        list_for_each_entry_rcu(kip, &c->pages, list) {
 154                if (kip->nused < slots_per_page(c)) {
 155                        int i;
 156                        for (i = 0; i < slots_per_page(c); i++) {
 157                                if (kip->slot_used[i] == SLOT_CLEAN) {
 158                                        kip->slot_used[i] = SLOT_USED;
 159                                        kip->nused++;
 160                                        slot = kip->insns + (i * c->insn_size);
 161                                        rcu_read_unlock();
 162                                        goto out;
 163                                }
 164                        }
 165                        /* kip->nused is broken. Fix it. */
 166                        kip->nused = slots_per_page(c);
 167                        WARN_ON(1);
 168                }
 169        }
 170        rcu_read_unlock();
 171
  172        /* If there are any garbage slots, collect them and try again. */
 173        if (c->nr_garbage && collect_garbage_slots(c) == 0)
 174                goto retry;
 175
 176        /* All out of space.  Need to allocate a new page. */
 177        kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 178        if (!kip)
 179                goto out;
 180
 181        /*
 182         * Use module_alloc so this page is within +/- 2GB of where the
 183         * kernel image and loaded module images reside. This is required
 184         * so x86_64 can correctly handle the %rip-relative fixups.
 185         */
 186        kip->insns = c->alloc();
 187        if (!kip->insns) {
 188                kfree(kip);
 189                goto out;
 190        }
 191        INIT_LIST_HEAD(&kip->list);
 192        memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
 193        kip->slot_used[0] = SLOT_USED;
 194        kip->nused = 1;
 195        kip->ngarbage = 0;
 196        kip->cache = c;
 197        list_add_rcu(&kip->list, &c->pages);
 198        slot = kip->insns;
 199out:
 200        mutex_unlock(&c->mutex);
 201        return slot;
 202}
 203
  204/* Return 1 if all garbage slots are collected, otherwise 0. */
 205static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
 206{
 207        kip->slot_used[idx] = SLOT_CLEAN;
 208        kip->nused--;
 209        if (kip->nused == 0) {
 210                /*
 211                 * Page is no longer in use.  Free it unless
 212                 * it's the last one.  We keep the last one
 213                 * so as not to have to set it up again the
 214                 * next time somebody inserts a probe.
 215                 */
 216                if (!list_is_singular(&kip->list)) {
 217                        list_del_rcu(&kip->list);
 218                        synchronize_rcu();
 219                        kip->cache->free(kip->insns);
 220                        kfree(kip);
 221                }
 222                return 1;
 223        }
 224        return 0;
 225}
 226
 227static int collect_garbage_slots(struct kprobe_insn_cache *c)
 228{
 229        struct kprobe_insn_page *kip, *next;
 230
  231        /* Ensure no CPU is still executing in any of the garbage slots */
 232        synchronize_sched();
 233
 234        list_for_each_entry_safe(kip, next, &c->pages, list) {
 235                int i;
 236                if (kip->ngarbage == 0)
 237                        continue;
  238        kip->ngarbage = 0;      /* we will collect all garbage slots */
 239                for (i = 0; i < slots_per_page(c); i++) {
 240                        if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
 241                                break;
 242                }
 243        }
 244        c->nr_garbage = 0;
 245        return 0;
 246}
 247
 248void __free_insn_slot(struct kprobe_insn_cache *c,
 249                      kprobe_opcode_t *slot, int dirty)
 250{
 251        struct kprobe_insn_page *kip;
 252        long idx;
 253
 254        mutex_lock(&c->mutex);
 255        rcu_read_lock();
 256        list_for_each_entry_rcu(kip, &c->pages, list) {
 257                idx = ((long)slot - (long)kip->insns) /
 258                        (c->insn_size * sizeof(kprobe_opcode_t));
 259                if (idx >= 0 && idx < slots_per_page(c))
 260                        goto out;
 261        }
 262        /* Could not find this slot. */
 263        WARN_ON(1);
 264        kip = NULL;
 265out:
 266        rcu_read_unlock();
 267        /* Mark and sweep: this may sleep */
 268        if (kip) {
 269                /* Check double free */
 270                WARN_ON(kip->slot_used[idx] != SLOT_USED);
 271                if (dirty) {
 272                        kip->slot_used[idx] = SLOT_DIRTY;
 273                        kip->ngarbage++;
 274                        if (++c->nr_garbage > slots_per_page(c))
 275                                collect_garbage_slots(c);
 276                } else {
 277                        collect_one_slot(kip, idx);
 278                }
 279        }
 280        mutex_unlock(&c->mutex);
 281}
 282
 283/*
 284 * Check given address is on the page of kprobe instruction slots.
 285 * This will be used for checking whether the address on a stack
 286 * is on a text area or not.
 287 */
 288bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
 289{
 290        struct kprobe_insn_page *kip;
 291        bool ret = false;
 292
 293        rcu_read_lock();
 294        list_for_each_entry_rcu(kip, &c->pages, list) {
 295                if (addr >= (unsigned long)kip->insns &&
 296                    addr < (unsigned long)kip->insns + PAGE_SIZE) {
 297                        ret = true;
 298                        break;
 299                }
 300        }
 301        rcu_read_unlock();
 302
 303        return ret;
 304}
 305
 306#ifdef CONFIG_OPTPROBES
 307/* For optimized_kprobe buffer */
 308struct kprobe_insn_cache kprobe_optinsn_slots = {
 309        .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
 310        .alloc = alloc_insn_page,
 311        .free = free_insn_page,
 312        .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
 313        /* .insn_size is initialized later */
 314        .nr_garbage = 0,
 315};
 316#endif
 317#endif
 318
  319/* We have preemption disabled, so it is safe to use __ versions */
 320static inline void set_kprobe_instance(struct kprobe *kp)
 321{
 322        __this_cpu_write(kprobe_instance, kp);
 323}
 324
 325static inline void reset_kprobe_instance(void)
 326{
 327        __this_cpu_write(kprobe_instance, NULL);
 328}
 329
 330/*
 331 * This routine is called either:
 332 *      - under the kprobe_mutex - during kprobe_[un]register()
 333 *                              OR
 334 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 335 */
 336struct kprobe *get_kprobe(void *addr)
 337{
 338        struct hlist_head *head;
 339        struct kprobe *p;
 340
 341        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
 342        hlist_for_each_entry_rcu(p, head, hlist) {
 343                if (p->addr == addr)
 344                        return p;
 345        }
 346
 347        return NULL;
 348}
 349NOKPROBE_SYMBOL(get_kprobe);
 350
 351static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
 352
 353/* Return true if the kprobe is an aggregator */
 354static inline int kprobe_aggrprobe(struct kprobe *p)
 355{
 356        return p->pre_handler == aggr_pre_handler;
 357}
 358
 359/* Return true(!0) if the kprobe is unused */
 360static inline int kprobe_unused(struct kprobe *p)
 361{
 362        return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
 363               list_empty(&p->list);
 364}
 365
 366/*
 367 * Keep all fields in the kprobe consistent
 368 */
 369static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
 370{
 371        memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
 372        memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
 373}
 374
 375#ifdef CONFIG_OPTPROBES
 376/* NOTE: change this value only with kprobe_mutex held */
 377static bool kprobes_allow_optimization;
 378
 379/*
  380 * Call all pre_handlers on the list, but ignore their return values.
  381 * This must be called from the arch-dependent optimized caller.
 382 */
 383void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
 384{
 385        struct kprobe *kp;
 386
 387        list_for_each_entry_rcu(kp, &p->list, list) {
 388                if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
 389                        set_kprobe_instance(kp);
 390                        kp->pre_handler(kp, regs);
 391                }
 392                reset_kprobe_instance();
 393        }
 394}
 395NOKPROBE_SYMBOL(opt_pre_handler);
 396
 397/* Free optimized instructions and optimized_kprobe */
 398static void free_aggr_kprobe(struct kprobe *p)
 399{
 400        struct optimized_kprobe *op;
 401
 402        op = container_of(p, struct optimized_kprobe, kp);
 403        arch_remove_optimized_kprobe(op);
 404        arch_remove_kprobe(p);
 405        kfree(op);
 406}
 407
 408/* Return true(!0) if the kprobe is ready for optimization. */
 409static inline int kprobe_optready(struct kprobe *p)
 410{
 411        struct optimized_kprobe *op;
 412
 413        if (kprobe_aggrprobe(p)) {
 414                op = container_of(p, struct optimized_kprobe, kp);
 415                return arch_prepared_optinsn(&op->optinsn);
 416        }
 417
 418        return 0;
 419}
 420
 421/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
 422static inline int kprobe_disarmed(struct kprobe *p)
 423{
 424        struct optimized_kprobe *op;
 425
  426        /* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
 427        if (!kprobe_aggrprobe(p))
 428                return kprobe_disabled(p);
 429
 430        op = container_of(p, struct optimized_kprobe, kp);
 431
 432        return kprobe_disabled(p) && list_empty(&op->list);
 433}
 434
 435/* Return true(!0) if the probe is queued on (un)optimizing lists */
 436static int kprobe_queued(struct kprobe *p)
 437{
 438        struct optimized_kprobe *op;
 439
 440        if (kprobe_aggrprobe(p)) {
 441                op = container_of(p, struct optimized_kprobe, kp);
 442                if (!list_empty(&op->list))
 443                        return 1;
 444        }
 445        return 0;
 446}
 447
 448/*
 449 * Return an optimized kprobe whose optimizing code replaces
  450 * instructions including addr (excluding the breakpoint itself).
 451 */
 452static struct kprobe *get_optimized_kprobe(unsigned long addr)
 453{
 454        int i;
 455        struct kprobe *p = NULL;
 456        struct optimized_kprobe *op;
 457
 458        /* Don't check i == 0, since that is a breakpoint case. */
 459        for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
 460                p = get_kprobe((void *)(addr - i));
 461
 462        if (p && kprobe_optready(p)) {
 463                op = container_of(p, struct optimized_kprobe, kp);
 464                if (arch_within_optimized_kprobe(op, addr))
 465                        return p;
 466        }
 467
 468        return NULL;
 469}
 470
 471/* Optimization staging list, protected by kprobe_mutex */
 472static LIST_HEAD(optimizing_list);
 473static LIST_HEAD(unoptimizing_list);
 474static LIST_HEAD(freeing_list);
 475
 476static void kprobe_optimizer(struct work_struct *work);
 477static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
 478#define OPTIMIZE_DELAY 5
 479
 480/*
 481 * Optimize (replace a breakpoint with a jump) kprobes listed on
 482 * optimizing_list.
 483 */
 484static void do_optimize_kprobes(void)
 485{
  486        /* Optimization is never done while kprobes are disarmed */
 487        if (kprobes_all_disarmed || !kprobes_allow_optimization ||
 488            list_empty(&optimizing_list))
 489                return;
 490
  491        /*
  492         * Optimization and unoptimization refer to online_cpus via
  493         * stop_machine(), while cpu-hotplug modifies online_cpus; at the
  494         * same time, text_mutex is taken both by cpu-hotplug and here.
  495         * This combination can deadlock: cpu-hotplug tries to lock
  496         * text_mutex, but stop_machine() cannot proceed because
  497         * online_cpus has changed.
  498         * To avoid this deadlock, call get_online_cpus() to prevent
  499         * cpu-hotplug from running while text_mutex is held.
  500         */
 501        get_online_cpus();
 502        mutex_lock(&text_mutex);
 503        arch_optimize_kprobes(&optimizing_list);
 504        mutex_unlock(&text_mutex);
 505        put_online_cpus();
 506}
 507
 508/*
 509 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 510 * if need) kprobes listed on unoptimizing_list.
 511 */
 512static void do_unoptimize_kprobes(void)
 513{
 514        struct optimized_kprobe *op, *tmp;
 515
  516        /* Unoptimization must always be carried out */
 517        if (list_empty(&unoptimizing_list))
 518                return;
 519
  520        /* Ditto for do_optimize_kprobes() */
 521        get_online_cpus();
 522        mutex_lock(&text_mutex);
 523        arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
  524        /* Loop over freeing_list for disarming */
 525        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 526                /* Disarm probes if marked disabled */
 527                if (kprobe_disabled(&op->kp))
 528                        arch_disarm_kprobe(&op->kp);
 529                if (kprobe_unused(&op->kp)) {
 530                        /*
 531                         * Remove unused probes from hash list. After waiting
 532                         * for synchronization, these probes are reclaimed.
 533                         * (reclaiming is done by do_free_cleaned_kprobes.)
 534                         */
 535                        hlist_del_rcu(&op->kp.hlist);
 536                } else
 537                        list_del_init(&op->list);
 538        }
 539        mutex_unlock(&text_mutex);
 540        put_online_cpus();
 541}
 542
  543/* Reclaim all kprobes on the freeing_list */
 544static void do_free_cleaned_kprobes(void)
 545{
 546        struct optimized_kprobe *op, *tmp;
 547
 548        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 549                BUG_ON(!kprobe_unused(&op->kp));
 550                list_del_init(&op->list);
 551                free_aggr_kprobe(&op->kp);
 552        }
 553}
 554
 555/* Start optimizer after OPTIMIZE_DELAY passed */
 556static void kick_kprobe_optimizer(void)
 557{
 558        schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 559}
 560
 561/* Kprobe jump optimizer */
 562static void kprobe_optimizer(struct work_struct *work)
 563{
 564        mutex_lock(&kprobe_mutex);
 565        /* Lock modules while optimizing kprobes */
 566        mutex_lock(&module_mutex);
 567
 568        /*
 569         * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
  570         * kprobes before waiting for the quiescence period.
 571         */
 572        do_unoptimize_kprobes();
 573
 574        /*
  575         * Step 2: Wait for a quiescence period to ensure all currently
  576         * running interrupts are done. Because an optprobe may modify
  577         * multiple instructions, there is a chance that an interrupt hits
  578         * mid-sequence and later returns into the 2nd-Nth byte of the
  579         * jump instruction. This wait avoids that.
 580         */
 581        synchronize_sched();
 582
  583        /* Step 3: Optimize kprobes after the quiescence period */
 584        do_optimize_kprobes();
 585
  586        /* Step 4: Free cleaned kprobes after the quiescence period */
 587        do_free_cleaned_kprobes();
 588
 589        mutex_unlock(&module_mutex);
 590        mutex_unlock(&kprobe_mutex);
 591
 592        /* Step 5: Kick optimizer again if needed */
 593        if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 594                kick_kprobe_optimizer();
 595}
 596
  597/* Wait for optimization and unoptimization to complete */
 598void wait_for_kprobe_optimizer(void)
 599{
 600        mutex_lock(&kprobe_mutex);
 601
 602        while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
 603                mutex_unlock(&kprobe_mutex);
 604
  605                /* this will also make optimizing_work execute immediately */
 606                flush_delayed_work(&optimizing_work);
 607                /* @optimizing_work might not have been queued yet, relax */
 608                cpu_relax();
 609
 610                mutex_lock(&kprobe_mutex);
 611        }
 612
 613        mutex_unlock(&kprobe_mutex);
 614}
 615
 616/* Optimize kprobe if p is ready to be optimized */
 617static void optimize_kprobe(struct kprobe *p)
 618{
 619        struct optimized_kprobe *op;
 620
 621        /* Check if the kprobe is disabled or not ready for optimization. */
 622        if (!kprobe_optready(p) || !kprobes_allow_optimization ||
 623            (kprobe_disabled(p) || kprobes_all_disarmed))
 624                return;
 625
  626        /* Neither break_handler nor post_handler is supported. */
 627        if (p->break_handler || p->post_handler)
 628                return;
 629
 630        op = container_of(p, struct optimized_kprobe, kp);
 631
  632        /* Check that no other kprobes overlap the optimized instructions */
 633        if (arch_check_optimized_kprobe(op) < 0)
 634                return;
 635
 636        /* Check if it is already optimized. */
 637        if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
 638                return;
 639        op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
 640
 641        if (!list_empty(&op->list))
  642                /* This probe is being unoptimized. Just dequeue it */
 643                list_del_init(&op->list);
 644        else {
 645                list_add(&op->list, &optimizing_list);
 646                kick_kprobe_optimizer();
 647        }
 648}
 649
  650/* Shortcut to direct unoptimization */
 651static void force_unoptimize_kprobe(struct optimized_kprobe *op)
 652{
 653        get_online_cpus();
 654        arch_unoptimize_kprobe(op);
 655        put_online_cpus();
 656        if (kprobe_disabled(&op->kp))
 657                arch_disarm_kprobe(&op->kp);
 658}
 659
 660/* Unoptimize a kprobe if p is optimized */
 661static void unoptimize_kprobe(struct kprobe *p, bool force)
 662{
 663        struct optimized_kprobe *op;
 664
 665        if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
  666                return; /* Neither an optprobe nor optimized */
 667
 668        op = container_of(p, struct optimized_kprobe, kp);
 669        if (!kprobe_optimized(p)) {
 670                /* Unoptimized or unoptimizing case */
 671                if (force && !list_empty(&op->list)) {
 672                        /*
  673                         * Forcibly unoptimize the kprobe only if it is
  674                         * queued for unoptimization. (There is no need to
  675                         * unoptimize an already unoptimized kprobe again.)
 676                         */
 677                        list_del_init(&op->list);
 678                        force_unoptimize_kprobe(op);
 679                }
 680                return;
 681        }
 682
 683        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 684        if (!list_empty(&op->list)) {
 685                /* Dequeue from the optimization queue */
 686                list_del_init(&op->list);
 687                return;
 688        }
 689        /* Optimized kprobe case */
 690        if (force)
 691                /* Forcibly update the code: this is a special case */
 692                force_unoptimize_kprobe(op);
 693        else {
 694                list_add(&op->list, &unoptimizing_list);
 695                kick_kprobe_optimizer();
 696        }
 697}
 698
  699/* Cancel unoptimization so the kprobe can be reused */
 700static void reuse_unused_kprobe(struct kprobe *ap)
 701{
 702        struct optimized_kprobe *op;
 703
 704        BUG_ON(!kprobe_unused(ap));
 705        /*
  706         * An unused kprobe MUST be in the middle of delayed unoptimization
  707         * (meaning the relative jump is still in place) and disabled.
 708         */
 709        op = container_of(ap, struct optimized_kprobe, kp);
 710        if (unlikely(list_empty(&op->list)))
 711                printk(KERN_WARNING "Warning: found a stray unused "
 712                        "aggrprobe@%p\n", ap->addr);
 713        /* Enable the probe again */
 714        ap->flags &= ~KPROBE_FLAG_DISABLED;
 715        /* Optimize it again (remove from op->list) */
 716        BUG_ON(!kprobe_optready(ap));
 717        optimize_kprobe(ap);
 718}
 719
 720/* Remove optimized instructions */
 721static void kill_optimized_kprobe(struct kprobe *p)
 722{
 723        struct optimized_kprobe *op;
 724
 725        op = container_of(p, struct optimized_kprobe, kp);
 726        if (!list_empty(&op->list))
 727                /* Dequeue from the (un)optimization queue */
 728                list_del_init(&op->list);
 729        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
 730
 731        if (kprobe_unused(p)) {
 732                /* Enqueue if it is unused */
 733                list_add(&op->list, &freeing_list);
 734                /*
 735                 * Remove unused probes from the hash list. After waiting
 736                 * for synchronization, this probe is reclaimed.
 737                 * (reclaiming is done by do_free_cleaned_kprobes().)
 738                 */
 739                hlist_del_rcu(&op->kp.hlist);
 740        }
 741
 742        /* Don't touch the code, because it is already freed. */
 743        arch_remove_optimized_kprobe(op);
 744}
 745
 746static inline
 747void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 748{
 749        if (!kprobe_ftrace(p))
 750                arch_prepare_optimized_kprobe(op, p);
 751}
 752
 753/* Try to prepare optimized instructions */
 754static void prepare_optimized_kprobe(struct kprobe *p)
 755{
 756        struct optimized_kprobe *op;
 757
 758        op = container_of(p, struct optimized_kprobe, kp);
 759        __prepare_optimized_kprobe(op, p);
 760}
 761
 762/* Allocate new optimized_kprobe and try to prepare optimized instructions */
 763static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 764{
 765        struct optimized_kprobe *op;
 766
 767        op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
 768        if (!op)
 769                return NULL;
 770
 771        INIT_LIST_HEAD(&op->list);
 772        op->kp.addr = p->addr;
 773        __prepare_optimized_kprobe(op, p);
 774
 775        return &op->kp;
 776}
 777
 778static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
 779
 780/*
 781 * Prepare an optimized_kprobe and optimize it
 782 * NOTE: p must be a normal registered kprobe
 783 */
 784static void try_to_optimize_kprobe(struct kprobe *p)
 785{
 786        struct kprobe *ap;
 787        struct optimized_kprobe *op;
 788
 789        /* Impossible to optimize ftrace-based kprobe */
 790        if (kprobe_ftrace(p))
 791                return;
 792
 793        /* For preparing optimization, jump_label_text_reserved() is called */
 794        jump_label_lock();
 795        mutex_lock(&text_mutex);
 796
 797        ap = alloc_aggr_kprobe(p);
 798        if (!ap)
 799                goto out;
 800
 801        op = container_of(ap, struct optimized_kprobe, kp);
 802        if (!arch_prepared_optinsn(&op->optinsn)) {
  803                /* If preparing the optimization failed, fall back to a plain kprobe */
 804                arch_remove_optimized_kprobe(op);
 805                kfree(op);
 806                goto out;
 807        }
 808
 809        init_aggr_kprobe(ap, p);
 810        optimize_kprobe(ap);    /* This just kicks optimizer thread */
 811
 812out:
 813        mutex_unlock(&text_mutex);
 814        jump_label_unlock();
 815}
 816
 817#ifdef CONFIG_SYSCTL
 818static void optimize_all_kprobes(void)
 819{
 820        struct hlist_head *head;
 821        struct kprobe *p;
 822        unsigned int i;
 823
 824        mutex_lock(&kprobe_mutex);
 825        /* If optimization is already allowed, just return */
 826        if (kprobes_allow_optimization)
 827                goto out;
 828
 829        kprobes_allow_optimization = true;
 830        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 831                head = &kprobe_table[i];
 832                hlist_for_each_entry_rcu(p, head, hlist)
 833                        if (!kprobe_disabled(p))
 834                                optimize_kprobe(p);
 835        }
 836        printk(KERN_INFO "Kprobes globally optimized\n");
 837out:
 838        mutex_unlock(&kprobe_mutex);
 839}
 840
 841static void unoptimize_all_kprobes(void)
 842{
 843        struct hlist_head *head;
 844        struct kprobe *p;
 845        unsigned int i;
 846
 847        mutex_lock(&kprobe_mutex);
 848        /* If optimization is already prohibited, just return */
 849        if (!kprobes_allow_optimization) {
 850                mutex_unlock(&kprobe_mutex);
 851                return;
 852        }
 853
 854        kprobes_allow_optimization = false;
 855        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 856                head = &kprobe_table[i];
 857                hlist_for_each_entry_rcu(p, head, hlist) {
 858                        if (!kprobe_disabled(p))
 859                                unoptimize_kprobe(p, false);
 860                }
 861        }
 862        mutex_unlock(&kprobe_mutex);
 863
 864        /* Wait for unoptimizing completion */
 865        wait_for_kprobe_optimizer();
 866        printk(KERN_INFO "Kprobes globally unoptimized\n");
 867}
 868
 869static DEFINE_MUTEX(kprobe_sysctl_mutex);
 870int sysctl_kprobes_optimization;
 871int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
 872                                      void __user *buffer, size_t *length,
 873                                      loff_t *ppos)
 874{
 875        int ret;
 876
 877        mutex_lock(&kprobe_sysctl_mutex);
 878        sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
 879        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
 880
 881        if (sysctl_kprobes_optimization)
 882                optimize_all_kprobes();
 883        else
 884                unoptimize_all_kprobes();
 885        mutex_unlock(&kprobe_sysctl_mutex);
 886
 887        return ret;
 888}
 889#endif /* CONFIG_SYSCTL */
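/*
 * Usage note: the handler above backs the debug.kprobes-optimization
 * sysctl (/proc/sys/debug/kprobes-optimization); writing 1 or 0 there
 * ends up calling optimize_all_kprobes() or unoptimize_all_kprobes()
 * respectively.
 */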
 890
 891/* Put a breakpoint for a probe. Must be called with text_mutex locked */
 892static void __arm_kprobe(struct kprobe *p)
 893{
 894        struct kprobe *_p;
 895
 896        /* Check collision with other optimized kprobes */
 897        _p = get_optimized_kprobe((unsigned long)p->addr);
 898        if (unlikely(_p))
 899                /* Fallback to unoptimized kprobe */
 900                unoptimize_kprobe(_p, true);
 901
 902        arch_arm_kprobe(p);
 903        optimize_kprobe(p);     /* Try to optimize (add kprobe to a list) */
 904}
 905
 906/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
 907static void __disarm_kprobe(struct kprobe *p, bool reopt)
 908{
 909        struct kprobe *_p;
 910
 911        /* Try to unoptimize */
 912        unoptimize_kprobe(p, kprobes_all_disarmed);
 913
 914        if (!kprobe_queued(p)) {
 915                arch_disarm_kprobe(p);
 916                /* If another kprobe was blocked, optimize it. */
 917                _p = get_optimized_kprobe((unsigned long)p->addr);
 918                if (unlikely(_p) && reopt)
 919                        optimize_kprobe(_p);
 920        }
  921        /* TODO: reoptimize other probes after this one has been unoptimized */
 922}
 923
 924#else /* !CONFIG_OPTPROBES */
 925
 926#define optimize_kprobe(p)                      do {} while (0)
 927#define unoptimize_kprobe(p, f)                 do {} while (0)
 928#define kill_optimized_kprobe(p)                do {} while (0)
 929#define prepare_optimized_kprobe(p)             do {} while (0)
 930#define try_to_optimize_kprobe(p)               do {} while (0)
 931#define __arm_kprobe(p)                         arch_arm_kprobe(p)
 932#define __disarm_kprobe(p, o)                   arch_disarm_kprobe(p)
 933#define kprobe_disarmed(p)                      kprobe_disabled(p)
 934#define wait_for_kprobe_optimizer()             do {} while (0)
 935
  936/* Without optimization support there should be no unused kprobes to reuse */
 937static void reuse_unused_kprobe(struct kprobe *ap)
 938{
 939        printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
 940        BUG_ON(kprobe_unused(ap));
 941}
 942
 943static void free_aggr_kprobe(struct kprobe *p)
 944{
 945        arch_remove_kprobe(p);
 946        kfree(p);
 947}
 948
 949static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 950{
 951        return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
 952}
 953#endif /* CONFIG_OPTPROBES */
 954
 955#ifdef CONFIG_KPROBES_ON_FTRACE
 956static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
 957        .func = kprobe_ftrace_handler,
 958        .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
 959};
 960static int kprobe_ftrace_enabled;
 961
  962/* Must ensure p->addr is really an ftrace location */
 963static int prepare_kprobe(struct kprobe *p)
 964{
 965        if (!kprobe_ftrace(p))
 966                return arch_prepare_kprobe(p);
 967
 968        return arch_prepare_kprobe_ftrace(p);
 969}
 970
 971/* Caller must lock kprobe_mutex */
 972static void arm_kprobe_ftrace(struct kprobe *p)
 973{
 974        int ret;
 975
 976        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 977                                   (unsigned long)p->addr, 0, 0);
 978        WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
 979        kprobe_ftrace_enabled++;
 980        if (kprobe_ftrace_enabled == 1) {
 981                ret = register_ftrace_function(&kprobe_ftrace_ops);
 982                WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
 983        }
 984}
 985
 986/* Caller must lock kprobe_mutex */
 987static void disarm_kprobe_ftrace(struct kprobe *p)
 988{
 989        int ret;
 990
 991        kprobe_ftrace_enabled--;
 992        if (kprobe_ftrace_enabled == 0) {
 993                ret = unregister_ftrace_function(&kprobe_ftrace_ops);
  994                WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
 995        }
 996        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 997                           (unsigned long)p->addr, 1, 0);
 998        WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
 999}
1000#else   /* !CONFIG_KPROBES_ON_FTRACE */
1001#define prepare_kprobe(p)       arch_prepare_kprobe(p)
1002#define arm_kprobe_ftrace(p)    do {} while (0)
1003#define disarm_kprobe_ftrace(p) do {} while (0)
1004#endif
1005
1006/* Arm a kprobe with text_mutex */
1007static void arm_kprobe(struct kprobe *kp)
1008{
1009        if (unlikely(kprobe_ftrace(kp))) {
1010                arm_kprobe_ftrace(kp);
1011                return;
1012        }
1013        /*
1014         * Here, since __arm_kprobe() doesn't use stop_machine(),
1015         * this doesn't cause deadlock on text_mutex. So, we don't
1016         * need get_online_cpus().
1017         */
1018        mutex_lock(&text_mutex);
1019        __arm_kprobe(kp);
1020        mutex_unlock(&text_mutex);
1021}
1022
1023/* Disarm a kprobe with text_mutex */
1024static void disarm_kprobe(struct kprobe *kp, bool reopt)
1025{
1026        if (unlikely(kprobe_ftrace(kp))) {
1027                disarm_kprobe_ftrace(kp);
1028                return;
1029        }
1030        /* Ditto */
1031        mutex_lock(&text_mutex);
1032        __disarm_kprobe(kp, reopt);
1033        mutex_unlock(&text_mutex);
1034}
1035
1036/*
1037 * Aggregate handlers for multiple kprobes support - these handlers
1038 * take care of invoking the individual kprobe handlers on p->list
1039 */
1040static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1041{
1042        struct kprobe *kp;
1043
1044        list_for_each_entry_rcu(kp, &p->list, list) {
1045                if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1046                        set_kprobe_instance(kp);
1047                        if (kp->pre_handler(kp, regs))
1048                                return 1;
1049                }
1050                reset_kprobe_instance();
1051        }
1052        return 0;
1053}
1054NOKPROBE_SYMBOL(aggr_pre_handler);
1055
1056static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1057                              unsigned long flags)
1058{
1059        struct kprobe *kp;
1060
1061        list_for_each_entry_rcu(kp, &p->list, list) {
1062                if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1063                        set_kprobe_instance(kp);
1064                        kp->post_handler(kp, regs, flags);
1065                        reset_kprobe_instance();
1066                }
1067        }
1068}
1069NOKPROBE_SYMBOL(aggr_post_handler);
1070
1071static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1072                              int trapnr)
1073{
1074        struct kprobe *cur = __this_cpu_read(kprobe_instance);
1075
1076        /*
 1077         * If we faulted during the execution of a user-specified
 1078         * probe handler, invoke just that probe's fault handler.
1079         */
1080        if (cur && cur->fault_handler) {
1081                if (cur->fault_handler(cur, regs, trapnr))
1082                        return 1;
1083        }
1084        return 0;
1085}
1086NOKPROBE_SYMBOL(aggr_fault_handler);
1087
1088static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
1089{
1090        struct kprobe *cur = __this_cpu_read(kprobe_instance);
1091        int ret = 0;
1092
1093        if (cur && cur->break_handler) {
1094                if (cur->break_handler(cur, regs))
1095                        ret = 1;
1096        }
1097        reset_kprobe_instance();
1098        return ret;
1099}
1100NOKPROBE_SYMBOL(aggr_break_handler);
1101
1102/* Walks the list and increments nmissed count for multiprobe case */
1103void kprobes_inc_nmissed_count(struct kprobe *p)
1104{
1105        struct kprobe *kp;
1106        if (!kprobe_aggrprobe(p)) {
1107                p->nmissed++;
1108        } else {
1109                list_for_each_entry_rcu(kp, &p->list, list)
1110                        kp->nmissed++;
1111        }
1112        return;
1113}
1114NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1115
1116void recycle_rp_inst(struct kretprobe_instance *ri,
1117                     struct hlist_head *head)
1118{
1119        struct kretprobe *rp = ri->rp;
1120
 1121        /* remove the rp instance from the kretprobe_inst_table */
1122        hlist_del(&ri->hlist);
1123        INIT_HLIST_NODE(&ri->hlist);
1124        if (likely(rp)) {
1125                raw_spin_lock(&rp->lock);
1126                hlist_add_head(&ri->hlist, &rp->free_instances);
1127                raw_spin_unlock(&rp->lock);
1128        } else
1129                /* Unregistering */
1130                hlist_add_head(&ri->hlist, head);
1131}
1132NOKPROBE_SYMBOL(recycle_rp_inst);
1133
1134void kretprobe_hash_lock(struct task_struct *tsk,
1135                         struct hlist_head **head, unsigned long *flags)
1136__acquires(hlist_lock)
1137{
1138        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1139        raw_spinlock_t *hlist_lock;
1140
1141        *head = &kretprobe_inst_table[hash];
1142        hlist_lock = kretprobe_table_lock_ptr(hash);
1143        raw_spin_lock_irqsave(hlist_lock, *flags);
1144}
1145NOKPROBE_SYMBOL(kretprobe_hash_lock);
1146
1147static void kretprobe_table_lock(unsigned long hash,
1148                                 unsigned long *flags)
1149__acquires(hlist_lock)
1150{
1151        raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1152        raw_spin_lock_irqsave(hlist_lock, *flags);
1153}
1154NOKPROBE_SYMBOL(kretprobe_table_lock);
1155
1156void kretprobe_hash_unlock(struct task_struct *tsk,
1157                           unsigned long *flags)
1158__releases(hlist_lock)
1159{
1160        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1161        raw_spinlock_t *hlist_lock;
1162
1163        hlist_lock = kretprobe_table_lock_ptr(hash);
1164        raw_spin_unlock_irqrestore(hlist_lock, *flags);
1165}
1166NOKPROBE_SYMBOL(kretprobe_hash_unlock);
1167
1168static void kretprobe_table_unlock(unsigned long hash,
1169                                   unsigned long *flags)
1170__releases(hlist_lock)
1171{
1172        raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1173        raw_spin_unlock_irqrestore(hlist_lock, *flags);
1174}
1175NOKPROBE_SYMBOL(kretprobe_table_unlock);
1176
1177/*
1178 * This function is called from finish_task_switch when task tk becomes dead,
1179 * so that we can recycle any function-return probe instances associated
 1180 * with this task. These left-over instances represent probed functions
1181 * that have been called but will never return.
1182 */
1183void kprobe_flush_task(struct task_struct *tk)
1184{
1185        struct kretprobe_instance *ri;
1186        struct hlist_head *head, empty_rp;
1187        struct hlist_node *tmp;
1188        unsigned long hash, flags = 0;
1189
1190        if (unlikely(!kprobes_initialized))
1191                /* Early boot.  kretprobe_table_locks not yet initialized. */
1192                return;
1193
1194        INIT_HLIST_HEAD(&empty_rp);
1195        hash = hash_ptr(tk, KPROBE_HASH_BITS);
1196        head = &kretprobe_inst_table[hash];
1197        kretprobe_table_lock(hash, &flags);
1198        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1199                if (ri->task == tk)
1200                        recycle_rp_inst(ri, &empty_rp);
1201        }
1202        kretprobe_table_unlock(hash, &flags);
1203        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
1204                hlist_del(&ri->hlist);
1205                kfree(ri);
1206        }
1207}
1208NOKPROBE_SYMBOL(kprobe_flush_task);
1209
1210static inline void free_rp_inst(struct kretprobe *rp)
1211{
1212        struct kretprobe_instance *ri;
1213        struct hlist_node *next;
1214
1215        hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1216                hlist_del(&ri->hlist);
1217                kfree(ri);
1218        }
1219}
1220
1221static void cleanup_rp_inst(struct kretprobe *rp)
1222{
1223        unsigned long flags, hash;
1224        struct kretprobe_instance *ri;
1225        struct hlist_node *next;
1226        struct hlist_head *head;
1227
1228        /* No race here */
1229        for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1230                kretprobe_table_lock(hash, &flags);
1231                head = &kretprobe_inst_table[hash];
1232                hlist_for_each_entry_safe(ri, next, head, hlist) {
1233                        if (ri->rp == rp)
1234                                ri->rp = NULL;
1235                }
1236                kretprobe_table_unlock(hash, &flags);
1237        }
1238        free_rp_inst(rp);
1239}
1240NOKPROBE_SYMBOL(cleanup_rp_inst);
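/*
 * A minimal sketch of the consumer side of the kretprobe machinery above
 * (instance allocation, per-task hashing, recycling). The probed symbol
 * is an illustrative assumption; this fragment is not part of this file.
 */
#include <linux/kprobes.h>

static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	/* regs_return_value(regs) yields the probed function's return value */
	return 0;
}

static struct kretprobe example_rp = {
	.kp.symbol_name = "do_sys_open",	/* assumed example target */
	.handler        = example_ret_handler,
	.maxactive      = 20,	/* instances parked on rp->free_instances */
};
/* register_kretprobe(&example_rp) / unregister_kretprobe(&example_rp) */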
1241
1242/*
 1243 * Add the new probe to ap->list. Fail if this is the
 1244 * second jprobe at the address - two jprobes can't coexist.
 1245 */
1246static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1247{
1248        BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1249
1250        if (p->break_handler || p->post_handler)
1251                unoptimize_kprobe(ap, true);    /* Fall back to normal kprobe */
1252
1253        if (p->break_handler) {
1254                if (ap->break_handler)
1255                        return -EEXIST;
1256                list_add_tail_rcu(&p->list, &ap->list);
1257                ap->break_handler = aggr_break_handler;
1258        } else
1259                list_add_rcu(&p->list, &ap->list);
1260        if (p->post_handler && !ap->post_handler)
1261                ap->post_handler = aggr_post_handler;
1262
1263        return 0;
1264}
1265
1266/*
1267 * Fill in the required fields of the "manager kprobe". Replace the
1268 * earlier kprobe in the hlist with the manager kprobe
1269 */
1270static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1271{
1272        /* Copy p's insn slot to ap */
1273        copy_kprobe(p, ap);
1274        flush_insn_slot(ap);
1275        ap->addr = p->addr;
1276        ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1277        ap->pre_handler = aggr_pre_handler;
1278        ap->fault_handler = aggr_fault_handler;
 1279        /* We don't care about a kprobe which has gone. */
1280        if (p->post_handler && !kprobe_gone(p))
1281                ap->post_handler = aggr_post_handler;
1282        if (p->break_handler && !kprobe_gone(p))
1283                ap->break_handler = aggr_break_handler;
1284
1285        INIT_LIST_HEAD(&ap->list);
1286        INIT_HLIST_NODE(&ap->hlist);
1287
1288        list_add_rcu(&p->list, &ap->list);
1289        hlist_replace_rcu(&p->hlist, &ap->hlist);
1290}
1291
1292/*
1293 * This is the second or subsequent kprobe at the address - handle
1294 * the intricacies
1295 */
1296static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1297{
1298        int ret = 0;
1299        struct kprobe *ap = orig_p;
1300
1301        /* For preparing optimization, jump_label_text_reserved() is called */
1302        jump_label_lock();
1303        /*
 1304         * Get online CPUs to avoid a text_mutex deadlock with stop_machine(),
 1305         * which is invoked by unoptimize_kprobe() in add_new_kprobe().
1306         */
1307        get_online_cpus();
1308        mutex_lock(&text_mutex);
1309
1310        if (!kprobe_aggrprobe(orig_p)) {
1311                /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1312                ap = alloc_aggr_kprobe(orig_p);
1313                if (!ap) {
1314                        ret = -ENOMEM;
1315                        goto out;
1316                }
1317                init_aggr_kprobe(ap, orig_p);
1318        } else if (kprobe_unused(ap))
1319                /* This probe is going to die. Rescue it */
1320                reuse_unused_kprobe(ap);
1321
1322        if (kprobe_gone(ap)) {
1323                /*
 1324                 * We are attempting to insert a new probe at the same
 1325                 * location as a probe in a module vaddr area that has
 1326                 * already been freed, so its instruction slot has already
 1327                 * been released. We need a new slot for the new probe.
1328                 */
1329                ret = arch_prepare_kprobe(ap);
1330                if (ret)
1331                        /*
 1332                         * Even if allocating a new slot fails, there is no
 1333                         * need to free the aggr_probe; it will be used next
 1334                         * time, or freed by unregister_kprobe().
1335                         */
1336                        goto out;
1337
1338                /* Prepare optimized instructions if possible. */
1339                prepare_optimized_kprobe(ap);
1340
1341                /*
1342                 * Clear gone flag to prevent allocating new slot again, and
1343                 * set disabled flag because it is not armed yet.
1344                 */
1345                ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1346                            | KPROBE_FLAG_DISABLED;
1347        }
1348
1349        /* Copy ap's insn slot to p */
1350        copy_kprobe(ap, p);
1351        ret = add_new_kprobe(ap, p);
1352
1353out:
1354        mutex_unlock(&text_mutex);
1355        put_online_cpus();
1356        jump_label_unlock();
1357
1358        if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1359                ap->flags &= ~KPROBE_FLAG_DISABLED;
1360                if (!kprobes_all_disarmed)
1361                        /* Arm the breakpoint again. */
1362                        arm_kprobe(ap);
1363        }
1364        return ret;
1365}
1366
1367bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1368{
1369        /* The __kprobes marked functions and entry code must not be probed */
1370        return addr >= (unsigned long)__kprobes_text_start &&
1371               addr < (unsigned long)__kprobes_text_end;
1372}
1373
1374bool within_kprobe_blacklist(unsigned long addr)
1375{
1376        struct kprobe_blacklist_entry *ent;
1377
1378        if (arch_within_kprobe_blacklist(addr))
1379                return true;
1380        /*
1381         * If there exists a kprobe_blacklist, verify and
1382         * fail any probe registration in the prohibited area
1383         */
1384        list_for_each_entry(ent, &kprobe_blacklist, list) {
1385                if (addr >= ent->start_addr && addr < ent->end_addr)
1386                        return true;
1387        }
1388
1389        return false;
1390}
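/*
 * For illustration, the two ways a function lands in the checks above:
 * the __kprobes annotation links it into the
 * [__kprobes_text_start, __kprobes_text_end) range tested by
 * arch_within_kprobe_blacklist(), while NOKPROBE_SYMBOL() records its
 * address in the section from which kprobe_blacklist is populated.
 */
#include <linux/kprobes.h>

static void __kprobes placed_in_kprobes_text(void)
{
	/* rejected by the .kprobes.text range check */
}

static void recorded_in_blacklist(void)
{
	/* rejected by the kprobe_blacklist list walk */
}
NOKPROBE_SYMBOL(recorded_in_blacklist);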
1391
1392/*
1393 * If we have a symbol_name argument, look it up and add the offset field
1394 * to it. This way, we can specify a relative address to a symbol.
1395 * This returns encoded errors if it fails to look up symbol or invalid
1396 * combination of parameters.
1397 */
1398static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1399                        const char *symbol_name, unsigned int offset)
1400{
1401        if ((symbol_name && addr) || (!symbol_name && !addr))
1402                goto invalid;
1403
1404        if (symbol_name) {
1405                addr = kprobe_lookup_name(symbol_name, offset);
1406                if (!addr)
1407                        return ERR_PTR(-ENOENT);
1408        }
1409
1410        addr = (kprobe_opcode_t *)(((char *)addr) + offset);
1411        if (addr)
1412                return addr;
1413
1414invalid:
1415        return ERR_PTR(-EINVAL);
1416}
1417
1418static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1419{
1420        return _kprobe_addr(p->addr, p->symbol_name, p->offset);
1421}
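/*
 * A sketch of the two addressing modes _kprobe_addr() accepts: exactly one
 * of .addr and .symbol_name may be set, and .offset is applied on top of
 * the resolved address. Both the symbol and the raw address below are
 * illustrative assumptions.
 */
static struct kprobe kp_by_symbol = {
	.symbol_name = "do_sys_open",	/* resolved via kprobe_lookup_name() */
	.offset      = 0,		/* probe the function entry */
};

static struct kprobe kp_by_address = {
	.addr = (kprobe_opcode_t *)0xffffffffa0001000UL, /* hypothetical */
};
/* Setting both .addr and .symbol_name makes kprobe_addr() return ERR_PTR(-EINVAL). */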
1422
1423/* Check passed kprobe is valid and return kprobe in kprobe_table. */
1424static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1425{
1426        struct kprobe *ap, *list_p;
1427
1428        ap = get_kprobe(p->addr);
1429        if (unlikely(!ap))
1430                return NULL;
1431
1432        if (p != ap) {
1433                list_for_each_entry_rcu(list_p, &ap->list, list)
1434                        if (list_p == p)
1435                        /* kprobe p is a valid probe */
1436                                goto valid;
1437                return NULL;
1438        }
1439valid:
1440        return ap;
1441}
1442
1443/* Return error if the kprobe is being re-registered */
1444static inline int check_kprobe_rereg(struct kprobe *p)
1445{
1446        int ret = 0;
1447
1448        mutex_lock(&kprobe_mutex);
1449        if (__get_valid_kprobe(p))
1450                ret = -EINVAL;
1451        mutex_unlock(&kprobe_mutex);
1452
1453        return ret;
1454}
1455
1456int __weak arch_check_ftrace_location(struct kprobe *p)
1457{
1458        unsigned long ftrace_addr;
1459
1460        ftrace_addr = ftrace_location((unsigned long)p->addr);
1461        if (ftrace_addr) {
1462#ifdef CONFIG_KPROBES_ON_FTRACE
1463                /* Given address is not on the instruction boundary */
1464                if ((unsigned long)p->addr != ftrace_addr)
1465                        return -EILSEQ;
1466                p->flags |= KPROBE_FLAG_FTRACE;
1467#else   /* !CONFIG_KPROBES_ON_FTRACE */
1468                return -EINVAL;
1469#endif
1470        }
1471        return 0;
1472}
1473
1474static int check_kprobe_address_safe(struct kprobe *p,
1475                                     struct module **probed_mod)
1476{
1477        int ret;
1478
1479        ret = arch_check_ftrace_location(p);
1480        if (ret)
1481                return ret;
1482        jump_label_lock();
1483        preempt_disable();
1484
1485        /* Ensure it is not in reserved area nor out of text */
1486        if (!kernel_text_address((unsigned long) p->addr) ||
1487            within_kprobe_blacklist((unsigned long) p->addr) ||
1488            jump_label_text_reserved(p->addr, p->addr)) {
1489                ret = -EINVAL;
1490                goto out;
1491        }
1492
 1493        /* Check if we are probing a module */
1494        *probed_mod = __module_text_address((unsigned long) p->addr);
1495        if (*probed_mod) {
1496                /*
1497                 * We must hold a refcount of the probed module while updating
1498                 * its code to prohibit unexpected unloading.
1499                 */
1500                if (unlikely(!try_module_get(*probed_mod))) {
1501                        ret = -ENOENT;
1502                        goto out;
1503                }
1504
1505                /*
 1506                 * If the module has already freed .init.text, we can't
 1507                 * insert kprobes there.
1508                 */
1509                if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1510                    (*probed_mod)->state != MODULE_STATE_COMING) {
1511                        module_put(*probed_mod);
1512                        *probed_mod = NULL;
1513                        ret = -ENOENT;
1514                }
1515        }
1516out:
1517        preempt_enable();
1518        jump_label_unlock();
1519
1520        return ret;
1521}
1522
1523int register_kprobe(struct kprobe *p)
1524{
1525        int ret;
1526        struct kprobe *old_p;
1527        struct module *probed_mod;
1528        kprobe_opcode_t *addr;
1529
1530        /* Adjust probe address from symbol */
1531        addr = kprobe_addr(p);
1532        if (IS_ERR(addr))
1533                return PTR_ERR(addr);
1534        p->addr = addr;
1535
1536        ret = check_kprobe_rereg(p);
1537        if (ret)
1538                return ret;
1539
1540        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1541        p->flags &= KPROBE_FLAG_DISABLED;
1542        p->nmissed = 0;
1543        INIT_LIST_HEAD(&p->list);
1544
1545        ret = check_kprobe_address_safe(p, &probed_mod);
1546        if (ret)
1547                return ret;
1548
1549        mutex_lock(&kprobe_mutex);
1550
1551        old_p = get_kprobe(p->addr);
1552        if (old_p) {
 1553                /* Since this may unoptimize old_p, it locks text_mutex. */
1554                ret = register_aggr_kprobe(old_p, p);
1555                goto out;
1556        }
1557
 1558        mutex_lock(&text_mutex);        /* Avoid concurrent text modification */
1559        ret = prepare_kprobe(p);
1560        mutex_unlock(&text_mutex);
1561        if (ret)
1562                goto out;
1563
1564        INIT_HLIST_NODE(&p->hlist);
1565        hlist_add_head_rcu(&p->hlist,
1566                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1567
1568        if (!kprobes_all_disarmed && !kprobe_disabled(p))
1569                arm_kprobe(p);
1570
1571        /* Try to optimize kprobe */
1572        try_to_optimize_kprobe(p);
1573
1574out:
1575        mutex_unlock(&kprobe_mutex);
1576
1577        if (probed_mod)
1578                module_put(probed_mod);
1579
1580        return ret;
1581}
1582EXPORT_SYMBOL_GPL(register_kprobe);
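/*
 * A minimal usage sketch (kept under "#if 0", not built): the typical
 * register_kprobe() flow from a module. The probed symbol name
 * "do_sys_open" and the handler names below are illustrative
 * assumptions, not part of this file.
 */
#if 0
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("kprobe hit at %p\n", p->addr);
        return 0;       /* 0: continue with single-stepping the insn */
}

static struct kprobe example_kp = {
        .symbol_name    = "do_sys_open",        /* hypothetical target */
        .pre_handler    = example_pre,
};

static int __init example_init(void)
{
        int ret = register_kprobe(&example_kp); /* armed on success */

        if (ret < 0)
                pr_err("register_kprobe failed: %d\n", ret);
        return ret;
}

static void __exit example_exit(void)
{
        unregister_kprobe(&example_kp);
}
#endif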
1583
1584/* Check if all probes on the aggrprobe are disabled */
1585static int aggr_kprobe_disabled(struct kprobe *ap)
1586{
1587        struct kprobe *kp;
1588
1589        list_for_each_entry_rcu(kp, &ap->list, list)
1590                if (!kprobe_disabled(kp))
1591                        /*
1592                         * There is an active probe on the list.
1593                         * We can't disable this ap.
1594                         */
1595                        return 0;
1596
1597        return 1;
1598}
1599
1600/* Disable one kprobe: Must be called with kprobe_mutex held */
1601static struct kprobe *__disable_kprobe(struct kprobe *p)
1602{
1603        struct kprobe *orig_p;
1604
1605        /* Get the original (top-level) kprobe to return */
1606        orig_p = __get_valid_kprobe(p);
1607        if (unlikely(orig_p == NULL))
1608                return NULL;
1609
1610        if (!kprobe_disabled(p)) {
1611                /* Disable probe if it is a child probe */
1612                if (p != orig_p)
1613                        p->flags |= KPROBE_FLAG_DISABLED;
1614
1615                /* Try to disarm and disable this/parent probe */
1616                if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1617                        /*
1618                         * If kprobes_all_disarmed is set, orig_p
1619                         * should have already been disarmed, so
1620                         * skip the unneeded disarming step.
1621                         */
1622                        if (!kprobes_all_disarmed)
1623                                disarm_kprobe(orig_p, true);
1624                        orig_p->flags |= KPROBE_FLAG_DISABLED;
1625                }
1626        }
1627
1628        return orig_p;
1629}
1630
1631/*
1632 * Unregister a kprobe without scheduler synchronization.
1633 */
1634static int __unregister_kprobe_top(struct kprobe *p)
1635{
1636        struct kprobe *ap, *list_p;
1637
1638        /* Disable kprobe. This will disarm it if needed. */
1639        ap = __disable_kprobe(p);
1640        if (ap == NULL)
1641                return -EINVAL;
1642
1643        if (ap == p)
1644                /*
1645                 * This probe is an independent (and non-optimized) kprobe
1646                 * (not an aggrprobe). Remove from the hash list.
1647                 */
1648                goto disarmed;
1649
1650        /* The following code expects this probe to be an aggrprobe */
1651        WARN_ON(!kprobe_aggrprobe(ap));
1652
1653        if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1654                /*
1655                 * !disarmed can happen if the probe is under delayed
1656                 * unoptimization.
1657                 */
1658                goto disarmed;
1659        else {
1660                /* If the probe being disabled has special handlers, update the aggrprobe */
1661                if (p->break_handler && !kprobe_gone(p))
1662                        ap->break_handler = NULL;
1663                if (p->post_handler && !kprobe_gone(p)) {
1664                        list_for_each_entry_rcu(list_p, &ap->list, list) {
1665                                if ((list_p != p) && (list_p->post_handler))
1666                                        goto noclean;
1667                        }
1668                        ap->post_handler = NULL;
1669                }
1670noclean:
1671                /*
1672                 * Remove from the aggrprobe: this path will do nothing in
1673                 * __unregister_kprobe_bottom().
1674                 */
1675                list_del_rcu(&p->list);
1676                if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1677                        /*
1678                         * Try to optimize this probe again, because its
1679                         * post handler may have been changed.
1680                         */
1681                        optimize_kprobe(ap);
1682        }
1683        return 0;
1684
1685disarmed:
1686        BUG_ON(!kprobe_disarmed(ap));
1687        hlist_del_rcu(&ap->hlist);
1688        return 0;
1689}
1690
1691static void __unregister_kprobe_bottom(struct kprobe *p)
1692{
1693        struct kprobe *ap;
1694
1695        if (list_empty(&p->list))
1696                /* This is an independent kprobe */
1697                arch_remove_kprobe(p);
1698        else if (list_is_singular(&p->list)) {
1699                /* This is the last child of an aggrprobe */
1700                ap = list_entry(p->list.next, struct kprobe, list);
1701                list_del(&p->list);
1702                free_aggr_kprobe(ap);
1703        }
1704        /* Otherwise, do nothing. */
1705}
1706
1707int register_kprobes(struct kprobe **kps, int num)
1708{
1709        int i, ret = 0;
1710
1711        if (num <= 0)
1712                return -EINVAL;
1713        for (i = 0; i < num; i++) {
1714                ret = register_kprobe(kps[i]);
1715                if (ret < 0) {
1716                        if (i > 0)
1717                                unregister_kprobes(kps, i);
1718                        break;
1719                }
1720        }
1721        return ret;
1722}
1723EXPORT_SYMBOL_GPL(register_kprobes);
1724
1725void unregister_kprobe(struct kprobe *p)
1726{
1727        unregister_kprobes(&p, 1);
1728}
1729EXPORT_SYMBOL_GPL(unregister_kprobe);
1730
1731void unregister_kprobes(struct kprobe **kps, int num)
1732{
1733        int i;
1734
1735        if (num <= 0)
1736                return;
1737        mutex_lock(&kprobe_mutex);
1738        for (i = 0; i < num; i++)
1739                if (__unregister_kprobe_top(kps[i]) < 0)
1740                        kps[i]->addr = NULL;
1741        mutex_unlock(&kprobe_mutex);
1742
1743        synchronize_sched();
1744        for (i = 0; i < num; i++)
1745                if (kps[i]->addr)
1746                        __unregister_kprobe_bottom(kps[i]);
1747}
1748EXPORT_SYMBOL_GPL(unregister_kprobes);
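/*
 * Batch API sketch (under "#if 0", not built): register_kprobes() is
 * all-or-nothing -- a mid-array failure unregisters the probes already
 * installed -- and unregister_kprobes() pays for a single
 * synchronize_sched() across the whole array. Symbol names here are
 * hypothetical.
 */
#if 0
static int batch_pre(struct kprobe *p, struct pt_regs *regs)
{
        return 0;       /* observe only */
}

static struct kprobe batch_kp_a = {
        .symbol_name    = "do_fork",    /* hypothetical */
        .pre_handler    = batch_pre,
};
static struct kprobe batch_kp_b = {
        .symbol_name    = "do_exit",    /* hypothetical */
        .pre_handler    = batch_pre,
};
static struct kprobe *batch_kps[] = { &batch_kp_a, &batch_kp_b };

static int __init batch_init(void)
{
        /* Either both probes are live afterwards, or neither is. */
        return register_kprobes(batch_kps, ARRAY_SIZE(batch_kps));
}

static void __exit batch_exit(void)
{
        unregister_kprobes(batch_kps, ARRAY_SIZE(batch_kps));
}
#endif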
1749
1750int __weak kprobe_exceptions_notify(struct notifier_block *self,
1751                                        unsigned long val, void *data)
1752{
1753        return NOTIFY_DONE;
1754}
1755NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1756
1757static struct notifier_block kprobe_exceptions_nb = {
1758        .notifier_call = kprobe_exceptions_notify,
1759        .priority = 0x7fffffff /* we need to be notified first */
1760};
1761
1762unsigned long __weak arch_deref_entry_point(void *entry)
1763{
1764        return (unsigned long)entry;
1765}
1766
1767int register_jprobes(struct jprobe **jps, int num)
1768{
1769        struct jprobe *jp;
1770        int ret = 0, i;
1771
1772        if (num <= 0)
1773                return -EINVAL;
1774        for (i = 0; i < num; i++) {
1775                unsigned long addr, offset;
1776                jp = jps[i];
1777                addr = arch_deref_entry_point(jp->entry);
1778
1779                /* Verify probepoint is a function entry point */
1780                if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
1781                    offset == 0) {
1782                        jp->kp.pre_handler = setjmp_pre_handler;
1783                        jp->kp.break_handler = longjmp_break_handler;
1784                        ret = register_kprobe(&jp->kp);
1785                } else
1786                        ret = -EINVAL;
1787
1788                if (ret < 0) {
1789                        if (i > 0)
1790                                unregister_jprobes(jps, i);
1791                        break;
1792                }
1793        }
1794        return ret;
1795}
1796EXPORT_SYMBOL_GPL(register_jprobes);
1797
1798int register_jprobe(struct jprobe *jp)
1799{
1800        return register_jprobes(&jp, 1);
1801}
1802EXPORT_SYMBOL_GPL(register_jprobe);
1803
1804void unregister_jprobe(struct jprobe *jp)
1805{
1806        unregister_jprobes(&jp, 1);
1807}
1808EXPORT_SYMBOL_GPL(unregister_jprobe);
1809
1810void unregister_jprobes(struct jprobe **jps, int num)
1811{
1812        int i;
1813
1814        if (num <= 0)
1815                return;
1816        mutex_lock(&kprobe_mutex);
1817        for (i = 0; i < num; i++)
1818                if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1819                        jps[i]->kp.addr = NULL;
1820        mutex_unlock(&kprobe_mutex);
1821
1822        synchronize_sched();
1823        for (i = 0; i < num; i++) {
1824                if (jps[i]->kp.addr)
1825                        __unregister_kprobe_bottom(&jps[i]->kp);
1826        }
1827}
1828EXPORT_SYMBOL_GPL(unregister_jprobes);
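/*
 * Jprobe sketch (under "#if 0", not built): the entry function mirrors
 * the probed function's signature so the arguments are directly
 * readable, and it must finish with jprobe_return(). The probed symbol
 * and its signature below are hypothetical.
 */
#if 0
static long example_jentry(unsigned long arg0, unsigned long arg1)
{
        pr_info("entered with args %lx, %lx\n", arg0, arg1);
        jprobe_return();        /* mandatory; never returns normally */
        return 0;               /* unreachable, keeps the compiler happy */
}

static struct jprobe example_jp = {
        .entry                  = example_jentry,
        .kp.symbol_name         = "some_probed_func",   /* hypothetical */
};

/* register_jprobe(&example_jp) / unregister_jprobe(&example_jp) as usual. */
#endif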
1829
1830#ifdef CONFIG_KRETPROBES
1831/*
1832 * This kprobe pre_handler is registered with every kretprobe. When the
1833 * probe hits, it will set up the return probe.
1834 */
1835static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1836{
1837        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1838        unsigned long hash, flags = 0;
1839        struct kretprobe_instance *ri;
1840
1841        /*
1842         * To avoid deadlocks, prohibit return probing in NMI contexts,
1843         * just skip the probe and increase the (inexact) 'nmissed'
1844         * statistical counter, so that the user is informed that
1845         * something happened:
1846         */
1847        if (unlikely(in_nmi())) {
1848                rp->nmissed++;
1849                return 0;
1850        }
1851
1852        /* TODO: consider swapping the RA only after the last pre_handler fires */
1853        hash = hash_ptr(current, KPROBE_HASH_BITS);
1854        raw_spin_lock_irqsave(&rp->lock, flags);
1855        if (!hlist_empty(&rp->free_instances)) {
1856                ri = hlist_entry(rp->free_instances.first,
1857                                struct kretprobe_instance, hlist);
1858                hlist_del(&ri->hlist);
1859                raw_spin_unlock_irqrestore(&rp->lock, flags);
1860
1861                ri->rp = rp;
1862                ri->task = current;
1863
1864                if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1865                        raw_spin_lock_irqsave(&rp->lock, flags);
1866                        hlist_add_head(&ri->hlist, &rp->free_instances);
1867                        raw_spin_unlock_irqrestore(&rp->lock, flags);
1868                        return 0;
1869                }
1870
1871                arch_prepare_kretprobe(ri, regs);
1872
1873                /* XXX(hch): why is there no hlist_move_head? */
1874                INIT_HLIST_NODE(&ri->hlist);
1875                kretprobe_table_lock(hash, &flags);
1876                hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1877                kretprobe_table_unlock(hash, &flags);
1878        } else {
1879                rp->nmissed++;
1880                raw_spin_unlock_irqrestore(&rp->lock, flags);
1881        }
1882        return 0;
1883}
1884NOKPROBE_SYMBOL(pre_handler_kretprobe);
1885
1886bool __weak arch_function_offset_within_entry(unsigned long offset)
1887{
1888        return !offset;
1889}
1890
1891bool function_offset_within_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1892{
1893        kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1894
1895        if (IS_ERR(kp_addr))
1896                return false;
1897
1898        if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
1899                                                !arch_function_offset_within_entry(offset))
1900                return false;
1901
1902        return true;
1903}
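/*
 * For example, "vfs_read" or "vfs_read+0x0" passes this check, while
 * "vfs_read+0x10" does not on most architectures (the default
 * arch_function_offset_within_entry() accepts only offset 0), so
 * register_kretprobe() below refuses mid-function probe points.
 */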
1904
1905int register_kretprobe(struct kretprobe *rp)
1906{
1907        int ret = 0;
1908        struct kretprobe_instance *inst;
1909        int i;
1910        void *addr;
1911
1912        if (!function_offset_within_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
1913                return -EINVAL;
1914
1915        if (kretprobe_blacklist_size) {
1916                addr = kprobe_addr(&rp->kp);
1917                if (IS_ERR(addr))
1918                        return PTR_ERR(addr);
1919
1920                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1921                        if (kretprobe_blacklist[i].addr == addr)
1922                                return -EINVAL;
1923                }
1924        }
1925
1926        rp->kp.pre_handler = pre_handler_kretprobe;
1927        rp->kp.post_handler = NULL;
1928        rp->kp.fault_handler = NULL;
1929        rp->kp.break_handler = NULL;
1930
1931        /* Pre-allocate memory for max kretprobe instances */
1932        if (rp->maxactive <= 0) {
1933#ifdef CONFIG_PREEMPT
1934                rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1935#else
1936                rp->maxactive = num_possible_cpus();
1937#endif
1938        }
1939        raw_spin_lock_init(&rp->lock);
1940        INIT_HLIST_HEAD(&rp->free_instances);
1941        for (i = 0; i < rp->maxactive; i++) {
1942                inst = kmalloc(sizeof(struct kretprobe_instance) +
1943                               rp->data_size, GFP_KERNEL);
1944                if (inst == NULL) {
1945                        free_rp_inst(rp);
1946                        return -ENOMEM;
1947                }
1948                INIT_HLIST_NODE(&inst->hlist);
1949                hlist_add_head(&inst->hlist, &rp->free_instances);
1950        }
1951
1952        rp->nmissed = 0;
1953        /* Establish function entry probe point */
1954        ret = register_kprobe(&rp->kp);
1955        if (ret != 0)
1956                free_rp_inst(rp);
1957        return ret;
1958}
1959EXPORT_SYMBOL_GPL(register_kretprobe);
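/*
 * Kretprobe sketch (under "#if 0", not built): data_size bytes are
 * appended to every kretprobe_instance, so the entry handler can stash
 * per-invocation state for the return handler. Names below are
 * hypothetical; the pattern follows samples/kprobes/kretprobe_example.c.
 */
#if 0
struct example_data {
        ktime_t entry_stamp;
};

static int example_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct example_data *d = (struct example_data *)ri->data;

        d->entry_stamp = ktime_get();
        return 0;       /* non-zero would recycle this instance unused */
}

static int example_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct example_data *d = (struct example_data *)ri->data;

        pr_info("took %lld ns\n",
                ktime_to_ns(ktime_sub(ktime_get(), d->entry_stamp)));
        return 0;
}

static struct kretprobe example_rp = {
        .kp.symbol_name = "some_probed_func",   /* hypothetical */
        .entry_handler  = example_entry,
        .handler        = example_ret,
        .data_size      = sizeof(struct example_data),
        .maxactive      = 20,   /* cap on concurrent instances */
};
#endif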
1960
1961int register_kretprobes(struct kretprobe **rps, int num)
1962{
1963        int ret = 0, i;
1964
1965        if (num <= 0)
1966                return -EINVAL;
1967        for (i = 0; i < num; i++) {
1968                ret = register_kretprobe(rps[i]);
1969                if (ret < 0) {
1970                        if (i > 0)
1971                                unregister_kretprobes(rps, i);
1972                        break;
1973                }
1974        }
1975        return ret;
1976}
1977EXPORT_SYMBOL_GPL(register_kretprobes);
1978
1979void unregister_kretprobe(struct kretprobe *rp)
1980{
1981        unregister_kretprobes(&rp, 1);
1982}
1983EXPORT_SYMBOL_GPL(unregister_kretprobe);
1984
1985void unregister_kretprobes(struct kretprobe **rps, int num)
1986{
1987        int i;
1988
1989        if (num <= 0)
1990                return;
1991        mutex_lock(&kprobe_mutex);
1992        for (i = 0; i < num; i++)
1993                if (__unregister_kprobe_top(&rps[i]->kp) < 0)
1994                        rps[i]->kp.addr = NULL;
1995        mutex_unlock(&kprobe_mutex);
1996
1997        synchronize_sched();
1998        for (i = 0; i < num; i++) {
1999                if (rps[i]->kp.addr) {
2000                        __unregister_kprobe_bottom(&rps[i]->kp);
2001                        cleanup_rp_inst(rps[i]);
2002                }
2003        }
2004}
2005EXPORT_SYMBOL_GPL(unregister_kretprobes);
2006
2007#else /* CONFIG_KRETPROBES */
2008int register_kretprobe(struct kretprobe *rp)
2009{
2010        return -ENOSYS;
2011}
2012EXPORT_SYMBOL_GPL(register_kretprobe);
2013
2014int register_kretprobes(struct kretprobe **rps, int num)
2015{
2016        return -ENOSYS;
2017}
2018EXPORT_SYMBOL_GPL(register_kretprobes);
2019
2020void unregister_kretprobe(struct kretprobe *rp)
2021{
2022}
2023EXPORT_SYMBOL_GPL(unregister_kretprobe);
2024
2025void unregister_kretprobes(struct kretprobe **rps, int num)
2026{
2027}
2028EXPORT_SYMBOL_GPL(unregister_kretprobes);
2029
2030static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2031{
2032        return 0;
2033}
2034NOKPROBE_SYMBOL(pre_handler_kretprobe);
2035
2036#endif /* CONFIG_KRETPROBES */
2037
2038/* Mark the kprobe as gone and remove its instruction buffer. */
2039static void kill_kprobe(struct kprobe *p)
2040{
2041        struct kprobe *kp;
2042
2043        p->flags |= KPROBE_FLAG_GONE;
2044        if (kprobe_aggrprobe(p)) {
2045                /*
2046                 * If this is an aggr_kprobe, we have to walk all the
2047                 * chained probes and mark them GONE.
2048                 */
2049                list_for_each_entry_rcu(kp, &p->list, list)
2050                        kp->flags |= KPROBE_FLAG_GONE;
2051                p->post_handler = NULL;
2052                p->break_handler = NULL;
2053                kill_optimized_kprobe(p);
2054        }
2055        /*
2056         * Here, we can remove insn_slot safely, because no thread calls
2057         * the original probed function (which will be freed soon) any more.
2058         */
2059        arch_remove_kprobe(p);
2060}
2061
2062/* Disable one kprobe */
2063int disable_kprobe(struct kprobe *kp)
2064{
2065        int ret = 0;
2066
2067        mutex_lock(&kprobe_mutex);
2068
2069        /* Disable this kprobe */
2070        if (__disable_kprobe(kp) == NULL)
2071                ret = -EINVAL;
2072
2073        mutex_unlock(&kprobe_mutex);
2074        return ret;
2075}
2076EXPORT_SYMBOL_GPL(disable_kprobe);
2077
2078/* Enable one kprobe */
2079int enable_kprobe(struct kprobe *kp)
2080{
2081        int ret = 0;
2082        struct kprobe *p;
2083
2084        mutex_lock(&kprobe_mutex);
2085
2086        /* Check whether the specified probe is valid. */
2087        p = __get_valid_kprobe(kp);
2088        if (unlikely(p == NULL)) {
2089                ret = -EINVAL;
2090                goto out;
2091        }
2092
2093        if (kprobe_gone(kp)) {
2094                /* This kprobe is gone; we can't enable it. */
2095                ret = -EINVAL;
2096                goto out;
2097        }
2098
2099        if (p != kp)
2100                kp->flags &= ~KPROBE_FLAG_DISABLED;
2101
2102        if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2103                p->flags &= ~KPROBE_FLAG_DISABLED;
2104                arm_kprobe(p);
2105        }
2106out:
2107        mutex_unlock(&kprobe_mutex);
2108        return ret;
2109}
2110EXPORT_SYMBOL_GPL(enable_kprobe);
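/*
 * Enable/disable sketch (under "#if 0", not built): a probe can be
 * registered disarmed by setting KPROBE_FLAG_DISABLED (the only flag
 * register_kprobe() accepts from users) and toggled cheaply afterwards.
 * The symbol name is hypothetical.
 */
#if 0
static struct kprobe toggled_kp = {
        .symbol_name    = "some_probed_func",   /* hypothetical */
        .flags          = KPROBE_FLAG_DISABLED, /* registered, not armed */
};

static void example_toggle(bool on)
{
        if (on)
                enable_kprobe(&toggled_kp);     /* arms unless globally disarmed */
        else
                disable_kprobe(&toggled_kp);    /* disarms via __disable_kprobe() */
}
#endif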
2111
2112void dump_kprobe(struct kprobe *kp)
2113{
2114        printk(KERN_WARNING "Dumping kprobe:\n");
2115        printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2116               kp->symbol_name, kp->addr, kp->offset);
2117}
2118NOKPROBE_SYMBOL(dump_kprobe);
2119
2120/*
2121 * Lookup and populate the kprobe_blacklist.
2122 *
2123 * Unlike the kretprobe blacklist, we need to determine
2124 * the range of addresses belonging to these functions,
2125 * since a kprobe need not be at the beginning
2126 * of a function.
2127 */
2128static int __init populate_kprobe_blacklist(unsigned long *start,
2129                                             unsigned long *end)
2130{
2131        unsigned long *iter;
2132        struct kprobe_blacklist_entry *ent;
2133        unsigned long entry, offset = 0, size = 0;
2134
2135        for (iter = start; iter < end; iter++) {
2136                entry = arch_deref_entry_point((void *)*iter);
2137
2138                if (!kernel_text_address(entry) ||
2139                    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2140                        pr_err("Failed to resolve kprobe blacklist entry at %p\n",
2141                                (void *)entry);
2142                        continue;
2143                }
2144
2145                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2146                if (!ent)
2147                        return -ENOMEM;
2148                ent->start_addr = entry;
2149                ent->end_addr = entry + size;
2150                INIT_LIST_HEAD(&ent->list);
2151                list_add_tail(&ent->list, &kprobe_blacklist);
2152        }
2153        return 0;
2154}
2155
2156/* Module notifier callback: check for kprobes in the module */
2157static int kprobes_module_callback(struct notifier_block *nb,
2158                                   unsigned long val, void *data)
2159{
2160        struct module *mod = data;
2161        struct hlist_head *head;
2162        struct kprobe *p;
2163        unsigned int i;
2164        int checkcore = (val == MODULE_STATE_GOING);
2165
2166        if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2167                return NOTIFY_DONE;
2168
2169        /*
2170         * When MODULE_STATE_GOING is notified, both the module's .text and
2171         * .init.text sections will be freed. When MODULE_STATE_LIVE is
2172         * notified, only the .init.text section is freed. We need to
2173         * kill the kprobes that have been inserted into those sections.
2174         */
2175        mutex_lock(&kprobe_mutex);
2176        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2177                head = &kprobe_table[i];
2178                hlist_for_each_entry_rcu(p, head, hlist)
2179                        if (within_module_init((unsigned long)p->addr, mod) ||
2180                            (checkcore &&
2181                             within_module_core((unsigned long)p->addr, mod))) {
2182                                /*
2183                                 * The vaddr this probe is installed at will
2184                                 * soon be vfreed but not synced to disk.
2185                                 * Hence, disarming the breakpoint isn't needed.
2186                                 *
2187                                 * Note that this also moves any optimized
2188                                 * probes that are pending removal off their
2189                                 * corresponding lists and onto freeing_list,
2190                                 * so they will not be touched by the delayed
2191                                 * kprobe_optimizer work handler.
2192                                 */
2193                                kill_kprobe(p);
2194                        }
2195        }
2196        mutex_unlock(&kprobe_mutex);
2197        return NOTIFY_DONE;
2198}
2199
2200static struct notifier_block kprobe_module_nb = {
2201        .notifier_call = kprobes_module_callback,
2202        .priority = 0
2203};
2204
2205/* Markers of _kprobe_blacklist section */
2206extern unsigned long __start_kprobe_blacklist[];
2207extern unsigned long __stop_kprobe_blacklist[];
2208
2209static int __init init_kprobes(void)
2210{
2211        int i, err = 0;
2212
2213        /* FIXME allocate the probe table, currently defined statically */
2214        /* initialize all list heads */
2215        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2216                INIT_HLIST_HEAD(&kprobe_table[i]);
2217                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2218                raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2219        }
2220
2221        err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2222                                        __stop_kprobe_blacklist);
2223        if (err) {
2224                pr_err("kprobes: failed to populate blacklist: %d\n", err);
2225                pr_err("Please take care when using kprobes.\n");
2226        }
2227
2228        if (kretprobe_blacklist_size) {
2229                /* lookup the function address from its name */
2230                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2231                        kretprobe_blacklist[i].addr =
2232                                kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2233                        if (!kretprobe_blacklist[i].addr)
2234                                pr_err("kretprobe: lookup failed: %s\n",
2235                                       kretprobe_blacklist[i].name);
2236                }
2237        }
2238
2239#if defined(CONFIG_OPTPROBES)
2240#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2241        /* Init kprobe_optinsn_slots */
2242        kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2243#endif
2244        /* By default, kprobes can be optimized */
2245        kprobes_allow_optimization = true;
2246#endif
2247
2248        /* By default, kprobes are armed */
2249        kprobes_all_disarmed = false;
2250
2251        err = arch_init_kprobes();
2252        if (!err)
2253                err = register_die_notifier(&kprobe_exceptions_nb);
2254        if (!err)
2255                err = register_module_notifier(&kprobe_module_nb);
2256
2257        kprobes_initialized = (err == 0);
2258
2259        if (!err)
2260                init_test_probes();
2261        return err;
2262}
2263
2264#ifdef CONFIG_DEBUG_FS
2265static void report_probe(struct seq_file *pi, struct kprobe *p,
2266                const char *sym, int offset, char *modname, struct kprobe *pp)
2267{
2268        char *kprobe_type;
2269
2270        if (p->pre_handler == pre_handler_kretprobe)
2271                kprobe_type = "r";
2272        else if (p->pre_handler == setjmp_pre_handler)
2273                kprobe_type = "j";
2274        else
2275                kprobe_type = "k";
2276
2277        if (sym)
2278                seq_printf(pi, "%p  %s  %s+0x%x  %s ",
2279                        p->addr, kprobe_type, sym, offset,
2280                        (modname ? modname : " "));
2281        else
2282                seq_printf(pi, "%p  %s  %p ",
2283                        p->addr, kprobe_type, p->addr);
2284
2285        if (!pp)
2286                pp = p;
2287        seq_printf(pi, "%s%s%s%s\n",
2288                (kprobe_gone(p) ? "[GONE]" : ""),
2289                ((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2290                (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2291                (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2292}
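/*
 * A line in /sys/kernel/debug/kprobes/list therefore looks like
 * (address hypothetical):
 *
 *   ffffffff81234560  r  do_sys_open+0x0    [OPTIMIZED]
 *
 * i.e. address, type (k: kprobe, r: kretprobe, j: jprobe),
 * symbol+offset (and module, if any), then state flags.
 */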
2293
2294static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2295{
2296        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2297}
2298
2299static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2300{
2301        (*pos)++;
2302        if (*pos >= KPROBE_TABLE_SIZE)
2303                return NULL;
2304        return pos;
2305}
2306
2307static void kprobe_seq_stop(struct seq_file *f, void *v)
2308{
2309        /* Nothing to do */
2310}
2311
2312static int show_kprobe_addr(struct seq_file *pi, void *v)
2313{
2314        struct hlist_head *head;
2315        struct kprobe *p, *kp;
2316        const char *sym = NULL;
2317        unsigned int i = *(loff_t *) v;
2318        unsigned long offset = 0;
2319        char *modname, namebuf[KSYM_NAME_LEN];
2320
2321        head = &kprobe_table[i];
2322        preempt_disable();
2323        hlist_for_each_entry_rcu(p, head, hlist) {
2324                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2325                                        &offset, &modname, namebuf);
2326                if (kprobe_aggrprobe(p)) {
2327                        list_for_each_entry_rcu(kp, &p->list, list)
2328                                report_probe(pi, kp, sym, offset, modname, p);
2329                } else
2330                        report_probe(pi, p, sym, offset, modname, NULL);
2331        }
2332        preempt_enable();
2333        return 0;
2334}
2335
2336static const struct seq_operations kprobes_seq_ops = {
2337        .start = kprobe_seq_start,
2338        .next  = kprobe_seq_next,
2339        .stop  = kprobe_seq_stop,
2340        .show  = show_kprobe_addr
2341};
2342
2343static int kprobes_open(struct inode *inode, struct file *filp)
2344{
2345        return seq_open(filp, &kprobes_seq_ops);
2346}
2347
2348static const struct file_operations debugfs_kprobes_operations = {
2349        .open           = kprobes_open,
2350        .read           = seq_read,
2351        .llseek         = seq_lseek,
2352        .release        = seq_release,
2353};
2354
2355/* kprobes/blacklist -- shows which functions cannot be probed */
2356static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2357{
2358        return seq_list_start(&kprobe_blacklist, *pos);
2359}
2360
2361static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2362{
2363        return seq_list_next(v, &kprobe_blacklist, pos);
2364}
2365
2366static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2367{
2368        struct kprobe_blacklist_entry *ent =
2369                list_entry(v, struct kprobe_blacklist_entry, list);
2370
2371        seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
2372                   (void *)ent->end_addr, (void *)ent->start_addr);
2373        return 0;
2374}
2375
2376static const struct seq_operations kprobe_blacklist_seq_ops = {
2377        .start = kprobe_blacklist_seq_start,
2378        .next  = kprobe_blacklist_seq_next,
2379        .stop  = kprobe_seq_stop,       /* Reuse void function */
2380        .show  = kprobe_blacklist_seq_show,
2381};
2382
2383static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
2384{
2385        return seq_open(filp, &kprobe_blacklist_seq_ops);
2386}
2387
2388static const struct file_operations debugfs_kprobe_blacklist_ops = {
2389        .open           = kprobe_blacklist_open,
2390        .read           = seq_read,
2391        .llseek         = seq_lseek,
2392        .release        = seq_release,
2393};
2394
2395static void arm_all_kprobes(void)
2396{
2397        struct hlist_head *head;
2398        struct kprobe *p;
2399        unsigned int i;
2400
2401        mutex_lock(&kprobe_mutex);
2402
2403        /* If kprobes are armed, just return */
2404        if (!kprobes_all_disarmed)
2405                goto already_enabled;
2406
2407        /*
2408         * optimize_kprobe() called by arm_kprobe() checks
2409         * kprobes_all_disarmed, so clear kprobes_all_disarmed before
2410         * calling arm_kprobe().
2411         */
2412        kprobes_all_disarmed = false;
2413        /* Arming a kprobe doesn't optimize the kprobe itself */
2414        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2415                head = &kprobe_table[i];
2416                hlist_for_each_entry_rcu(p, head, hlist)
2417                        if (!kprobe_disabled(p))
2418                                arm_kprobe(p);
2419        }
2420
2421        printk(KERN_INFO "Kprobes globally enabled\n");
2422
2423already_enabled:
2424        mutex_unlock(&kprobe_mutex);
2425        return;
2426}
2427
2428static void disarm_all_kprobes(void)
2429{
2430        struct hlist_head *head;
2431        struct kprobe *p;
2432        unsigned int i;
2433
2434        mutex_lock(&kprobe_mutex);
2435
2436        /* If kprobes are already disarmed, just return */
2437        if (kprobes_all_disarmed) {
2438                mutex_unlock(&kprobe_mutex);
2439                return;
2440        }
2441
2442        kprobes_all_disarmed = true;
2443        printk(KERN_INFO "Kprobes globally disabled\n");
2444
2445        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2446                head = &kprobe_table[i];
2447                hlist_for_each_entry_rcu(p, head, hlist) {
2448                        if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
2449                                disarm_kprobe(p, false);
2450                }
2451        }
2452        mutex_unlock(&kprobe_mutex);
2453
2454        /* Wait for the optimizer to finish disarming all kprobes */
2455        wait_for_kprobe_optimizer();
2456}
2457
2458/*
2459 * XXX: The debugfs bool file interface doesn't allow for callbacks
2460 * when the bool state is switched. We can switch to that facility
2461 * once it becomes available.
2462 */
2463static ssize_t read_enabled_file_bool(struct file *file,
2464               char __user *user_buf, size_t count, loff_t *ppos)
2465{
2466        char buf[3];
2467
2468        if (!kprobes_all_disarmed)
2469                buf[0] = '1';
2470        else
2471                buf[0] = '0';
2472        buf[1] = '\n';
2473        buf[2] = 0x00;
2474        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2475}
2476
2477static ssize_t write_enabled_file_bool(struct file *file,
2478               const char __user *user_buf, size_t count, loff_t *ppos)
2479{
2480        char buf[32];
2481        size_t buf_size;
2482
2483        buf_size = min(count, (sizeof(buf)-1));
2484        if (copy_from_user(buf, user_buf, buf_size))
2485                return -EFAULT;
2486
2487        buf[buf_size] = '\0';
2488        switch (buf[0]) {
2489        case 'y':
2490        case 'Y':
2491        case '1':
2492                arm_all_kprobes();
2493                break;
2494        case 'n':
2495        case 'N':
2496        case '0':
2497                disarm_all_kprobes();
2498                break;
2499        default:
2500                return -EINVAL;
2501        }
2502
2503        return count;
2504}
2505
2506static const struct file_operations fops_kp = {
2507        .read =         read_enabled_file_bool,
2508        .write =        write_enabled_file_bool,
2509        .llseek =       default_llseek,
2510};
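/*
 * From userspace these handlers are driven through the "enabled" file
 * created below, assuming the usual debugfs mount point:
 *
 *   echo 0 > /sys/kernel/debug/kprobes/enabled    # disarm all kprobes
 *   echo 1 > /sys/kernel/debug/kprobes/enabled    # re-arm them
 */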
2511
2512static int __init debugfs_kprobe_init(void)
2513{
2514        struct dentry *dir, *file;
2515        /* fops_kp reads kprobes_all_disarmed directly; no private data needed */
2516
2517        dir = debugfs_create_dir("kprobes", NULL);
2518        if (!dir)
2519                return -ENOMEM;
2520
2521        file = debugfs_create_file("list", 0444, dir, NULL,
2522                                &debugfs_kprobes_operations);
2523        if (!file)
2524                goto error;
2525
2526        file = debugfs_create_file("enabled", 0600, dir,
2527                                        NULL, &fops_kp);
2528        if (!file)
2529                goto error;
2530
2531        file = debugfs_create_file("blacklist", 0444, dir, NULL,
2532                                &debugfs_kprobe_blacklist_ops);
2533        if (!file)
2534                goto error;
2535
2536        return 0;
2537
2538error:
2539        debugfs_remove(dir);
2540        return -ENOMEM;
2541}
2542
2543late_initcall(debugfs_kprobe_init);
2544#endif /* CONFIG_DEBUG_FS */
2545
2546module_init(init_kprobes);
2547
2548/* defined in arch/.../kernel/kprobes.c */
2549EXPORT_SYMBOL_GPL(jprobe_return);
2550