/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes suggestions from
 *              Rusty Russell).
 * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *              hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *              exceptions notifier to be first on the priority list.
 * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *              <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
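
/*
 * With KPROBE_HASH_BITS = 6, the two hash tables below have 64 buckets
 * each; probe addresses and task pointers are spread over the buckets
 * with hash_ptr().
 */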
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
        raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
                                        unsigned int __unused)
{
        return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
        return &(kretprobe_table_locks[hash].lock);
}

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
        struct list_head list;
        kprobe_opcode_t *insns;         /* Page of instruction slots */
        struct kprobe_insn_cache *cache;
        int nused;
        int ngarbage;
        char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)                    \
        (offsetof(struct kprobe_insn_page, slot_used) + \
         (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
        return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
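
/*
 * Worked example (illustrative; actual values are arch-dependent):
 * with 4 KiB pages, insn_size = 16 and sizeof(kprobe_opcode_t) = 1
 * (as on x86), each slot occupies 16 bytes, so slots_per_page()
 * yields 4096 / 16 = 256 slots per kprobe_insn_page.
 */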

enum kprobe_slot_state {
        SLOT_CLEAN = 0,
        SLOT_DIRTY = 1,
        SLOT_USED = 2,
};

void __weak *alloc_insn_page(void)
{
        return module_alloc(PAGE_SIZE);
}

void __weak free_insn_page(void *page)
{
        module_memfree(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
        .alloc = alloc_insn_page,
        .free = free_insn_page,
        .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
        .insn_size = MAX_INSN_SIZE,
        .nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
        struct kprobe_insn_page *kip;
        kprobe_opcode_t *slot = NULL;

        /* Since the slot array is not protected by rcu, we need a mutex */
        mutex_lock(&c->mutex);
 retry:
        rcu_read_lock();
        list_for_each_entry_rcu(kip, &c->pages, list) {
                if (kip->nused < slots_per_page(c)) {
                        int i;
                        for (i = 0; i < slots_per_page(c); i++) {
                                if (kip->slot_used[i] == SLOT_CLEAN) {
                                        kip->slot_used[i] = SLOT_USED;
                                        kip->nused++;
                                        slot = kip->insns + (i * c->insn_size);
                                        rcu_read_unlock();
                                        goto out;
                                }
                        }
                        /* kip->nused is broken. Fix it. */
                        kip->nused = slots_per_page(c);
                        WARN_ON(1);
                }
        }
        rcu_read_unlock();

        /* If there are any garbage slots, collect them and try again. */
        if (c->nr_garbage && collect_garbage_slots(c) == 0)
                goto retry;

        /* All out of space.  Need to allocate a new page. */
        kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
        if (!kip)
                goto out;

        /*
         * Use module_alloc so this page is within +/- 2GB of where the
         * kernel image and loaded module images reside. This is required
         * so x86_64 can correctly handle the %rip-relative fixups.
         */
        kip->insns = c->alloc();
        if (!kip->insns) {
                kfree(kip);
                goto out;
        }
        INIT_LIST_HEAD(&kip->list);
        memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
        kip->slot_used[0] = SLOT_USED;
        kip->nused = 1;
        kip->ngarbage = 0;
        kip->cache = c;
        list_add_rcu(&kip->list, &c->pages);
        slot = kip->insns;
out:
        mutex_unlock(&c->mutex);
        return slot;
}
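
/*
 * Typical use from arch code (sketch; the get_insn_slot() and
 * free_insn_slot() wrappers in <linux/kprobes.h> pass the global
 * kprobe_insn_slots cache into the __ functions here):
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	... copy and fix up the probed instruction into the slot ...
 *	free_insn_slot(p->ainsn.insn, 0);   (on arch_remove_kprobe())
 */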

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
        kip->slot_used[idx] = SLOT_CLEAN;
        kip->nused--;
        if (kip->nused == 0) {
                /*
                 * Page is no longer in use.  Free it unless
                 * it's the last one.  We keep the last one
                 * so as not to have to set it up again the
                 * next time somebody inserts a probe.
                 */
                if (!list_is_singular(&kip->list)) {
                        list_del_rcu(&kip->list);
                        synchronize_rcu();
                        kip->cache->free(kip->insns);
                        kfree(kip);
                }
                return 1;
        }
        return 0;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
        struct kprobe_insn_page *kip, *next;

        /* Ensure no task is ever interrupted on the garbage slots */
        synchronize_sched();

        list_for_each_entry_safe(kip, next, &c->pages, list) {
                int i;
                if (kip->ngarbage == 0)
                        continue;
                kip->ngarbage = 0;      /* we will collect all garbage slots */
                for (i = 0; i < slots_per_page(c); i++) {
                        if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
                                break;
                }
        }
        c->nr_garbage = 0;
        return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
                      kprobe_opcode_t *slot, int dirty)
{
        struct kprobe_insn_page *kip;
        long idx;

        mutex_lock(&c->mutex);
        rcu_read_lock();
        list_for_each_entry_rcu(kip, &c->pages, list) {
                idx = ((long)slot - (long)kip->insns) /
                        (c->insn_size * sizeof(kprobe_opcode_t));
                if (idx >= 0 && idx < slots_per_page(c))
                        goto out;
        }
        /* Could not find this slot. */
        WARN_ON(1);
        kip = NULL;
out:
        rcu_read_unlock();
        /* Mark and sweep: this may sleep */
        if (kip) {
                /* Check double free */
                WARN_ON(kip->slot_used[idx] != SLOT_USED);
                if (dirty) {
                        kip->slot_used[idx] = SLOT_DIRTY;
                        kip->ngarbage++;
                        if (++c->nr_garbage > slots_per_page(c))
                                collect_garbage_slots(c);
                } else {
                        collect_one_slot(kip, idx);
                }
        }
        mutex_unlock(&c->mutex);
}

/*
 * Check whether the given address is on a page of kprobe instruction
 * slots. This is used to check whether an address found on a stack
 * is in a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
        struct kprobe_insn_page *kip;
        bool ret = false;

        rcu_read_lock();
        list_for_each_entry_rcu(kip, &c->pages, list) {
                if (addr >= (unsigned long)kip->insns &&
                    addr < (unsigned long)kip->insns + PAGE_SIZE) {
                        ret = true;
                        break;
                }
        }
        rcu_read_unlock();

        return ret;
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
        .alloc = alloc_insn_page,
        .free = free_insn_page,
        .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
        /* .insn_size is initialized later */
        .nr_garbage = 0,
};
#endif
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
        __this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
        __this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *      - under the kprobe_mutex - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
        struct hlist_head *head;
        struct kprobe *p;

        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, head, hlist) {
                if (p->addr == addr)
                        return p;
        }

        return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);
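
/*
 * Sketch of the caller pattern (for illustration): outside of
 * kprobe_mutex, the RCU-protected lookup requires preemption to stay
 * disabled across get_kprobe() and any use of the result:
 *
 *	preempt_disable();
 *	p = get_kprobe(addr);
 *	if (p)
 *		... use p ...
 *	preempt_enable();
 */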

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
        return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
        return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
               list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
        memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
                        set_kprobe_instance(kp);
                        kp->pre_handler(kp, regs);
                }
                reset_kprobe_instance();
        }
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
        struct optimized_kprobe *op;

        op = container_of(p, struct optimized_kprobe, kp);
        arch_remove_optimized_kprobe(op);
        arch_remove_kprobe(p);
        kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
        struct optimized_kprobe *op;

        if (kprobe_aggrprobe(p)) {
                op = container_of(p, struct optimized_kprobe, kp);
                return arch_prepared_optinsn(&op->optinsn);
        }

        return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
        struct optimized_kprobe *op;

        /* If p is not an aggr/opt probe, just report whether it is disabled */
        if (!kprobe_aggrprobe(p))
                return kprobe_disabled(p);

        op = container_of(p, struct optimized_kprobe, kp);

        return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
        struct optimized_kprobe *op;

        if (kprobe_aggrprobe(p)) {
                op = container_of(p, struct optimized_kprobe, kp);
                if (!list_empty(&op->list))
                        return 1;
        }
        return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (excluding the breakpoint itself).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
        int i;
        struct kprobe *p = NULL;
        struct optimized_kprobe *op;

        /* Don't check i == 0, since that is a breakpoint case. */
        for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
                p = get_kprobe((void *)(addr - i));

        if (p && kprobe_optready(p)) {
                op = container_of(p, struct optimized_kprobe, kp);
                if (arch_within_optimized_kprobe(op, addr))
                        return p;
        }

        return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
        /*
         * The optimization/unoptimization refers to online_cpus via
         * stop_machine(), and cpu-hotplug modifies online_cpus. At the
         * same time, text_mutex is held both in cpu-hotplug and here.
         * This combination can cause a deadlock (cpu-hotplug tries to
         * lock text_mutex but stop_machine() cannot proceed because
         * online_cpus has been changed).
         * To avoid this deadlock, the caller must have locked cpu-hotplug,
         * preventing cpu-hotplug from happening outside of text_mutex
         * locking.
         */
        lockdep_assert_cpus_held();

        /* Optimization is never done while all kprobes are disarmed */
        if (kprobes_all_disarmed || !kprobes_allow_optimization ||
            list_empty(&optimizing_list))
                return;

        mutex_lock(&text_mutex);
        arch_optimize_kprobes(&optimizing_list);
        mutex_unlock(&text_mutex);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
        struct optimized_kprobe *op, *tmp;

        /* See comment in do_optimize_kprobes() */
        lockdep_assert_cpus_held();

        /* Unoptimization must always be carried out */
        if (list_empty(&unoptimizing_list))
                return;

        mutex_lock(&text_mutex);
        arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
        /* Loop over freeing_list for disarming */
        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
                /* Disarm probes if marked disabled */
                if (kprobe_disabled(&op->kp))
                        arch_disarm_kprobe(&op->kp);
                if (kprobe_unused(&op->kp)) {
                        /*
                         * Remove unused probes from hash list. After waiting
                         * for synchronization, these probes are reclaimed.
                         * (reclaiming is done by do_free_cleaned_kprobes().)
                         */
                        hlist_del_rcu(&op->kp.hlist);
                } else
                        list_del_init(&op->list);
        }
        mutex_unlock(&text_mutex);
}

/* Reclaim all kprobes on freeing_list */
static void do_free_cleaned_kprobes(void)
{
        struct optimized_kprobe *op, *tmp;

        list_for_each_entry_safe(op, tmp, &freeing_list, list) {
                BUG_ON(!kprobe_unused(&op->kp));
                list_del_init(&op->list);
                free_aggr_kprobe(&op->kp);
        }
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static void kick_kprobe_optimizer(void)
{
        schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
        mutex_lock(&kprobe_mutex);
        cpus_read_lock();
        /* Lock modules while optimizing kprobes */
        mutex_lock(&module_mutex);

        /*
         * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
         * kprobes before waiting for the quiescence period.
         */
        do_unoptimize_kprobes();

        /*
         * Step 2: Wait for a quiescence period to ensure that all potentially
         * preempted tasks have been scheduled normally. Because an optprobe
         * may modify multiple instructions, there is a chance that a task is
         * preempted on the Nth instruction. In that case, such a task could
         * resume at the 2nd to Nth byte of the jump instruction. This wait
         * avoids that. Note that on a non-preemptive kernel, this is
         * transparently converted to synchronize_sched(), which waits for
         * all interrupts to have completed.
         */
        synchronize_rcu_tasks();

        /* Step 3: Optimize kprobes after the quiescence period */
        do_optimize_kprobes();

        /* Step 4: Free cleaned kprobes after the quiescence period */
        do_free_cleaned_kprobes();

        mutex_unlock(&module_mutex);
        cpus_read_unlock();
        mutex_unlock(&kprobe_mutex);

        /* Step 5: Kick the optimizer again if needed */
        if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
                kick_kprobe_optimizer();
}

/* Wait for optimization and unoptimization to complete */
void wait_for_kprobe_optimizer(void)
{
        mutex_lock(&kprobe_mutex);

        while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
                mutex_unlock(&kprobe_mutex);

                /* this will also make optimizing_work execute immediately */
                flush_delayed_work(&optimizing_work);
                /* @optimizing_work might not have been queued yet, relax */
                cpu_relax();

                mutex_lock(&kprobe_mutex);
        }

        mutex_unlock(&kprobe_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
        struct optimized_kprobe *op;

        /* Check if the kprobe is disabled or not ready for optimization. */
        if (!kprobe_optready(p) || !kprobes_allow_optimization ||
            (kprobe_disabled(p) || kprobes_all_disarmed))
                return;

        /* Neither break_handler nor post_handler is supported. */
        if (p->break_handler || p->post_handler)
                return;

        op = container_of(p, struct optimized_kprobe, kp);

        /* Check that there are no other kprobes within the optimized region */
        if (arch_check_optimized_kprobe(op) < 0)
                return;

        /* Check if it is already optimized. */
        if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
                return;
        op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

        if (!list_empty(&op->list))
                /* This kprobe is being unoptimized. Just dequeue it. */
                list_del_init(&op->list);
        else {
                list_add(&op->list, &optimizing_list);
                kick_kprobe_optimizer();
        }
}

/* Shortcut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
        lockdep_assert_cpus_held();
        arch_unoptimize_kprobe(op);
        if (kprobe_disabled(&op->kp))
                arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
        struct optimized_kprobe *op;

        if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
                return; /* This is neither an optprobe nor optimized */

        op = container_of(p, struct optimized_kprobe, kp);
        if (!kprobe_optimized(p)) {
                /* Unoptimized or unoptimizing case */
                if (force && !list_empty(&op->list)) {
                        /*
                         * Only if this kprobe is being unoptimized and force
                         * is set, forcibly unoptimize it. (There is no need
                         * to unoptimize an already unoptimized kprobe again.)
                         */
                        list_del_init(&op->list);
                        force_unoptimize_kprobe(op);
                }
                return;
        }

        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
        if (!list_empty(&op->list)) {
                /* Dequeue from the optimization queue */
                list_del_init(&op->list);
                return;
        }
        /* Optimized kprobe case */
        if (force)
                /* Forcibly update the code: this is a special case */
                force_unoptimize_kprobe(op);
        else {
                list_add(&op->list, &unoptimizing_list);
                kick_kprobe_optimizer();
        }
}

/* Cancel unoptimizing so the kprobe can be reused */
static void reuse_unused_kprobe(struct kprobe *ap)
{
        struct optimized_kprobe *op;

        BUG_ON(!kprobe_unused(ap));
        /*
         * An unused kprobe MUST be in the middle of delayed unoptimizing
         * (which means there is still a relative jump in place) and disabled.
         */
        op = container_of(ap, struct optimized_kprobe, kp);
        if (unlikely(list_empty(&op->list)))
                printk(KERN_WARNING "Warning: found a stray unused aggrprobe@%p\n",
                       ap->addr);
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
        BUG_ON(!kprobe_optready(ap));
        optimize_kprobe(ap);
}

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
        struct optimized_kprobe *op;

        op = container_of(p, struct optimized_kprobe, kp);
        if (!list_empty(&op->list))
                /* Dequeue from the (un)optimization queue */
                list_del_init(&op->list);
        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

        if (kprobe_unused(p)) {
                /* Enqueue if it is unused */
                list_add(&op->list, &freeing_list);
                /*
                 * Remove unused probes from the hash list. After waiting
                 * for synchronization, this probe is reclaimed.
                 * (reclaiming is done by do_free_cleaned_kprobes().)
                 */
                hlist_del_rcu(&op->kp.hlist);
        }

        /* Don't touch the code, because it is already freed. */
        arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
        if (!kprobe_ftrace(p))
                arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
        struct optimized_kprobe *op;

        op = container_of(p, struct optimized_kprobe, kp);
        __prepare_optimized_kprobe(op, p);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
        struct optimized_kprobe *op;

        op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
        if (!op)
                return NULL;

        INIT_LIST_HEAD(&op->list);
        op->kp.addr = p->addr;
        __prepare_optimized_kprobe(op, p);

        return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
        struct kprobe *ap;
        struct optimized_kprobe *op;

        /* Impossible to optimize ftrace-based kprobe */
        if (kprobe_ftrace(p))
                return;

        /* For preparing optimization, jump_label_text_reserved() is called */
        cpus_read_lock();
        jump_label_lock();
        mutex_lock(&text_mutex);

        ap = alloc_aggr_kprobe(p);
        if (!ap)
                goto out;

        op = container_of(ap, struct optimized_kprobe, kp);
        if (!arch_prepared_optinsn(&op->optinsn)) {
                /* If setting up optimization failed, fall back to kprobe */
                arch_remove_optimized_kprobe(op);
                kfree(op);
                goto out;
        }

        init_aggr_kprobe(ap, p);
        optimize_kprobe(ap);    /* This just kicks the optimizer thread */

out:
        mutex_unlock(&text_mutex);
        jump_label_unlock();
        cpus_read_unlock();
}

#ifdef CONFIG_SYSCTL
static void optimize_all_kprobes(void)
{
        struct hlist_head *head;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);
        /* If optimization is already allowed, just return */
        if (kprobes_allow_optimization)
                goto out;

        cpus_read_lock();
        kprobes_allow_optimization = true;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, head, hlist)
                        if (!kprobe_disabled(p))
                                optimize_kprobe(p);
        }
        cpus_read_unlock();
        printk(KERN_INFO "Kprobes globally optimized\n");
out:
        mutex_unlock(&kprobe_mutex);
}

static void unoptimize_all_kprobes(void)
{
        struct hlist_head *head;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);
        /* If optimization is already prohibited, just return */
        if (!kprobes_allow_optimization) {
                mutex_unlock(&kprobe_mutex);
                return;
        }

        cpus_read_lock();
        kprobes_allow_optimization = false;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, head, hlist) {
                        if (!kprobe_disabled(p))
                                unoptimize_kprobe(p, false);
                }
        }
        cpus_read_unlock();
        mutex_unlock(&kprobe_mutex);

        /* Wait for unoptimizing completion */
        wait_for_kprobe_optimizer();
        printk(KERN_INFO "Kprobes globally unoptimized\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
                                      void __user *buffer, size_t *length,
                                      loff_t *ppos)
{
        int ret;

        mutex_lock(&kprobe_sysctl_mutex);
        sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

        if (sysctl_kprobes_optimization)
                optimize_all_kprobes();
        else
                unoptimize_all_kprobes();
        mutex_unlock(&kprobe_sysctl_mutex);

        return ret;
}
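
/*
 * This handler is wired to the debug.kprobes-optimization sysctl
 * (assuming the usual table entry in kernel/sysctl.c), so jump
 * optimization can be toggled at runtime, e.g.:
 *
 *	# sysctl -w debug.kprobes-optimization=0	(unoptimize all)
 *	# sysctl -w debug.kprobes-optimization=1	(optimize again)
 */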
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __arm_kprobe(struct kprobe *p)
{
        struct kprobe *_p;

        /* Check collision with other optimized kprobes */
        _p = get_optimized_kprobe((unsigned long)p->addr);
        if (unlikely(_p))
                /* Fall back to an unoptimized kprobe */
                unoptimize_kprobe(_p, true);

        arch_arm_kprobe(p);
        optimize_kprobe(p);     /* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
        struct kprobe *_p;

        /* Try to unoptimize */
        unoptimize_kprobe(p, kprobes_all_disarmed);

        if (!kprobe_queued(p)) {
                arch_disarm_kprobe(p);
                /* If another kprobe was blocked, optimize it. */
                _p = get_optimized_kprobe((unsigned long)p->addr);
                if (unlikely(_p) && reopt)
                        optimize_kprobe(_p);
        }
        /* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)                      do {} while (0)
#define unoptimize_kprobe(p, f)                 do {} while (0)
#define kill_optimized_kprobe(p)                do {} while (0)
#define prepare_optimized_kprobe(p)             do {} while (0)
#define try_to_optimize_kprobe(p)               do {} while (0)
#define __arm_kprobe(p)                         arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)                   arch_disarm_kprobe(p)
#define kprobe_disarmed(p)                      kprobe_disabled(p)
#define wait_for_kprobe_optimizer()             do {} while (0)

/* Without optimization, there should be no unused kprobes to reuse */
static void reuse_unused_kprobe(struct kprobe *ap)
{
        printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
        BUG_ON(kprobe_unused(ap));
}

static void free_aggr_kprobe(struct kprobe *p)
{
        arch_remove_kprobe(p);
        kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
        return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
        .func = kprobe_ftrace_handler,
        .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really an ftrace location */
static int prepare_kprobe(struct kprobe *p)
{
        if (!kprobe_ftrace(p))
                return arch_prepare_kprobe(p);

        return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static int arm_kprobe_ftrace(struct kprobe *p)
{
        int ret = 0;

        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                                   (unsigned long)p->addr, 0, 0);
        if (ret) {
                pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
                return ret;
        }

        if (kprobe_ftrace_enabled == 0) {
                ret = register_ftrace_function(&kprobe_ftrace_ops);
                if (ret) {
                        pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
                        goto err_ftrace;
                }
        }

        kprobe_ftrace_enabled++;
        return ret;

err_ftrace:
        /*
         * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
         * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
         * empty filter_hash which would undesirably trace all functions.
         */
        ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
        return ret;
}

/* Caller must lock kprobe_mutex */
static int disarm_kprobe_ftrace(struct kprobe *p)
{
        int ret = 0;

        if (kprobe_ftrace_enabled == 1) {
                ret = unregister_ftrace_function(&kprobe_ftrace_ops);
                if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
                        return ret;
        }

        kprobe_ftrace_enabled--;

        ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
                           (unsigned long)p->addr, 1, 0);
        WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
        return ret;
}
#else   /* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)       arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)    (-ENODEV)
#define disarm_kprobe_ftrace(p) (-ENODEV)
#endif

/* Arm a kprobe with text_mutex */
static int arm_kprobe(struct kprobe *kp)
{
        if (unlikely(kprobe_ftrace(kp)))
                return arm_kprobe_ftrace(kp);

        cpus_read_lock();
        mutex_lock(&text_mutex);
        __arm_kprobe(kp);
        mutex_unlock(&text_mutex);
        cpus_read_unlock();

        return 0;
}

/* Disarm a kprobe with text_mutex */
static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
        if (unlikely(kprobe_ftrace(kp)))
                return disarm_kprobe_ftrace(kp);

        cpus_read_lock();
        mutex_lock(&text_mutex);
        __disarm_kprobe(kp, reopt);
        mutex_unlock(&text_mutex);
        cpus_read_unlock();

        return 0;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
                        set_kprobe_instance(kp);
                        if (kp->pre_handler(kp, regs))
                                return 1;
                }
                reset_kprobe_instance();
        }
        return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
                              unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler && likely(!kprobe_disabled(kp))) {
                        set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
                        reset_kprobe_instance();
                }
        }
}
NOKPROBE_SYMBOL(aggr_post_handler);

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                              int trapnr)
{
        struct kprobe *cur = __this_cpu_read(kprobe_instance);

        /*
         * If we faulted during the execution of a user-specified
         * probe handler, invoke just that probe's fault handler
         */
        if (cur && cur->fault_handler) {
                if (cur->fault_handler(cur, regs, trapnr))
                        return 1;
        }
        return 0;
}
NOKPROBE_SYMBOL(aggr_fault_handler);

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *cur = __this_cpu_read(kprobe_instance);
        int ret = 0;

        if (cur && cur->break_handler) {
                if (cur->break_handler(cur, regs))
                        ret = 1;
        }
        reset_kprobe_instance();
        return ret;
}
NOKPROBE_SYMBOL(aggr_break_handler);

/* Walks the list and increments the nmissed count for the multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
        struct kprobe *kp;
        if (!kprobe_aggrprobe(p)) {
                p->nmissed++;
        } else {
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->nmissed++;
        }
        return;
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

void recycle_rp_inst(struct kretprobe_instance *ri,
                     struct hlist_head *head)
{
        struct kretprobe *rp = ri->rp;

        /* remove rp inst off the kretprobe_inst_table */
        hlist_del(&ri->hlist);
        INIT_HLIST_NODE(&ri->hlist);
        if (likely(rp)) {
                raw_spin_lock(&rp->lock);
                hlist_add_head(&ri->hlist, &rp->free_instances);
                raw_spin_unlock(&rp->lock);
        } else
                /* Unregistering */
                hlist_add_head(&ri->hlist, head);
}
NOKPROBE_SYMBOL(recycle_rp_inst);

void kretprobe_hash_lock(struct task_struct *tsk,
                         struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        raw_spinlock_t *hlist_lock;

        *head = &kretprobe_inst_table[hash];
        hlist_lock = kretprobe_table_lock_ptr(hash);
        raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_lock);

static void kretprobe_table_lock(unsigned long hash,
                                 unsigned long *flags)
__acquires(hlist_lock)
{
        raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_lock);

void kretprobe_hash_unlock(struct task_struct *tsk,
                           unsigned long *flags)
__releases(hlist_lock)
{
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        raw_spinlock_t *hlist_lock;

        hlist_lock = kretprobe_table_lock_ptr(hash);
        raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_unlock);

static void kretprobe_table_unlock(unsigned long hash,
                                   unsigned long *flags)
__releases(hlist_lock)
{
        raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);
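
/*
 * Sketch of how an arch-level kretprobe trampoline handler typically
 * uses the hash locks above (details vary per architecture):
 *
 *	kretprobe_hash_lock(current, &head, &flags);
 *	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 *		if (ri->task != current)
 *			continue;
 *		... invoke ri->rp->handler and recycle_rp_inst(ri, ...) ...
 *	}
 *	kretprobe_hash_unlock(current, &flags);
 */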

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long hash, flags = 0;

        if (unlikely(!kprobes_initialized))
                /* Early boot.  kretprobe_table_locks not yet initialized. */
                return;

        INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
        kretprobe_table_lock(hash, &flags);
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        kretprobe_table_unlock(hash, &flags);
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}
NOKPROBE_SYMBOL(kprobe_flush_task);

static inline void free_rp_inst(struct kretprobe *rp)
{
        struct kretprobe_instance *ri;
        struct hlist_node *next;

        hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}

static void cleanup_rp_inst(struct kretprobe *rp)
{
        unsigned long flags, hash;
        struct kretprobe_instance *ri;
        struct hlist_node *next;
        struct hlist_head *head;

        /* No race here */
        for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
                kretprobe_table_lock(hash, &flags);
                head = &kretprobe_inst_table[hash];
                hlist_for_each_entry_safe(ri, next, head, hlist) {
                        if (ri->rp == rp)
                                ri->rp = NULL;
                }
                kretprobe_table_unlock(hash, &flags);
        }
        free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
        BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

        if (p->break_handler || p->post_handler)
                unoptimize_kprobe(ap, true);    /* Fall back to normal kprobe */

        if (p->break_handler) {
                if (ap->break_handler)
                        return -EEXIST;
                list_add_tail_rcu(&p->list, &ap->list);
                ap->break_handler = aggr_break_handler;
        } else
                list_add_rcu(&p->list, &ap->list);
        if (p->post_handler && !ap->post_handler)
                ap->post_handler = aggr_post_handler;

        return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
        /* Copy p's insn slot to ap */
        copy_kprobe(p, ap);
        flush_insn_slot(ap);
        ap->addr = p->addr;
        ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
        ap->pre_handler = aggr_pre_handler;
        ap->fault_handler = aggr_fault_handler;
        /* We don't care about a kprobe which has gone. */
        if (p->post_handler && !kprobe_gone(p))
                ap->post_handler = aggr_post_handler;
        if (p->break_handler && !kprobe_gone(p))
                ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD(&ap->list);
        INIT_HLIST_NODE(&ap->hlist);

        list_add_rcu(&p->list, &ap->list);
        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap = orig_p;

        cpus_read_lock();

        /* For preparing optimization, jump_label_text_reserved() is called */
        jump_label_lock();
        mutex_lock(&text_mutex);

        if (!kprobe_aggrprobe(orig_p)) {
                /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
                ap = alloc_aggr_kprobe(orig_p);
                if (!ap) {
                        ret = -ENOMEM;
                        goto out;
                }
                init_aggr_kprobe(ap, orig_p);
        } else if (kprobe_unused(ap))
                /* This probe is going to die. Rescue it */
                reuse_unused_kprobe(ap);

        if (kprobe_gone(ap)) {
                /*
                 * We are attempting to insert a new probe at the same
                 * location where a probe in a module's vaddr area has
                 * already been freed. So, the instruction slot has
                 * already been released. We need a new slot for the
                 * new probe.
                 */
                ret = arch_prepare_kprobe(ap);
                if (ret)
                        /*
                         * Even if we fail to allocate a new slot, there is no
                         * need to free the aggr_kprobe. It will be used next
                         * time, or freed by unregister_kprobe().
                         */
                        goto out;

                /* Prepare optimized instructions if possible. */
                prepare_optimized_kprobe(ap);

                /*
                 * Clear gone flag to prevent allocating new slot again, and
                 * set disabled flag because it is not armed yet.
                 */
                ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
                            | KPROBE_FLAG_DISABLED;
        }

        /* Copy ap's insn slot to p */
        copy_kprobe(ap, p);
        ret = add_new_kprobe(ap, p);

out:
        mutex_unlock(&text_mutex);
        jump_label_unlock();
        cpus_read_unlock();

        if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
                ap->flags &= ~KPROBE_FLAG_DISABLED;
                if (!kprobes_all_disarmed) {
                        /* Arm the breakpoint again. */
                        ret = arm_kprobe(ap);
                        if (ret) {
                                ap->flags |= KPROBE_FLAG_DISABLED;
                                list_del_rcu(&p->list);
                                synchronize_sched();
                        }
                }
        }
        return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
        /* The __kprobes marked functions and entry code must not be probed */
        return addr >= (unsigned long)__kprobes_text_start &&
               addr < (unsigned long)__kprobes_text_end;
}

bool within_kprobe_blacklist(unsigned long addr)
{
        struct kprobe_blacklist_entry *ent;

        if (arch_within_kprobe_blacklist(addr))
                return true;
        /*
         * If there exists a kprobe_blacklist, verify against it and
         * fail any probe registration in the prohibited area
         */
        list_for_each_entry(ent, &kprobe_blacklist, list) {
                if (addr >= ent->start_addr && addr < ent->end_addr)
                        return true;
        }

        return false;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up the symbol or if an
 * invalid combination of parameters is given.
 */
static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
                        const char *symbol_name, unsigned int offset)
{
        if ((symbol_name && addr) || (!symbol_name && !addr))
                goto invalid;

        if (symbol_name) {
                addr = kprobe_lookup_name(symbol_name, offset);
                if (!addr)
                        return ERR_PTR(-ENOENT);
        }

        addr = (kprobe_opcode_t *)(((char *)addr) + offset);
        if (addr)
                return addr;

invalid:
        return ERR_PTR(-EINVAL);
}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
        return _kprobe_addr(p->addr, p->symbol_name, p->offset);
}
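
/*
 * For example (sketch), a probe can be specified either by symbol plus
 * offset or by a raw address, but not both:
 *
 *	struct kprobe kp = { .symbol_name = "vfs_read", .offset = 0x10 };
 *	struct kprobe kp = { .addr = some_resolved_address };
 *
 * where some_resolved_address is a hypothetical, already-resolved
 * kprobe_opcode_t pointer. Passing both .symbol_name and .addr (or
 * neither) fails with -EINVAL.
 */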

/* Check that the passed kprobe is valid and return it from kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
        struct kprobe *ap, *list_p;

        ap = get_kprobe(p->addr);
        if (unlikely(!ap))
                return NULL;

        if (p != ap) {
                list_for_each_entry_rcu(list_p, &ap->list, list)
                        if (list_p == p)
                                /* kprobe p is a valid probe */
                                goto valid;
                return NULL;
        }
valid:
        return ap;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
        int ret = 0;

        mutex_lock(&kprobe_mutex);
        if (__get_valid_kprobe(p))
                ret = -EINVAL;
        mutex_unlock(&kprobe_mutex);

        return ret;
}

int __weak arch_check_ftrace_location(struct kprobe *p)
{
        unsigned long ftrace_addr;

        ftrace_addr = ftrace_location((unsigned long)p->addr);
        if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
                /* Given address is not at an instruction boundary */
                if ((unsigned long)p->addr != ftrace_addr)
                        return -EILSEQ;
                p->flags |= KPROBE_FLAG_FTRACE;
#else   /* !CONFIG_KPROBES_ON_FTRACE */
                return -EINVAL;
#endif
        }
        return 0;
}

static int check_kprobe_address_safe(struct kprobe *p,
                                     struct module **probed_mod)
{
        int ret;

        ret = arch_check_ftrace_location(p);
        if (ret)
                return ret;
        jump_label_lock();
        preempt_disable();

        /* Ensure it is not in a reserved area nor outside of kernel text */
        if (!kernel_text_address((unsigned long) p->addr) ||
            within_kprobe_blacklist((unsigned long) p->addr) ||
            jump_label_text_reserved(p->addr, p->addr)) {
                ret = -EINVAL;
                goto out;
        }

        /* Check if we are probing a module */
        *probed_mod = __module_text_address((unsigned long) p->addr);
        if (*probed_mod) {
                /*
                 * We must hold a refcount of the probed module while updating
                 * its code to prohibit unexpected unloading.
                 */
                if (unlikely(!try_module_get(*probed_mod))) {
                        ret = -ENOENT;
                        goto out;
                }

                /*
                 * If the module has freed .init.text, we cannot insert
                 * kprobes there.
                 */
                if (within_module_init((unsigned long)p->addr, *probed_mod) &&
                    (*probed_mod)->state != MODULE_STATE_COMING) {
                        module_put(*probed_mod);
                        *probed_mod = NULL;
                        ret = -ENOENT;
                }
        }
out:
        preempt_enable();
        jump_label_unlock();

        return ret;
}
1556
1557int register_kprobe(struct kprobe *p)
1558{
1559        int ret;
1560        struct kprobe *old_p;
1561        struct module *probed_mod;
1562        kprobe_opcode_t *addr;
1563
1564        /* Adjust probe address from symbol */
1565        addr = kprobe_addr(p);
1566        if (IS_ERR(addr))
1567                return PTR_ERR(addr);
1568        p->addr = addr;
1569
1570        ret = check_kprobe_rereg(p);
1571        if (ret)
1572                return ret;
1573
1574        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1575        p->flags &= KPROBE_FLAG_DISABLED;
1576        p->nmissed = 0;
1577        INIT_LIST_HEAD(&p->list);
1578
1579        ret = check_kprobe_address_safe(p, &probed_mod);
1580        if (ret)
1581                return ret;
1582
1583        mutex_lock(&kprobe_mutex);
1584
1585        old_p = get_kprobe(p->addr);
1586        if (old_p) {
1587                /* register_aggr_kprobe() may unoptimize old_p, so it locks text_mutex. */
1588                ret = register_aggr_kprobe(old_p, p);
1589                goto out;
1590        }
1591
1592        cpus_read_lock();
1593        /* Prevent text modification */
1594        mutex_lock(&text_mutex);
1595        ret = prepare_kprobe(p);
1596        mutex_unlock(&text_mutex);
1597        cpus_read_unlock();
1598        if (ret)
1599                goto out;
1600
1601        INIT_HLIST_NODE(&p->hlist);
1602        hlist_add_head_rcu(&p->hlist,
1603                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1604
1605        if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1606                ret = arm_kprobe(p);
1607                if (ret) {
1608                        hlist_del_rcu(&p->hlist);
1609                        synchronize_sched();
1610                        goto out;
1611                }
1612        }
1613
1614        /* Try to optimize kprobe */
1615        try_to_optimize_kprobe(p);
1616out:
1617        mutex_unlock(&kprobe_mutex);
1618
1619        if (probed_mod)
1620                module_put(probed_mod);
1621
1622        return ret;
1623}
1624EXPORT_SYMBOL_GPL(register_kprobe);
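
/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * caller of register_kprobe() above.  The handler and symbol names are
 * hypothetical.  Returning 0 from the pre_handler lets the probed
 * instruction execute as usual.
 *
 *        static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *        {
 *                pr_info("kprobe hit at %p\n", p->addr);
 *                return 0;
 *        }
 *
 *        static struct kprobe my_kp = {
 *                .symbol_name    = "do_sys_open",
 *                .pre_handler    = my_pre_handler,
 *        };
 *
 *        ret = register_kprobe(&my_kp);
 *        ...
 *        unregister_kprobe(&my_kp);
 */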
1625
1626/* Check if all probes on the aggrprobe are disabled */
1627static int aggr_kprobe_disabled(struct kprobe *ap)
1628{
1629        struct kprobe *kp;
1630
1631        list_for_each_entry_rcu(kp, &ap->list, list)
1632                if (!kprobe_disabled(kp))
1633                        /*
1634                         * There is an active probe on the list.
1635                         * We can't disable this ap.
1636                         */
1637                        return 0;
1638
1639        return 1;
1640}
1641
1642/* Disable one kprobe: must be called with kprobe_mutex held */
1643static struct kprobe *__disable_kprobe(struct kprobe *p)
1644{
1645        struct kprobe *orig_p;
1646        int ret;
1647
1648        /* Get the original kprobe to return */
1649        orig_p = __get_valid_kprobe(p);
1650        if (unlikely(orig_p == NULL))
1651                return ERR_PTR(-EINVAL);
1652
1653        if (!kprobe_disabled(p)) {
1654                /* Disable probe if it is a child probe */
1655                if (p != orig_p)
1656                        p->flags |= KPROBE_FLAG_DISABLED;
1657
1658                /* Try to disarm and disable this/parent probe */
1659                if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1660                        /*
1661                         * If kprobes_all_disarmed is set, orig_p
1662                         * should have already been disarmed, so
1663                         * skip the unneeded disarming step.
1664                         */
1665                        if (!kprobes_all_disarmed) {
1666                                ret = disarm_kprobe(orig_p, true);
1667                                if (ret) {
1668                                        p->flags &= ~KPROBE_FLAG_DISABLED;
1669                                        return ERR_PTR(ret);
1670                                }
1671                        }
1672                        orig_p->flags |= KPROBE_FLAG_DISABLED;
1673                }
1674        }
1675
1676        return orig_p;
1677}
1678
1679/*
1680 * Unregister a kprobe without scheduler synchronization.
1681 */
1682static int __unregister_kprobe_top(struct kprobe *p)
1683{
1684        struct kprobe *ap, *list_p;
1685
1686        /* Disable kprobe. This will disarm it if needed. */
1687        ap = __disable_kprobe(p);
1688        if (IS_ERR(ap))
1689                return PTR_ERR(ap);
1690
1691        if (ap == p)
1692                /*
1693                 * This probe is an independent (and non-optimized) kprobe
1694                 * (not an aggrprobe). Remove from the hash list.
1695                 */
1696                goto disarmed;
1697
1698        /* The following code expects this probe to be an aggrprobe */
1699        WARN_ON(!kprobe_aggrprobe(ap));
1700
1701        if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1702                /*
1703                 * !disarmed can happen if the probe is under delayed
1704                 * unoptimization.
1705                 */
1706                goto disarmed;
1707        else {
1708                /* If the probe being disabled has special handlers, update aggrprobe */
1709                if (p->break_handler && !kprobe_gone(p))
1710                        ap->break_handler = NULL;
1711                if (p->post_handler && !kprobe_gone(p)) {
1712                        list_for_each_entry_rcu(list_p, &ap->list, list) {
1713                                if ((list_p != p) && (list_p->post_handler))
1714                                        goto noclean;
1715                        }
1716                        ap->post_handler = NULL;
1717                }
1718noclean:
1719                /*
1720                 * Remove from the aggrprobe: this path will do nothing in
1721                 * __unregister_kprobe_bottom().
1722                 */
1723                list_del_rcu(&p->list);
1724                if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1725                        /*
1726                         * Try to optimize this probe again, because the
1727                         * post handler may have been changed.
1728                         */
1729                        optimize_kprobe(ap);
1730        }
1731        return 0;
1732
1733disarmed:
1734        BUG_ON(!kprobe_disarmed(ap));
1735        hlist_del_rcu(&ap->hlist);
1736        return 0;
1737}
1738
1739static void __unregister_kprobe_bottom(struct kprobe *p)
1740{
1741        struct kprobe *ap;
1742
1743        if (list_empty(&p->list))
1744                /* This is an independent kprobe */
1745                arch_remove_kprobe(p);
1746        else if (list_is_singular(&p->list)) {
1747                /* This is the last child of an aggrprobe */
1748                ap = list_entry(p->list.next, struct kprobe, list);
1749                list_del(&p->list);
1750                free_aggr_kprobe(ap);
1751        }
1752        /* Otherwise, do nothing. */
1753}
1754
1755int register_kprobes(struct kprobe **kps, int num)
1756{
1757        int i, ret = 0;
1758
1759        if (num <= 0)
1760                return -EINVAL;
1761        for (i = 0; i < num; i++) {
1762                ret = register_kprobe(kps[i]);
1763                if (ret < 0) {
1764                        if (i > 0)
1765                                unregister_kprobes(kps, i);
1766                        break;
1767                }
1768        }
1769        return ret;
1770}
1771EXPORT_SYMBOL_GPL(register_kprobes);
1772
1773void unregister_kprobe(struct kprobe *p)
1774{
1775        unregister_kprobes(&p, 1);
1776}
1777EXPORT_SYMBOL_GPL(unregister_kprobe);
1778
1779void unregister_kprobes(struct kprobe **kps, int num)
1780{
1781        int i;
1782
1783        if (num <= 0)
1784                return;
1785        mutex_lock(&kprobe_mutex);
1786        for (i = 0; i < num; i++)
1787                if (__unregister_kprobe_top(kps[i]) < 0)
1788                        kps[i]->addr = NULL;
1789        mutex_unlock(&kprobe_mutex);
1790
1791        synchronize_sched();
1792        for (i = 0; i < num; i++)
1793                if (kps[i]->addr)
1794                        __unregister_kprobe_bottom(kps[i]);
1795}
1796EXPORT_SYMBOL_GPL(unregister_kprobes);
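
/*
 * Batch sketch for the array variants above (probe definitions as in
 * the register_kprobe() example; all names illustrative).
 * register_kprobes() rolls back already-registered probes on partial
 * failure, and unregister_kprobes() pays the synchronize_sched() cost
 * only once for the whole array.
 *
 *        static struct kprobe *my_kps[] = { &my_kp1, &my_kp2 };
 *
 *        ret = register_kprobes(my_kps, ARRAY_SIZE(my_kps));
 *        ...
 *        unregister_kprobes(my_kps, ARRAY_SIZE(my_kps));
 */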
1797
1798int __weak kprobe_exceptions_notify(struct notifier_block *self,
1799                                        unsigned long val, void *data)
1800{
1801        return NOTIFY_DONE;
1802}
1803NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1804
1805static struct notifier_block kprobe_exceptions_nb = {
1806        .notifier_call = kprobe_exceptions_notify,
1807        .priority = 0x7fffffff /* we need to be notified first */
1808};
1809
1810unsigned long __weak arch_deref_entry_point(void *entry)
1811{
1812        return (unsigned long)entry;
1813}
1814
1815#if 0
1816int register_jprobes(struct jprobe **jps, int num)
1817{
1818        int ret = 0, i;
1819
1820        if (num <= 0)
1821                return -EINVAL;
1822
1823        for (i = 0; i < num; i++) {
1824                ret = register_jprobe(jps[i]);
1825
1826                if (ret < 0) {
1827                        if (i > 0)
1828                                unregister_jprobes(jps, i);
1829                        break;
1830                }
1831        }
1832
1833        return ret;
1834}
1835EXPORT_SYMBOL_GPL(register_jprobes);
1836
1837int register_jprobe(struct jprobe *jp)
1838{
1839        unsigned long addr, offset;
1840        struct kprobe *kp = &jp->kp;
1841
1842        /*
1843         * Verify that both the probepoint and the jprobe handler are
1844         * valid function entry points.
1845         */
1846        addr = arch_deref_entry_point(jp->entry);
1847
1848        if (kallsyms_lookup_size_offset(addr, NULL, &offset) && offset == 0 &&
1849            kprobe_on_func_entry(kp->addr, kp->symbol_name, kp->offset)) {
1850                kp->pre_handler = setjmp_pre_handler;
1851                kp->break_handler = longjmp_break_handler;
1852                return register_kprobe(kp);
1853        }
1854
1855        return -EINVAL;
1856}
1857EXPORT_SYMBOL_GPL(register_jprobe);
1858
1859void unregister_jprobe(struct jprobe *jp)
1860{
1861        unregister_jprobes(&jp, 1);
1862}
1863EXPORT_SYMBOL_GPL(unregister_jprobe);
1864
1865void unregister_jprobes(struct jprobe **jps, int num)
1866{
1867        int i;
1868
1869        if (num <= 0)
1870                return;
1871        mutex_lock(&kprobe_mutex);
1872        for (i = 0; i < num; i++)
1873                if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1874                        jps[i]->kp.addr = NULL;
1875        mutex_unlock(&kprobe_mutex);
1876
1877        synchronize_sched();
1878        for (i = 0; i < num; i++) {
1879                if (jps[i]->kp.addr)
1880                        __unregister_kprobe_bottom(&jps[i]->kp);
1881        }
1882}
1883EXPORT_SYMBOL_GPL(unregister_jprobes);
1884#endif
1885
1886#ifdef CONFIG_KRETPROBES
1887/*
1888 * This kprobe pre_handler is registered with every kretprobe. When the
1889 * probe hits, it sets up the return probe.
1890 */
1891static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1892{
1893        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1894        unsigned long hash, flags = 0;
1895        struct kretprobe_instance *ri;
1896
1897        /*
1898         * To avoid deadlocks, return probing is prohibited in NMI
1899         * contexts: just skip the probe and bump the (inexact)
1900         * 'nmissed' statistical counter, so that the user is informed
1901         * that something happened:
1902         */
1903        if (unlikely(in_nmi())) {
1904                rp->nmissed++;
1905                return 0;
1906        }
1907
1908        /* TODO: consider swapping the RA only after the last pre_handler has fired */
1909        hash = hash_ptr(current, KPROBE_HASH_BITS);
1910        raw_spin_lock_irqsave(&rp->lock, flags);
1911        if (!hlist_empty(&rp->free_instances)) {
1912                ri = hlist_entry(rp->free_instances.first,
1913                                struct kretprobe_instance, hlist);
1914                hlist_del(&ri->hlist);
1915                raw_spin_unlock_irqrestore(&rp->lock, flags);
1916
1917                ri->rp = rp;
1918                ri->task = current;
1919
1920                if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1921                        raw_spin_lock_irqsave(&rp->lock, flags);
1922                        hlist_add_head(&ri->hlist, &rp->free_instances);
1923                        raw_spin_unlock_irqrestore(&rp->lock, flags);
1924                        return 0;
1925                }
1926
1927                arch_prepare_kretprobe(ri, regs);
1928
1929                /* XXX(hch): why is there no hlist_move_head? */
1930                INIT_HLIST_NODE(&ri->hlist);
1931                kretprobe_table_lock(hash, &flags);
1932                hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1933                kretprobe_table_unlock(hash, &flags);
1934        } else {
1935                rp->nmissed++;
1936                raw_spin_unlock_irqrestore(&rp->lock, flags);
1937        }
1938        return 0;
1939}
1940NOKPROBE_SYMBOL(pre_handler_kretprobe);
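
/*
 * As the handler above shows, a nonzero return from ->entry_handler
 * recycles the instance and skips the return probe.  A sketch of the
 * per-instance data this enables (all names illustrative): with
 * ->data_size set, ri->data carries data from entry to return.
 *
 *        struct my_data {
 *                ktime_t entry_time;
 *        };
 *
 *        static int my_entry(struct kretprobe_instance *ri,
 *                            struct pt_regs *regs)
 *        {
 *                ((struct my_data *)ri->data)->entry_time = ktime_get();
 *                return 0;
 *        }
 *
 *        ...
 *        .entry_handler  = my_entry,
 *        .data_size      = sizeof(struct my_data),
 */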
1941
1942bool __weak arch_kprobe_on_func_entry(unsigned long offset)
1943{
1944        return !offset;
1945}
1946
1947bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1948{
1949        kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1950
1951        if (IS_ERR(kp_addr))
1952                return false;
1953
1954        if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
1955                                                !arch_kprobe_on_func_entry(offset))
1956                return false;
1957
1958        return true;
1959}
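
/*
 * Illustrative behaviour of the check above with the generic
 * arch_kprobe_on_func_entry() (which accepts only offset 0;
 * architectures may override it to allow small entry offsets):
 *
 *        kprobe_on_func_entry(NULL, "vfs_read", 0);      returns true
 *        kprobe_on_func_entry(NULL, "vfs_read", 4);      returns false
 */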
1960
1961int register_kretprobe(struct kretprobe *rp)
1962{
1963        int ret = 0;
1964        struct kretprobe_instance *inst;
1965        int i;
1966        void *addr;
1967
1968        if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
1969                return -EINVAL;
1970
1971        if (kretprobe_blacklist_size) {
1972                addr = kprobe_addr(&rp->kp);
1973                if (IS_ERR(addr))
1974                        return PTR_ERR(addr);
1975
1976                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1977                        if (kretprobe_blacklist[i].addr == addr)
1978                                return -EINVAL;
1979                }
1980        }
1981
1982        rp->kp.pre_handler = pre_handler_kretprobe;
1983        rp->kp.post_handler = NULL;
1984        rp->kp.fault_handler = NULL;
1985        rp->kp.break_handler = NULL;
1986
1987        /* Pre-allocate memory for max kretprobe instances */
1988        if (rp->maxactive <= 0) {
1989#ifdef CONFIG_PREEMPT
1990                rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1991#else
1992                rp->maxactive = num_possible_cpus();
1993#endif
1994        }
1995        raw_spin_lock_init(&rp->lock);
1996        INIT_HLIST_HEAD(&rp->free_instances);
1997        for (i = 0; i < rp->maxactive; i++) {
1998                inst = kmalloc(sizeof(struct kretprobe_instance) +
1999                               rp->data_size, GFP_KERNEL);
2000                if (inst == NULL) {
2001                        free_rp_inst(rp);
2002                        return -ENOMEM;
2003                }
2004                INIT_HLIST_NODE(&inst->hlist);
2005                hlist_add_head(&inst->hlist, &rp->free_instances);
2006        }
2007
2008        rp->nmissed = 0;
2009        /* Establish function entry probe point */
2010        ret = register_kprobe(&rp->kp);
2011        if (ret != 0)
2012                free_rp_inst(rp);
2013        return ret;
2014}
2015EXPORT_SYMBOL_GPL(register_kretprobe);
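
/*
 * Usage sketch for register_kretprobe() (illustrative; handler and
 * symbol names are hypothetical).  The return handler runs when the
 * probed function returns and can read its return value from regs.
 *
 *        static int my_ret_handler(struct kretprobe_instance *ri,
 *                                  struct pt_regs *regs)
 *        {
 *                pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
 *                        regs_return_value(regs));
 *                return 0;
 *        }
 *
 *        static struct kretprobe my_rp = {
 *                .kp.symbol_name = "do_sys_open",
 *                .handler        = my_ret_handler,
 *                .maxactive      = 20,
 *        };
 *
 *        ret = register_kretprobe(&my_rp);
 */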
2016
2017int register_kretprobes(struct kretprobe **rps, int num)
2018{
2019        int ret = 0, i;
2020
2021        if (num <= 0)
2022                return -EINVAL;
2023        for (i = 0; i < num; i++) {
2024                ret = register_kretprobe(rps[i]);
2025                if (ret < 0) {
2026                        if (i > 0)
2027                                unregister_kretprobes(rps, i);
2028                        break;
2029                }
2030        }
2031        return ret;
2032}
2033EXPORT_SYMBOL_GPL(register_kretprobes);
2034
2035void unregister_kretprobe(struct kretprobe *rp)
2036{
2037        unregister_kretprobes(&rp, 1);
2038}
2039EXPORT_SYMBOL_GPL(unregister_kretprobe);
2040
2041void unregister_kretprobes(struct kretprobe **rps, int num)
2042{
2043        int i;
2044
2045        if (num <= 0)
2046                return;
2047        mutex_lock(&kprobe_mutex);
2048        for (i = 0; i < num; i++)
2049                if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2050                        rps[i]->kp.addr = NULL;
2051        mutex_unlock(&kprobe_mutex);
2052
2053        synchronize_sched();
2054        for (i = 0; i < num; i++) {
2055                if (rps[i]->kp.addr) {
2056                        __unregister_kprobe_bottom(&rps[i]->kp);
2057                        cleanup_rp_inst(rps[i]);
2058                }
2059        }
2060}
2061EXPORT_SYMBOL_GPL(unregister_kretprobes);
2062
2063#else /* CONFIG_KRETPROBES */
2064int register_kretprobe(struct kretprobe *rp)
2065{
2066        return -ENOSYS;
2067}
2068EXPORT_SYMBOL_GPL(register_kretprobe);
2069
2070int register_kretprobes(struct kretprobe **rps, int num)
2071{
2072        return -ENOSYS;
2073}
2074EXPORT_SYMBOL_GPL(register_kretprobes);
2075
2076void unregister_kretprobe(struct kretprobe *rp)
2077{
2078}
2079EXPORT_SYMBOL_GPL(unregister_kretprobe);
2080
2081void unregister_kretprobes(struct kretprobe **rps, int num)
2082{
2083}
2084EXPORT_SYMBOL_GPL(unregister_kretprobes);
2085
2086static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2087{
2088        return 0;
2089}
2090NOKPROBE_SYMBOL(pre_handler_kretprobe);
2091
2092#endif /* CONFIG_KRETPROBES */
2093
2094/* Mark the kprobe as gone and remove its instruction buffer. */
2095static void kill_kprobe(struct kprobe *p)
2096{
2097        struct kprobe *kp;
2098
2099        p->flags |= KPROBE_FLAG_GONE;
2100        if (kprobe_aggrprobe(p)) {
2101                /*
2102                 * If this is an aggr_kprobe, we have to walk all the
2103                 * chained probes and mark them GONE.
2104                 */
2105                list_for_each_entry_rcu(kp, &p->list, list)
2106                        kp->flags |= KPROBE_FLAG_GONE;
2107                p->post_handler = NULL;
2108                p->break_handler = NULL;
2109                kill_optimized_kprobe(p);
2110        }
2111        /*
2112         * Here, we can remove insn_slot safely, because no thread calls
2113         * the original probed function (which will be freed soon) any more.
2114         */
2115        arch_remove_kprobe(p);
2116}
2117
2118/* Disable one kprobe */
2119int disable_kprobe(struct kprobe *kp)
2120{
2121        int ret = 0;
2122        struct kprobe *p;
2123
2124        mutex_lock(&kprobe_mutex);
2125
2126        /* Disable this kprobe */
2127        p = __disable_kprobe(kp);
2128        if (IS_ERR(p))
2129                ret = PTR_ERR(p);
2130
2131        mutex_unlock(&kprobe_mutex);
2132        return ret;
2133}
2134EXPORT_SYMBOL_GPL(disable_kprobe);
2135
2136/* Enable one kprobe */
2137int enable_kprobe(struct kprobe *kp)
2138{
2139        int ret = 0;
2140        struct kprobe *p;
2141
2142        mutex_lock(&kprobe_mutex);
2143
2144        /* Check whether the specified probe is valid. */
2145        p = __get_valid_kprobe(kp);
2146        if (unlikely(p == NULL)) {
2147                ret = -EINVAL;
2148                goto out;
2149        }
2150
2151        if (kprobe_gone(kp)) {
2152                /* This kprobe is gone; we can't enable it. */
2153                ret = -EINVAL;
2154                goto out;
2155        }
2156
2157        if (p != kp)
2158                kp->flags &= ~KPROBE_FLAG_DISABLED;
2159
2160        if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2161                p->flags &= ~KPROBE_FLAG_DISABLED;
2162                ret = arm_kprobe(p);
2163                if (ret)
2164                        p->flags |= KPROBE_FLAG_DISABLED;
2165        }
2166out:
2167        mutex_unlock(&kprobe_mutex);
2168        return ret;
2169}
2170EXPORT_SYMBOL_GPL(enable_kprobe);
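
/*
 * Sketch of the disable/enable pair above (illustrative): a probe may
 * be registered in the disabled state and armed only when needed,
 * which defers the text-patching cost.
 *
 *        my_kp.flags = KPROBE_FLAG_DISABLED;
 *        ret = register_kprobe(&my_kp);  registered but not armed
 *        ...
 *        ret = enable_kprobe(&my_kp);    patch in the breakpoint
 *        ...
 *        ret = disable_kprobe(&my_kp);   unpatch, stay registered
 */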
2171
2172void dump_kprobe(struct kprobe *kp)
2173{
2174        printk(KERN_WARNING "Dumping kprobe:\n");
2175        printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2176               kp->symbol_name, kp->addr, kp->offset);
2177}
2178NOKPROBE_SYMBOL(dump_kprobe);
2179
2180/*
2181 * Lookup and populate the kprobe_blacklist.
2182 *
2183 * Unlike the kretprobe blacklist, we'll need to determine
2184 * the range of addresses that belong to the listed functions,
2185 * since a kprobe need not necessarily be at the beginning
2186 * of a function.
2187 */
2188static int __init populate_kprobe_blacklist(unsigned long *start,
2189                                             unsigned long *end)
2190{
2191        unsigned long *iter;
2192        struct kprobe_blacklist_entry *ent;
2193        unsigned long entry, offset = 0, size = 0;
2194
2195        for (iter = start; iter < end; iter++) {
2196                entry = arch_deref_entry_point((void *)*iter);
2197
2198                if (!kernel_text_address(entry) ||
2199                    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2200                        pr_err("Failed to find blacklist at %p\n",
2201                                (void *)entry);
2202                        continue;
2203                }
2204
2205                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2206                if (!ent)
2207                        return -ENOMEM;
2208                ent->start_addr = entry;
2209                ent->end_addr = entry + size;
2210                INIT_LIST_HEAD(&ent->list);
2211                list_add_tail(&ent->list, &kprobe_blacklist);
2212        }
2213        return 0;
2214}
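
/*
 * Entries reach the _kprobe_blacklist section via the NOKPROBE_SYMBOL()
 * annotation; a sketch of how a function opts out of probing
 * (function name illustrative):
 *
 *        static int sensitive_helper(void)
 *        {
 *                ...
 *        }
 *        NOKPROBE_SYMBOL(sensitive_helper);
 *
 * populate_kprobe_blacklist() then widens each recorded entry point to
 * the function's full [start_addr, end_addr) range via kallsyms, so
 * probes anywhere inside the body are rejected, not just at entry.
 */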
2215
2216/* Module notifier callback, checking kprobes on the module */
2217static int kprobes_module_callback(struct notifier_block *nb,
2218                                   unsigned long val, void *data)
2219{
2220        struct module *mod = data;
2221        struct hlist_head *head;
2222        struct kprobe *p;
2223        unsigned int i;
2224        int checkcore = (val == MODULE_STATE_GOING);
2225
2226        if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2227                return NOTIFY_DONE;
2228
2229        /*
2230         * On MODULE_STATE_GOING, both the module's .text and .init.text
2231         * sections will be freed. On MODULE_STATE_LIVE, only the
2232         * .init.text section will be freed. We need to disable any
2233         * kprobes that have been inserted into those sections.
2234         */
2235        mutex_lock(&kprobe_mutex);
2236        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2237                head = &kprobe_table[i];
2238                hlist_for_each_entry_rcu(p, head, hlist)
2239                        if (within_module_init((unsigned long)p->addr, mod) ||
2240                            (checkcore &&
2241                             within_module_core((unsigned long)p->addr, mod))) {
2242                                /*
2243                                 * The vaddr this probe is installed at will
2244                                 * soon be vfreed but not synced to disk.
2245                                 * Hence, disarming the breakpoint isn't needed.
2246                                 *
2247                                 * Note, this also moves any optimized probes
2248                                 * that are pending removal from their
2249                                 * corresponding lists to the freeing_list, so
2250                                 * they will not be touched by the delayed
2251                                 * kprobe_optimizer work handler.
2252                                 */
2253                                kill_kprobe(p);
2254                        }
2255        }
2256        mutex_unlock(&kprobe_mutex);
2257        return NOTIFY_DONE;
2258}
2259
2260static struct notifier_block kprobe_module_nb = {
2261        .notifier_call = kprobes_module_callback,
2262        .priority = 0
2263};
2264
2265/* Markers of _kprobe_blacklist section */
2266extern unsigned long __start_kprobe_blacklist[];
2267extern unsigned long __stop_kprobe_blacklist[];
2268
2269static int __init init_kprobes(void)
2270{
2271        int i, err = 0;
2272
2273        /* FIXME allocate the probe table, currently defined statically */
2274        /* initialize all list heads */
2275        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2276                INIT_HLIST_HEAD(&kprobe_table[i]);
2277                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2278                raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2279        }
2280
2281        err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2282                                        __stop_kprobe_blacklist);
2283        if (err) {
2284                pr_err("kprobes: failed to populate blacklist: %d\n", err);
2285                pr_err("Please take care when using kprobes.\n");
2286        }
2287
2288        if (kretprobe_blacklist_size) {
2289                /* lookup the function address from its name */
2290                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2291                        kretprobe_blacklist[i].addr =
2292                                kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2293                        if (!kretprobe_blacklist[i].addr)
2294                                pr_err("kretprobe: lookup failed: %s\n",
2295                                       kretprobe_blacklist[i].name);
2296                }
2297        }
2298
2299#if defined(CONFIG_OPTPROBES)
2300#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2301        /* Init kprobe_optinsn_slots */
2302        kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2303#endif
2304        /* By default, kprobes can be optimized */
2305        kprobes_allow_optimization = true;
2306#endif
2307
2308        /* By default, kprobes are armed */
2309        kprobes_all_disarmed = false;
2310
2311        err = arch_init_kprobes();
2312        if (!err)
2313                err = register_die_notifier(&kprobe_exceptions_nb);
2314        if (!err)
2315                err = register_module_notifier(&kprobe_module_nb);
2316
2317        kprobes_initialized = (err == 0);
2318
2319        if (!err)
2320                init_test_probes();
2321        return err;
2322}
2323
2324#ifdef CONFIG_DEBUG_FS
2325static void report_probe(struct seq_file *pi, struct kprobe *p,
2326                const char *sym, int offset, char *modname, struct kprobe *pp)
2327{
2328        char *kprobe_type;
2329
2330        if (p->pre_handler == pre_handler_kretprobe)
2331                kprobe_type = "r";
2332        else if (p->pre_handler == setjmp_pre_handler)
2333                kprobe_type = "j";
2334        else
2335                kprobe_type = "k";
2336
2337        if (sym)
2338                seq_printf(pi, "%p  %s  %s+0x%x  %s ",
2339                        p->addr, kprobe_type, sym, offset,
2340                        (modname ? modname : " "));
2341        else
2342                seq_printf(pi, "%p  %s  %p ",
2343                        p->addr, kprobe_type, p->addr);
2344
2345        if (!pp)
2346                pp = p;
2347        seq_printf(pi, "%s%s%s%s\n",
2348                (kprobe_gone(p) ? "[GONE]" : ""),
2349                ((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2350                (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2351                (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2352}
2353
2354static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2355{
2356        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2357}
2358
2359static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2360{
2361        (*pos)++;
2362        if (*pos >= KPROBE_TABLE_SIZE)
2363                return NULL;
2364        return pos;
2365}
2366
2367static void kprobe_seq_stop(struct seq_file *f, void *v)
2368{
2369        /* Nothing to do */
2370}
2371
2372static int show_kprobe_addr(struct seq_file *pi, void *v)
2373{
2374        struct hlist_head *head;
2375        struct kprobe *p, *kp;
2376        const char *sym = NULL;
2377        unsigned int i = *(loff_t *) v;
2378        unsigned long offset = 0;
2379        char *modname, namebuf[KSYM_NAME_LEN];
2380
2381        head = &kprobe_table[i];
2382        preempt_disable();
2383        hlist_for_each_entry_rcu(p, head, hlist) {
2384                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2385                                        &offset, &modname, namebuf);
2386                if (kprobe_aggrprobe(p)) {
2387                        list_for_each_entry_rcu(kp, &p->list, list)
2388                                report_probe(pi, kp, sym, offset, modname, p);
2389                } else
2390                        report_probe(pi, p, sym, offset, modname, NULL);
2391        }
2392        preempt_enable();
2393        return 0;
2394}
2395
2396static const struct seq_operations kprobes_seq_ops = {
2397        .start = kprobe_seq_start,
2398        .next  = kprobe_seq_next,
2399        .stop  = kprobe_seq_stop,
2400        .show  = show_kprobe_addr
2401};
2402
2403static int kprobes_open(struct inode *inode, struct file *filp)
2404{
2405        return seq_open(filp, &kprobes_seq_ops);
2406}
2407
2408static const struct file_operations debugfs_kprobes_operations = {
2409        .open           = kprobes_open,
2410        .read           = seq_read,
2411        .llseek         = seq_lseek,
2412        .release        = seq_release,
2413};
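
/*
 * A line of the resulting debugfs "list" file, as emitted by
 * report_probe() above, looks roughly like (address form varies with
 * printk %p hashing; values illustrative):
 *
 *        0000000012345678  k  vfs_read+0x0  [DISABLED]
 *
 * Second column: 'k' = kprobe, 'r' = kretprobe, 'j' = jprobe; then
 * symbol+offset, the module name if any, and status flags.
 */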
2414
2415/* kprobes/blacklist -- shows which functions cannot be probed */
2416static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2417{
2418        return seq_list_start(&kprobe_blacklist, *pos);
2419}
2420
2421static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2422{
2423        return seq_list_next(v, &kprobe_blacklist, pos);
2424}
2425
2426static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2427{
2428        struct kprobe_blacklist_entry *ent =
2429                list_entry(v, struct kprobe_blacklist_entry, list);
2430
2431        seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2432                   (void *)ent->end_addr, (void *)ent->start_addr);
2433        return 0;
2434}
2435
2436static const struct seq_operations kprobe_blacklist_seq_ops = {
2437        .start = kprobe_blacklist_seq_start,
2438        .next  = kprobe_blacklist_seq_next,
2439        .stop  = kprobe_seq_stop,       /* Reuse void function */
2440        .show  = kprobe_blacklist_seq_show,
2441};
2442
2443static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
2444{
2445        return seq_open(filp, &kprobe_blacklist_seq_ops);
2446}
2447
2448static const struct file_operations debugfs_kprobe_blacklist_ops = {
2449        .open           = kprobe_blacklist_open,
2450        .read           = seq_read,
2451        .llseek         = seq_lseek,
2452        .release        = seq_release,
2453};
2454
2455static int arm_all_kprobes(void)
2456{
2457        struct hlist_head *head;
2458        struct kprobe *p;
2459        unsigned int i, total = 0, errors = 0;
2460        int err, ret = 0;
2461
2462        mutex_lock(&kprobe_mutex);
2463
2464        /* If kprobes are armed, just return */
2465        if (!kprobes_all_disarmed)
2466                goto already_enabled;
2467
2468        /*
2469         * optimize_kprobe() called by arm_kprobe() checks
2470         * kprobes_all_disarmed, so clear kprobes_all_disarmed before
2471         * calling arm_kprobe().
2472         */
2473        kprobes_all_disarmed = false;
2474        /* Arming kprobes doesn't optimize kprobe itself */
2475        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2476                head = &kprobe_table[i];
2477                /* Arm all kprobes on a best-effort basis */
2478                hlist_for_each_entry_rcu(p, head, hlist) {
2479                        if (!kprobe_disabled(p)) {
2480                                err = arm_kprobe(p);
2481                                if (err)  {
2482                                        errors++;
2483                                        ret = err;
2484                                }
2485                                total++;
2486                        }
2487                }
2488        }
2489
2490        if (errors)
2491                pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
2492                        errors, total);
2493        else
2494                pr_info("Kprobes globally enabled\n");
2495
2496already_enabled:
2497        mutex_unlock(&kprobe_mutex);
2498        return ret;
2499}
2500
2501static int disarm_all_kprobes(void)
2502{
2503        struct hlist_head *head;
2504        struct kprobe *p;
2505        unsigned int i, total = 0, errors = 0;
2506        int err, ret = 0;
2507
2508        mutex_lock(&kprobe_mutex);
2509
2510        /* If kprobes are already disarmed, just return */
2511        if (kprobes_all_disarmed) {
2512                mutex_unlock(&kprobe_mutex);
2513                return 0;
2514        }
2515
2516        kprobes_all_disarmed = true;
2517
2518        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2519                head = &kprobe_table[i];
2520                /* Disarm all kprobes on a best-effort basis */
2521                hlist_for_each_entry_rcu(p, head, hlist) {
2522                        if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2523                                err = disarm_kprobe(p, false);
2524                                if (err) {
2525                                        errors++;
2526                                        ret = err;
2527                                }
2528                                total++;
2529                        }
2530                }
2531        }
2532
2533        if (errors)
2534                pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
2535                        errors, total);
2536        else
2537                pr_info("Kprobes globally disabled\n");
2538
2539        mutex_unlock(&kprobe_mutex);
2540
2541        /* Wait for the optimizer to finish disarming all kprobes */
2542        wait_for_kprobe_optimizer();
2543
2544        return ret;
2545}
2546
2547/*
2548 * XXX: The debugfs bool file interface doesn't allow for callbacks
2549 * when the bool state is switched. We could reuse that facility
2550 * once it becomes available.
2551 */
2552static ssize_t read_enabled_file_bool(struct file *file,
2553               char __user *user_buf, size_t count, loff_t *ppos)
2554{
2555        char buf[3];
2556
2557        if (!kprobes_all_disarmed)
2558                buf[0] = '1';
2559        else
2560                buf[0] = '0';
2561        buf[1] = '\n';
2562        buf[2] = 0x00;
2563        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2564}
2565
2566static ssize_t write_enabled_file_bool(struct file *file,
2567               const char __user *user_buf, size_t count, loff_t *ppos)
2568{
2569        char buf[32];
2570        size_t buf_size;
2571        int ret = 0;
2572
2573        buf_size = min(count, (sizeof(buf)-1));
2574        if (copy_from_user(buf, user_buf, buf_size))
2575                return -EFAULT;
2576
2577        buf[buf_size] = '\0';
2578        switch (buf[0]) {
2579        case 'y':
2580        case 'Y':
2581        case '1':
2582                ret = arm_all_kprobes();
2583                break;
2584        case 'n':
2585        case 'N':
2586        case '0':
2587                ret = disarm_all_kprobes();
2588                break;
2589        default:
2590                return -EINVAL;
2591        }
2592
2593        if (ret)
2594                return ret;
2595
2596        return count;
2597}
2598
2599static const struct file_operations fops_kp = {
2600        .read =         read_enabled_file_bool,
2601        .write =        write_enabled_file_bool,
2602        .llseek =       default_llseek,
2603};
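
/*
 * Userspace toggles global (dis)arming through this debugfs file, e.g.
 * (paths illustrative, assuming debugfs is mounted at the usual place):
 *
 *        echo 0 > /sys/kernel/debug/kprobes/enabled      disarm all probes
 *        echo 1 > /sys/kernel/debug/kprobes/enabled      re-arm all probes
 *
 * write_enabled_file_bool() above accepts '1'/'y'/'Y' to arm and
 * '0'/'n'/'N' to disarm, matching the debugfs bool conventions.
 */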
2604
2605static int __init debugfs_kprobe_init(void)
2606{
2607        struct dentry *dir, *file;
2608        unsigned int value = 1;
2609
2610        dir = debugfs_create_dir("kprobes", NULL);
2611        if (!dir)
2612                return -ENOMEM;
2613
2614        file = debugfs_create_file("list", 0444, dir, NULL,
2615                                &debugfs_kprobes_operations);
2616        if (!file)
2617                goto error;
2618
2619        file = debugfs_create_file("enabled", 0600, dir,
2620                                        &value, &fops_kp);
2621        if (!file)
2622                goto error;
2623
2624        file = debugfs_create_file("blacklist", 0444, dir, NULL,
2625                                &debugfs_kprobe_blacklist_ops);
2626        if (!file)
2627                goto error;
2628
2629        return 0;
2630
2631error:
2632        debugfs_remove(dir);
2633        return -ENOMEM;
2634}
2635
2636late_initcall(debugfs_kprobe_init);
2637#endif /* CONFIG_DEBUG_FS */
2638
2639module_init(init_kprobes);
2640
2641/* defined in arch/.../kernel/kprobes.c */
2642EXPORT_SYMBOL_GPL(jprobe_return);
2643