// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
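
/*
 * Sizing example (a sketch; the exact numbers are arch-dependent): on an
 * arch where kprobe_opcode_t is one byte, MAX_INSN_SIZE is 16 and PAGE_SIZE
 * is 4096, slots_per_page() yields 4096 / (16 * 1) = 256 slots per page,
 * and KPROBE_INSN_PAGE_SIZE(256) is offsetof(struct kprobe_insn_page,
 * slot_used) plus 256 bytes for the slot_used[] state array.
 */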

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};
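
/*
 * Slot life cycle: a slot starts SLOT_CLEAN, becomes SLOT_USED when handed
 * out by __get_insn_slot(), and is either returned to SLOT_CLEAN directly
 * or marked SLOT_DIRTY by __free_insn_slot() for deferred reclaim by
 * collect_garbage_slots().
 */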

void __weak *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

void __weak free_insn_page(void *page)
{
	module_memfree(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
 retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}
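
/*
 * Typical use (a sketch): arch code allocates a slot for the copied
 * instruction through the get_insn_slot()/free_insn_slot() wrappers that
 * include/linux/kprobes.h generates for this cache, e.g.:
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	...
 *	free_insn_slot(p->ainsn.insn, dirty);
 */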

/* Return 1 if all garbage slots on the page are collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no task is still executing on the garbage slots */
	synchronize_rcu();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}

/*
 * Check whether the given address is on a page of kprobe instruction slots.
 * This is used to check whether an address found on a stack is in a text
 * area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);
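
/*
 * Lookup sketch (an illustration, not a new API): callers either hold
 * kprobe_mutex or run with preemption disabled, so the RCU list walk in
 * get_kprobe() is safe, e.g.:
 *
 *	preempt_disable();
 *	p = get_kprobe((void *)addr);
 *	if (p)
 *		...	// p stays valid until preemption is re-enabled
 *	preempt_enable();
 */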

static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all the pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on the hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * the instructions containing addr (excluding the breakpoint itself).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5
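
/*
 * The delay above is in jiffies: kick_kprobe_optimizer() schedules the
 * delayed work so that multiple (un)optimization requests arriving close
 * together are batched into a single pass of kprobe_optimizer().
 */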

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
	lockdep_assert_held(&text_mutex);
	/*
	 * The optimization/unoptimization code uses online_cpus via
	 * stop_machine(), while cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex is held both by cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to
	 * lock text_mutex, but stop_machine() cannot proceed because
	 * online_cpus has changed).
	 * To avoid this deadlock, the caller must have locked cpu hotplug,
	 * preventing cpu-hotplug from running outside of the text_mutex
	 * critical section.
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done while kprobes are all disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	arch_optimize_kprobes(&optimizing_list);
}

/*
 * Unoptimize (replace a jump with a breakpoint, and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	lockdep_assert_held(&text_mutex);
	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must be done under any circumstances */
	if (list_empty(&unoptimizing_list))
		return;

	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop over freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from the hash list. After
			 * waiting for synchronization, these probes are
			 * reclaimed (by do_free_cleaned_kprobes()).
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
}

/* Reclaim all kprobes on freeing_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		list_del_init(&op->list);
		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
			/*
			 * This must not happen, but if there is a kprobe
			 * still in use, keep it on the kprobes hash list.
			 */
			continue;
		}
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure that all
	 * potentially preempted tasks have been scheduled normally. Because
	 * an optprobe may modify multiple instructions, there is a chance
	 * that a task was preempted on the Nth instruction. In that case,
	 * the task could resume in the 2nd-Nth byte of the jump instruction.
	 * This wait avoids that. Note that on a non-preemptive kernel, this
	 * is transparently converted to synchronize_sched(), which waits for
	 * all interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}

/* Wait for optimization and unoptimization to complete */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* This will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

/* Optimize a kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* kprobes with a post_handler cannot be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes within the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* It is being unoptimized. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Shortcut for direct unoptimization */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is neither an optprobe nor an optimized kprobe */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if the kprobe is queued for unoptimizing and
			 * forced, unoptimize it immediately. (There is no
			 * need to unoptimize an already unoptimized kprobe.)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimization so the kprobe can be reused */
static int reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	/*
	 * An unused kprobe MUST be in the middle of delayed unoptimizing
	 * (which means there is still a relative jump in place) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	WARN_ON_ONCE(list_empty(&op->list));
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (this removes it from op->list) */
	if (!kprobe_optready(ap))
		return -EINVAL;

	optimize_kprobe(ap);
	return 0;
}

/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	__prepare_optimized_kprobe(op, p);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	__prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it.
 * NOTE: p must be a normal registered kprobe.
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* It is impossible to optimize an ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If preparing the optimization failed, fall back to a kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_SYSCTL
static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}

static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
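/*
 * This knob is exposed via sysctl (debug.kprobes-optimization): writing 1
 * calls optimize_all_kprobes() and writing 0 calls unoptimize_all_kprobes()
 * through the handler below.
 */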
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */

/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize other probes after this one has been unoptimized */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * When optimized kprobes are not supported, the aggr kprobe is
	 * released as soon as the last aggregated kprobe is unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really on ftrace */
static int prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static int arm_kprobe_ftrace(struct kprobe *p)
{
	int ret = 0;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	if (ret) {
		pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
			 p->addr, ret);
		return ret;
	}

	if (kprobe_ftrace_enabled == 0) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		if (ret) {
			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
			goto err_ftrace;
		}
	}

	kprobe_ftrace_enabled++;
	return ret;

err_ftrace:
	/*
	 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
	 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
	 * empty filter_hash which would undesirably trace all functions.
	 */
	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
	return ret;
}

/* Caller must lock kprobe_mutex */
static int disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret = 0;

	if (kprobe_ftrace_enabled == 1) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
			return ret;
	}

	kprobe_ftrace_enabled--;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
			   (unsigned long)p->addr, 1, 0);
	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
		  p->addr, ret);
	return ret;
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	(-ENODEV)
#define disarm_kprobe_ftrace(p) (-ENODEV)
#endif

/* Arm a kprobe with text_mutex */
static int arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp)))
		return arm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

/* Disarm a kprobe with text_mutex */
static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp)))
		return disarm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * If we faulted during the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_fault_handler);

/* Walks the list and increments nmissed count for multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

void recycle_rp_inst(struct kretprobe_instance *ri,
		     struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* Remove rp inst from the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
NOKPROBE_SYMBOL(recycle_rp_inst);

void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_lock);

static void kretprobe_table_lock(unsigned long hash,
				 unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_lock);

void kretprobe_hash_unlock(struct task_struct *tsk,
			   unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_unlock);

static void kretprobe_table_unlock(unsigned long hash,
				   unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
NOKPROBE_SYMBOL(kprobe_flush_task);

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);

/* Add the new probe to ap->list */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	if (p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has already gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
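
/*
 * Resulting structure (a sketch): the manager kprobe ap takes the original
 * probe's place in kprobe_table, and every user kprobe registered at that
 * address hangs off ap->list:
 *
 *	kprobe_table[hash] --> ap (pre_handler == aggr_pre_handler)
 *	                        \-- ap->list: p1 -- p2 -- ...
 */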

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	cpus_read_lock();

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap)) {
		/* This probe is going to die. Rescue it */
		ret = reuse_unused_kprobe(ap);
		if (ret)
			goto out;
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as a probe in a module vaddr area that has
		 * already been freed, so the instruction slot has already
		 * been released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used
			 * next time, or freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed) {
			/* Arm the breakpoint again. */
			ret = arm_kprobe(ap);
			if (ret) {
				ap->flags |= KPROBE_FLAG_DISABLED;
				list_del_rcu(&p->list);
				synchronize_rcu();
			}
		}
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The __kprobes marked functions and entry code must not be probed */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

static bool __within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}
	return false;
}

bool within_kprobe_blacklist(unsigned long addr)
{
	char symname[KSYM_NAME_LEN], *p;

	if (__within_kprobe_blacklist(addr))
		return true;

	/* Check if the address is within a suffixed symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return false;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_kprobe_blacklist(addr);
	}
	return false;
}
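
/*
 * Example: compilers may emit suffixed clones of a blacklisted function,
 * such as "<symbol>.constprop.0" or "<symbol>.cold". The check above strips
 * the suffix and re-checks the base symbol against the blacklist.
 */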

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns an encoded error if it fails to look up the symbol or if
 * the combination of parameters is invalid.
 */
static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
			const char *symbol_name, unsigned int offset)
{
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;

	if (symbol_name) {
		addr = kprobe_lookup_name(symbol_name, offset);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
}
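
/*
 * Address resolution example (a sketch): a user fills in either ->addr or
 * ->symbol_name (never both), optionally with an offset, e.g.:
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "do_sys_open",
 *		.offset		= 0x10,		// probe symbol + 0x10
 *	};
 *
 * kprobe_addr() then resolves this to do_sys_open + 0x10, or returns an
 * ERR_PTR() on failure.
 */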

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* The given address is not on an instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = arch_check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in a reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module has already freed .init.text, we can't
		 * insert kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}
1551
1552int register_kprobe(struct kprobe *p)
1553{
1554        int ret;
1555        struct kprobe *old_p;
1556        struct module *probed_mod;
1557        kprobe_opcode_t *addr;
1558
1559        /* Adjust probe address from symbol */
1560        addr = kprobe_addr(p);
1561        if (IS_ERR(addr))
1562                return PTR_ERR(addr);
1563        p->addr = addr;
1564
1565        ret = check_kprobe_rereg(p);
1566        if (ret)
1567                return ret;
1568
1569        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1570        p->flags &= KPROBE_FLAG_DISABLED;
1571        p->nmissed = 0;
1572        INIT_LIST_HEAD(&p->list);
1573
1574        ret = check_kprobe_address_safe(p, &probed_mod);
1575        if (ret)
1576                return ret;
1577
1578        mutex_lock(&kprobe_mutex);
1579
1580        old_p = get_kprobe(p->addr);
1581        if (old_p) {
1582                /* Since this may unoptimize old_p, locking text_mutex. */
1583                ret = register_aggr_kprobe(old_p, p);
1584                goto out;
1585        }
1586
1587        cpus_read_lock();
1588        /* Prevent text modification */
1589        mutex_lock(&text_mutex);
1590        ret = prepare_kprobe(p);
1591        mutex_unlock(&text_mutex);
1592        cpus_read_unlock();
1593        if (ret)
1594                goto out;
1595
1596        INIT_HLIST_NODE(&p->hlist);
1597        hlist_add_head_rcu(&p->hlist,
1598                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1599
1600        if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1601                ret = arm_kprobe(p);
1602                if (ret) {
1603                        hlist_del_rcu(&p->hlist);
1604                        synchronize_rcu();
1605                        goto out;
1606                }
1607        }
1608
1609        /* Try to optimize kprobe */
1610        try_to_optimize_kprobe(p);
1611out:
1612        mutex_unlock(&kprobe_mutex);
1613
1614        if (probed_mod)
1615                module_put(probed_mod);
1616
1617        return ret;
1618}
1619EXPORT_SYMBOL_GPL(register_kprobe);
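/*
 * Usage sketch (illustrative only, kept out of the build with #if 0): a
 * minimal caller registers a pre-handler on a kernel symbol. The symbol
 * and the example_* names are assumptions; see samples/kprobes/ for a
 * complete module.
 */
#if 0
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %pS\n", p->addr);
	return 0;	/* let the single-step of the original insn proceed */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* assumed symbol; any text address works */
	.pre_handler	= example_pre,
};

	/* in module init: */
	ret = register_kprobe(&example_kp);	/* negative errno on failure */
#endif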
1620
1621/* Check if all probes on the aggrprobe are disabled */
1622static int aggr_kprobe_disabled(struct kprobe *ap)
1623{
1624        struct kprobe *kp;
1625
1626        list_for_each_entry_rcu(kp, &ap->list, list)
1627                if (!kprobe_disabled(kp))
1628                        /*
1629                         * There is an active probe on the list.
1630                         * We can't disable this ap.
1631                         */
1632                        return 0;
1633
1634        return 1;
1635}
1636
1637/* Disable one kprobe: The caller must hold kprobe_mutex */
1638static struct kprobe *__disable_kprobe(struct kprobe *p)
1639{
1640        struct kprobe *orig_p;
1641        int ret;
1642
1643        /* Get an original kprobe for return */
1644        orig_p = __get_valid_kprobe(p);
1645        if (unlikely(orig_p == NULL))
1646                return ERR_PTR(-EINVAL);
1647
1648        if (!kprobe_disabled(p)) {
1649                /* Disable probe if it is a child probe */
1650                if (p != orig_p)
1651                        p->flags |= KPROBE_FLAG_DISABLED;
1652
1653                /* Try to disarm and disable this/parent probe */
1654                if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1655                        /*
1656                         * If kprobes_all_disarmed is set, orig_p
1657                         * should have already been disarmed, so
1658                         * skip the unneeded disarming process.
1659                         */
1660                        if (!kprobes_all_disarmed) {
1661                                ret = disarm_kprobe(orig_p, true);
1662                                if (ret) {
1663                                        p->flags &= ~KPROBE_FLAG_DISABLED;
1664                                        return ERR_PTR(ret);
1665                                }
1666                        }
1667                        orig_p->flags |= KPROBE_FLAG_DISABLED;
1668                }
1669        }
1670
1671        return orig_p;
1672}
1673
1674/*
1675 * Unregister a kprobe without scheduler synchronization.
1676 */
1677static int __unregister_kprobe_top(struct kprobe *p)
1678{
1679        struct kprobe *ap, *list_p;
1680
1681        /* Disable kprobe. This will disarm it if needed. */
1682        ap = __disable_kprobe(p);
1683        if (IS_ERR(ap))
1684                return PTR_ERR(ap);
1685
1686        if (ap == p)
1687                /*
1688                 * This probe is an independent (and non-optimized) kprobe
1689                 * (not an aggrprobe). Remove from the hash list.
1690                 */
1691                goto disarmed;
1692
1693        /* The following process expects this probe to be an aggrprobe */
1694        WARN_ON(!kprobe_aggrprobe(ap));
1695
1696        if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1697                /*
1698                 * !disarmed can happen if the probe is under delayed
1699                 * unoptimization.
1700                 */
1701                goto disarmed;
1702        else {
1703                /* If the probe being disabled has special handlers, update the aggrprobe */
1704                if (p->post_handler && !kprobe_gone(p)) {
1705                        list_for_each_entry_rcu(list_p, &ap->list, list) {
1706                                if ((list_p != p) && (list_p->post_handler))
1707                                        goto noclean;
1708                        }
1709                        ap->post_handler = NULL;
1710                }
1711noclean:
1712                /*
1713                 * Remove from the aggrprobe: this path will do nothing in
1714                 * __unregister_kprobe_bottom().
1715                 */
1716                list_del_rcu(&p->list);
1717                if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1718                        /*
1719                         * Try to optimize this probe again, because post
1720                         * handler may have been changed.
1721                         */
1722                        optimize_kprobe(ap);
1723        }
1724        return 0;
1725
1726disarmed:
1727        hlist_del_rcu(&ap->hlist);
1728        return 0;
1729}
1730
1731static void __unregister_kprobe_bottom(struct kprobe *p)
1732{
1733        struct kprobe *ap;
1734
1735        if (list_empty(&p->list))
1736                /* This is an independent kprobe */
1737                arch_remove_kprobe(p);
1738        else if (list_is_singular(&p->list)) {
1739                /* This is the last child of an aggrprobe */
1740                ap = list_entry(p->list.next, struct kprobe, list);
1741                list_del(&p->list);
1742                free_aggr_kprobe(ap);
1743        }
1744        /* Otherwise, do nothing. */
1745}
1746
1747int register_kprobes(struct kprobe **kps, int num)
1748{
1749        int i, ret = 0;
1750
1751        if (num <= 0)
1752                return -EINVAL;
1753        for (i = 0; i < num; i++) {
1754                ret = register_kprobe(kps[i]);
1755                if (ret < 0) {
1756                        if (i > 0)
1757                                unregister_kprobes(kps, i);
1758                        break;
1759                }
1760        }
1761        return ret;
1762}
1763EXPORT_SYMBOL_GPL(register_kprobes);
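/*
 * Note that batch registration is all-or-nothing: if kps[i] fails, the
 * already-registered kps[0..i-1] are rolled back before the error is
 * returned. A sketch (kp1/kp2 are hypothetical kprobes):
 */
#if 0
	struct kprobe *kps[] = { &kp1, &kp2 };

	ret = register_kprobes(kps, ARRAY_SIZE(kps));	/* 0 or first error */
#endif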
1764
1765void unregister_kprobe(struct kprobe *p)
1766{
1767        unregister_kprobes(&p, 1);
1768}
1769EXPORT_SYMBOL_GPL(unregister_kprobe);
1770
1771void unregister_kprobes(struct kprobe **kps, int num)
1772{
1773        int i;
1774
1775        if (num <= 0)
1776                return;
1777        mutex_lock(&kprobe_mutex);
1778        for (i = 0; i < num; i++)
1779                if (__unregister_kprobe_top(kps[i]) < 0)
1780                        kps[i]->addr = NULL;
1781        mutex_unlock(&kprobe_mutex);
1782
1783        synchronize_rcu();
1784        for (i = 0; i < num; i++)
1785                if (kps[i]->addr)
1786                        __unregister_kprobe_bottom(kps[i]);
1787}
1788EXPORT_SYMBOL_GPL(unregister_kprobes);
1789
1790int __weak kprobe_exceptions_notify(struct notifier_block *self,
1791                                        unsigned long val, void *data)
1792{
1793        return NOTIFY_DONE;
1794}
1795NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1796
1797static struct notifier_block kprobe_exceptions_nb = {
1798        .notifier_call = kprobe_exceptions_notify,
1799        .priority = 0x7fffffff /* we need to be notified first */
1800};
1801
1802unsigned long __weak arch_deref_entry_point(void *entry)
1803{
1804        return (unsigned long)entry;
1805}
1806
1807#ifdef CONFIG_KRETPROBES
1808/*
1809 * This kprobe pre_handler is registered with every kretprobe. When the
1810 * probe hits, it sets up the return probe.
1811 */
1812static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
1813{
1814        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1815        unsigned long hash, flags = 0;
1816        struct kretprobe_instance *ri;
1817
1818        /*
1819         * To avoid deadlocks, prohibit return probing in NMI contexts;
1820         * just skip the probe and increase the (inexact) 'nmissed'
1821         * statistical counter, so that the user is informed that
1822         * something happened:
1823         */
1824        if (unlikely(in_nmi())) {
1825                rp->nmissed++;
1826                return 0;
1827        }
1828
1829        /* TODO: consider swapping the RA only after the last pre_handler has fired */
1830        hash = hash_ptr(current, KPROBE_HASH_BITS);
1831        raw_spin_lock_irqsave(&rp->lock, flags);
1832        if (!hlist_empty(&rp->free_instances)) {
1833                ri = hlist_entry(rp->free_instances.first,
1834                                struct kretprobe_instance, hlist);
1835                hlist_del(&ri->hlist);
1836                raw_spin_unlock_irqrestore(&rp->lock, flags);
1837
1838                ri->rp = rp;
1839                ri->task = current;
1840
1841                if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1842                        raw_spin_lock_irqsave(&rp->lock, flags);
1843                        hlist_add_head(&ri->hlist, &rp->free_instances);
1844                        raw_spin_unlock_irqrestore(&rp->lock, flags);
1845                        return 0;
1846                }
1847
1848                arch_prepare_kretprobe(ri, regs);
1849
1850                /* XXX(hch): why is there no hlist_move_head? */
1851                INIT_HLIST_NODE(&ri->hlist);
1852                kretprobe_table_lock(hash, &flags);
1853                hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1854                kretprobe_table_unlock(hash, &flags);
1855        } else {
1856                rp->nmissed++;
1857                raw_spin_unlock_irqrestore(&rp->lock, flags);
1858        }
1859        return 0;
1860}
1861NOKPROBE_SYMBOL(pre_handler_kretprobe);
1862
1863bool __weak arch_kprobe_on_func_entry(unsigned long offset)
1864{
1865        return !offset;
1866}
1867
1868bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
1869{
1870        kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
1871
1872        if (IS_ERR(kp_addr))
1873                return false;
1874
1875        if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
1876                                                !arch_kprobe_on_func_entry(offset))
1877                return false;
1878
1879        return true;
1880}
1881
1882int register_kretprobe(struct kretprobe *rp)
1883{
1884        int ret = 0;
1885        struct kretprobe_instance *inst;
1886        int i;
1887        void *addr;
1888
1889        if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
1890                return -EINVAL;
1891
1892        if (kretprobe_blacklist_size) {
1893                addr = kprobe_addr(&rp->kp);
1894                if (IS_ERR(addr))
1895                        return PTR_ERR(addr);
1896
1897                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1898                        if (kretprobe_blacklist[i].addr == addr)
1899                                return -EINVAL;
1900                }
1901        }
1902
1903        rp->kp.pre_handler = pre_handler_kretprobe;
1904        rp->kp.post_handler = NULL;
1905        rp->kp.fault_handler = NULL;
1906
1907        /* Pre-allocate memory for max kretprobe instances */
1908        if (rp->maxactive <= 0) {
1909#ifdef CONFIG_PREEMPT
1910                rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1911#else
1912                rp->maxactive = num_possible_cpus();
1913#endif
1914        }
1915        raw_spin_lock_init(&rp->lock);
1916        INIT_HLIST_HEAD(&rp->free_instances);
1917        for (i = 0; i < rp->maxactive; i++) {
1918                inst = kmalloc(sizeof(struct kretprobe_instance) +
1919                               rp->data_size, GFP_KERNEL);
1920                if (inst == NULL) {
1921                        free_rp_inst(rp);
1922                        return -ENOMEM;
1923                }
1924                INIT_HLIST_NODE(&inst->hlist);
1925                hlist_add_head(&inst->hlist, &rp->free_instances);
1926        }
1927
1928        rp->nmissed = 0;
1929        /* Establish function entry probe point */
1930        ret = register_kprobe(&rp->kp);
1931        if (ret != 0)
1932                free_rp_inst(rp);
1933        return ret;
1934}
1935EXPORT_SYMBOL_GPL(register_kretprobe);
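/*
 * Usage sketch (illustrative only): the return handler can read the probed
 * function's return value via regs_return_value(). The names below are
 * assumptions; see samples/kprobes/kretprobe_example.c for a complete module.
 */
#if 0
static int example_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
		regs_return_value(regs));
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= example_ret,
	.kp.symbol_name	= "do_fork",	/* assumed; must be a function entry */
	.maxactive	= 0,		/* 0 => default sizing chosen above */
};

	ret = register_kretprobe(&example_rp);
#endif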
1936
1937int register_kretprobes(struct kretprobe **rps, int num)
1938{
1939        int ret = 0, i;
1940
1941        if (num <= 0)
1942                return -EINVAL;
1943        for (i = 0; i < num; i++) {
1944                ret = register_kretprobe(rps[i]);
1945                if (ret < 0) {
1946                        if (i > 0)
1947                                unregister_kretprobes(rps, i);
1948                        break;
1949                }
1950        }
1951        return ret;
1952}
1953EXPORT_SYMBOL_GPL(register_kretprobes);
1954
1955void unregister_kretprobe(struct kretprobe *rp)
1956{
1957        unregister_kretprobes(&rp, 1);
1958}
1959EXPORT_SYMBOL_GPL(unregister_kretprobe);
1960
1961void unregister_kretprobes(struct kretprobe **rps, int num)
1962{
1963        int i;
1964
1965        if (num <= 0)
1966                return;
1967        mutex_lock(&kprobe_mutex);
1968        for (i = 0; i < num; i++)
1969                if (__unregister_kprobe_top(&rps[i]->kp) < 0)
1970                        rps[i]->kp.addr = NULL;
1971        mutex_unlock(&kprobe_mutex);
1972
1973        synchronize_rcu();
1974        for (i = 0; i < num; i++) {
1975                if (rps[i]->kp.addr) {
1976                        __unregister_kprobe_bottom(&rps[i]->kp);
1977                        cleanup_rp_inst(rps[i]);
1978                }
1979        }
1980}
1981EXPORT_SYMBOL_GPL(unregister_kretprobes);
1982
1983#else /* CONFIG_KRETPROBES */
1984int register_kretprobe(struct kretprobe *rp)
1985{
1986        return -ENOSYS;
1987}
1988EXPORT_SYMBOL_GPL(register_kretprobe);
1989
1990int register_kretprobes(struct kretprobe **rps, int num)
1991{
1992        return -ENOSYS;
1993}
1994EXPORT_SYMBOL_GPL(register_kretprobes);
1995
1996void unregister_kretprobe(struct kretprobe *rp)
1997{
1998}
1999EXPORT_SYMBOL_GPL(unregister_kretprobe);
2000
2001void unregister_kretprobes(struct kretprobe **rps, int num)
2002{
2003}
2004EXPORT_SYMBOL_GPL(unregister_kretprobes);
2005
2006static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2007{
2008        return 0;
2009}
2010NOKPROBE_SYMBOL(pre_handler_kretprobe);
2011
2012#endif /* CONFIG_KRETPROBES */
2013
2014/* Mark the kprobe as gone and remove its instruction buffer. */
2015static void kill_kprobe(struct kprobe *p)
2016{
2017        struct kprobe *kp;
2018
2019        p->flags |= KPROBE_FLAG_GONE;
2020        if (kprobe_aggrprobe(p)) {
2021                /*
2022                 * If this is an aggr_kprobe, we have to walk all the
2023                 * chained probes and mark them GONE.
2024                 */
2025                list_for_each_entry_rcu(kp, &p->list, list)
2026                        kp->flags |= KPROBE_FLAG_GONE;
2027                p->post_handler = NULL;
2028                kill_optimized_kprobe(p);
2029        }
2030        /*
2031         * Here, we can remove insn_slot safely, because no thread calls
2032         * the original probed function (which will be freed soon) any more.
2033         */
2034        arch_remove_kprobe(p);
2035}
2036
2037/* Disable one kprobe */
2038int disable_kprobe(struct kprobe *kp)
2039{
2040        int ret = 0;
2041        struct kprobe *p;
2042
2043        mutex_lock(&kprobe_mutex);
2044
2045        /* Disable this kprobe */
2046        p = __disable_kprobe(kp);
2047        if (IS_ERR(p))
2048                ret = PTR_ERR(p);
2049
2050        mutex_unlock(&kprobe_mutex);
2051        return ret;
2052}
2053EXPORT_SYMBOL_GPL(disable_kprobe);
2054
2055/* Enable one kprobe */
2056int enable_kprobe(struct kprobe *kp)
2057{
2058        int ret = 0;
2059        struct kprobe *p;
2060
2061        mutex_lock(&kprobe_mutex);
2062
2063        /* Check whether specified probe is valid. */
2064        p = __get_valid_kprobe(kp);
2065        if (unlikely(p == NULL)) {
2066                ret = -EINVAL;
2067                goto out;
2068        }
2069
2070        if (kprobe_gone(kp)) {
2071                /* This kprobe is gone; we can't enable it. */
2072                ret = -EINVAL;
2073                goto out;
2074        }
2075
2076        if (p != kp)
2077                kp->flags &= ~KPROBE_FLAG_DISABLED;
2078
2079        if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2080                p->flags &= ~KPROBE_FLAG_DISABLED;
2081                ret = arm_kprobe(p);
2082                if (ret)
2083                        p->flags |= KPROBE_FLAG_DISABLED;
2084        }
2085out:
2086        mutex_unlock(&kprobe_mutex);
2087        return ret;
2088}
2089EXPORT_SYMBOL_GPL(enable_kprobe);
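/*
 * A registered probe can thus be toggled without re-registering it; a
 * sketch (example_kp is a hypothetical, already-registered kprobe):
 */
#if 0
	disable_kprobe(&example_kp);	/* handlers stop firing, slot is kept */
	enable_kprobe(&example_kp);	/* re-arms unless the probe is gone */
#endif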
2090
2091/* Callers must NOT use this on the usual path; it is only for critical cases */
2092void dump_kprobe(struct kprobe *kp)
2093{
2094        pr_err("Dumping kprobe:\n");
2095        pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
2096               kp->symbol_name, kp->offset, kp->addr);
2097}
2098NOKPROBE_SYMBOL(dump_kprobe);
2099
2100int kprobe_add_ksym_blacklist(unsigned long entry)
2101{
2102        struct kprobe_blacklist_entry *ent;
2103        unsigned long offset = 0, size = 0;
2104
2105        if (!kernel_text_address(entry) ||
2106            !kallsyms_lookup_size_offset(entry, &size, &offset))
2107                return -EINVAL;
2108
2109        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2110        if (!ent)
2111                return -ENOMEM;
2112        ent->start_addr = entry;
2113        ent->end_addr = entry + size;
2114        INIT_LIST_HEAD(&ent->list);
2115        list_add_tail(&ent->list, &kprobe_blacklist);
2116
2117        return (int)size;
2118}
2119
2120/* Add all symbols in the given area to the kprobe blacklist */
2121int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2122{
2123        unsigned long entry;
2124        int ret = 0;
2125
2126        for (entry = start; entry < end; entry += ret) {
2127                ret = kprobe_add_ksym_blacklist(entry);
2128                if (ret < 0)
2129                        return ret;
2130                if (ret == 0)   /* In case of alias symbol */
2131                        ret = 1;
2132        }
2133        return 0;
2134}
2135
2136int __init __weak arch_populate_kprobe_blacklist(void)
2137{
2138        return 0;
2139}
2140
2141/*
2142 * Lookup and populate the kprobe_blacklist.
2143 *
2144 * Unlike the kretprobe blacklist, we'll need to determine
2145 * the range of addresses that belong to these functions,
2146 * since a kprobe need not necessarily be at the beginning
2147 * of a function.
2148 */
2149static int __init populate_kprobe_blacklist(unsigned long *start,
2150                                             unsigned long *end)
2151{
2152        unsigned long entry;
2153        unsigned long *iter;
2154        int ret;
2155
2156        for (iter = start; iter < end; iter++) {
2157                entry = arch_deref_entry_point((void *)*iter);
2158                ret = kprobe_add_ksym_blacklist(entry);
2159                if (ret == -EINVAL)
2160                        continue;
2161                if (ret < 0)
2162                        return ret;
2163        }
2164
2165        /* Symbols in __kprobes_text are blacklisted */
2166        ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2167                                        (unsigned long)__kprobes_text_end);
2168
2169        return ret ? : arch_populate_kprobe_blacklist();
2170}
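/*
 * The _kprobe_blacklist entries walked above are emitted by
 * NOKPROBE_SYMBOL(); a function opts out of probing like this
 * (sensitive_helper is illustrative):
 */
#if 0
static int sensitive_helper(void)
{
	return 0;
}
NOKPROBE_SYMBOL(sensitive_helper);	/* address lands in _kprobe_blacklist */
#endif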
2171
2172/* Module notifier call back, checking kprobes on the module */
2173static int kprobes_module_callback(struct notifier_block *nb,
2174                                   unsigned long val, void *data)
2175{
2176        struct module *mod = data;
2177        struct hlist_head *head;
2178        struct kprobe *p;
2179        unsigned int i;
2180        int checkcore = (val == MODULE_STATE_GOING);
2181
2182        if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2183                return NOTIFY_DONE;
2184
2185        /*
2186         * When MODULE_STATE_GOING is notified, both the module's .text and
2187         * .init.text sections will be freed. When MODULE_STATE_LIVE is
2188         * notified, only the .init.text section will be freed. We need to
2189         * kill the kprobes that have been inserted in those sections.
2190         */
2191        mutex_lock(&kprobe_mutex);
2192        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2193                head = &kprobe_table[i];
2194                hlist_for_each_entry_rcu(p, head, hlist)
2195                        if (within_module_init((unsigned long)p->addr, mod) ||
2196                            (checkcore &&
2197                             within_module_core((unsigned long)p->addr, mod))) {
2198                                /*
2199                                 * The vaddr this probe is installed at will soon
2200                                 * be vfreed but not synced to disk. Hence,
2201                                 * disarming the breakpoint isn't needed.
2202                                 *
2203                                 * Note, this will also move any optimized probes
2204                                 * that are pending to be removed from their
2205                                 * corresponding lists to the freeing_list and
2206                                 * will not be touched by the delayed
2207                                 * kprobe_optimizer work handler.
2208                                 */
2209                                kill_kprobe(p);
2210                        }
2211        }
2212        mutex_unlock(&kprobe_mutex);
2213        return NOTIFY_DONE;
2214}
2215
2216static struct notifier_block kprobe_module_nb = {
2217        .notifier_call = kprobes_module_callback,
2218        .priority = 0
2219};
2220
2221/* Markers of _kprobe_blacklist section */
2222extern unsigned long __start_kprobe_blacklist[];
2223extern unsigned long __stop_kprobe_blacklist[];
2224
2225static int __init init_kprobes(void)
2226{
2227        int i, err = 0;
2228
2229        /* FIXME allocate the probe table, currently defined statically */
2230        /* initialize all list heads */
2231        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2232                INIT_HLIST_HEAD(&kprobe_table[i]);
2233                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2234                raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2235        }
2236
2237        err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2238                                        __stop_kprobe_blacklist);
2239        if (err) {
2240                pr_err("kprobes: failed to populate blacklist: %d\n", err);
2241                pr_err("Please take care when using kprobes.\n");
2242        }
2243
2244        if (kretprobe_blacklist_size) {
2245                /* lookup the function address from its name */
2246                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2247                        kretprobe_blacklist[i].addr =
2248                                kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2249                        if (!kretprobe_blacklist[i].addr)
2250                                pr_err("kretprobe: lookup failed: %s\n",
2251                                       kretprobe_blacklist[i].name);
2252                }
2253        }
2254
2255#if defined(CONFIG_OPTPROBES)
2256#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2257        /* Init kprobe_optinsn_slots */
2258        kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2259#endif
2260        /* By default, kprobes can be optimized */
2261        kprobes_allow_optimization = true;
2262#endif
2263
2264        /* By default, kprobes are armed */
2265        kprobes_all_disarmed = false;
2266
2267        err = arch_init_kprobes();
2268        if (!err)
2269                err = register_die_notifier(&kprobe_exceptions_nb);
2270        if (!err)
2271                err = register_module_notifier(&kprobe_module_nb);
2272
2273        kprobes_initialized = (err == 0);
2274
2275        if (!err)
2276                init_test_probes();
2277        return err;
2278}
2279subsys_initcall(init_kprobes);
2280
2281#ifdef CONFIG_DEBUG_FS
2282static void report_probe(struct seq_file *pi, struct kprobe *p,
2283                const char *sym, int offset, char *modname, struct kprobe *pp)
2284{
2285        char *kprobe_type;
2286        void *addr = p->addr;
2287
2288        if (p->pre_handler == pre_handler_kretprobe)
2289                kprobe_type = "r";
2290        else
2291                kprobe_type = "k";
2292
2293        if (!kallsyms_show_value())
2294                addr = NULL;
2295
2296        if (sym)
2297                seq_printf(pi, "%px  %s  %s+0x%x  %s ",
2298                        addr, kprobe_type, sym, offset,
2299                        (modname ? modname : " "));
2300        else    /* try to use %pS */
2301                seq_printf(pi, "%px  %s  %pS ",
2302                        addr, kprobe_type, p->addr);
2303
2304        if (!pp)
2305                pp = p;
2306        seq_printf(pi, "%s%s%s%s\n",
2307                (kprobe_gone(p) ? "[GONE]" : ""),
2308                ((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2309                (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2310                (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2311}
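/*
 * A resulting debugfs line looks roughly like the following (addresses
 * are shown only when kallsyms_show_value() permits; values are made up):
 *
 *   ffffffff81234560  r  do_fork+0x0     [FTRACE]
 *   ffffffff81234a80  k  vfs_read+0x10   [DISABLED]
 */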
2312
2313static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2314{
2315        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2316}
2317
2318static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2319{
2320        (*pos)++;
2321        if (*pos >= KPROBE_TABLE_SIZE)
2322                return NULL;
2323        return pos;
2324}
2325
2326static void kprobe_seq_stop(struct seq_file *f, void *v)
2327{
2328        /* Nothing to do */
2329}
2330
2331static int show_kprobe_addr(struct seq_file *pi, void *v)
2332{
2333        struct hlist_head *head;
2334        struct kprobe *p, *kp;
2335        const char *sym = NULL;
2336        unsigned int i = *(loff_t *) v;
2337        unsigned long offset = 0;
2338        char *modname, namebuf[KSYM_NAME_LEN];
2339
2340        head = &kprobe_table[i];
2341        preempt_disable();
2342        hlist_for_each_entry_rcu(p, head, hlist) {
2343                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2344                                        &offset, &modname, namebuf);
2345                if (kprobe_aggrprobe(p)) {
2346                        list_for_each_entry_rcu(kp, &p->list, list)
2347                                report_probe(pi, kp, sym, offset, modname, p);
2348                } else
2349                        report_probe(pi, p, sym, offset, modname, NULL);
2350        }
2351        preempt_enable();
2352        return 0;
2353}
2354
2355static const struct seq_operations kprobes_seq_ops = {
2356        .start = kprobe_seq_start,
2357        .next  = kprobe_seq_next,
2358        .stop  = kprobe_seq_stop,
2359        .show  = show_kprobe_addr
2360};
2361
2362static int kprobes_open(struct inode *inode, struct file *filp)
2363{
2364        return seq_open(filp, &kprobes_seq_ops);
2365}
2366
2367static const struct file_operations debugfs_kprobes_operations = {
2368        .open           = kprobes_open,
2369        .read           = seq_read,
2370        .llseek         = seq_lseek,
2371        .release        = seq_release,
2372};
2373
2374/* kprobes/blacklist -- shows which functions cannot be probed */
2375static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2376{
2377        return seq_list_start(&kprobe_blacklist, *pos);
2378}
2379
2380static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2381{
2382        return seq_list_next(v, &kprobe_blacklist, pos);
2383}
2384
2385static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2386{
2387        struct kprobe_blacklist_entry *ent =
2388                list_entry(v, struct kprobe_blacklist_entry, list);
2389
2390        /*
2391         * If /proc/kallsyms is not showing kernel addresses, we won't
2392         * show them here either.
2393         */
2394        if (!kallsyms_show_value())
2395                seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2396                           (void *)ent->start_addr);
2397        else
2398                seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2399                           (void *)ent->end_addr, (void *)ent->start_addr);
2400        return 0;
2401}
2402
2403static const struct seq_operations kprobe_blacklist_seq_ops = {
2404        .start = kprobe_blacklist_seq_start,
2405        .next  = kprobe_blacklist_seq_next,
2406        .stop  = kprobe_seq_stop,       /* Reuse void function */
2407        .show  = kprobe_blacklist_seq_show,
2408};
2409
2410static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
2411{
2412        return seq_open(filp, &kprobe_blacklist_seq_ops);
2413}
2414
2415static const struct file_operations debugfs_kprobe_blacklist_ops = {
2416        .open           = kprobe_blacklist_open,
2417        .read           = seq_read,
2418        .llseek         = seq_lseek,
2419        .release        = seq_release,
2420};
2421
2422static int arm_all_kprobes(void)
2423{
2424        struct hlist_head *head;
2425        struct kprobe *p;
2426        unsigned int i, total = 0, errors = 0;
2427        int err, ret = 0;
2428
2429        mutex_lock(&kprobe_mutex);
2430
2431        /* If kprobes are armed, just return */
2432        if (!kprobes_all_disarmed)
2433                goto already_enabled;
2434
2435        /*
2436         * optimize_kprobe() called by arm_kprobe() checks
2437         * kprobes_all_disarmed, so set kprobes_all_disarmed before
2438         * calling arm_kprobe().
2439         */
2440        kprobes_all_disarmed = false;
2441        /* Arming a kprobe doesn't optimize the kprobe itself */
2442        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2443                head = &kprobe_table[i];
2444                /* Arm all kprobes on a best-effort basis */
2445                hlist_for_each_entry_rcu(p, head, hlist) {
2446                        if (!kprobe_disabled(p)) {
2447                                err = arm_kprobe(p);
2448                                if (err)  {
2449                                        errors++;
2450                                        ret = err;
2451                                }
2452                                total++;
2453                        }
2454                }
2455        }
2456
2457        if (errors)
2458                pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
2459                        errors, total);
2460        else
2461                pr_info("Kprobes globally enabled\n");
2462
2463already_enabled:
2464        mutex_unlock(&kprobe_mutex);
2465        return ret;
2466}
2467
2468static int disarm_all_kprobes(void)
2469{
2470        struct hlist_head *head;
2471        struct kprobe *p;
2472        unsigned int i, total = 0, errors = 0;
2473        int err, ret = 0;
2474
2475        mutex_lock(&kprobe_mutex);
2476
2477        /* If kprobes are already disarmed, just return */
2478        if (kprobes_all_disarmed) {
2479                mutex_unlock(&kprobe_mutex);
2480                return 0;
2481        }
2482
2483        kprobes_all_disarmed = true;
2484
2485        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2486                head = &kprobe_table[i];
2487                /* Disarm all kprobes on a best-effort basis */
2488                hlist_for_each_entry_rcu(p, head, hlist) {
2489                        if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2490                                err = disarm_kprobe(p, false);
2491                                if (err) {
2492                                        errors++;
2493                                        ret = err;
2494                                }
2495                                total++;
2496                        }
2497                }
2498        }
2499
2500        if (errors)
2501                pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
2502                        errors, total);
2503        else
2504                pr_info("Kprobes globally disabled\n");
2505
2506        mutex_unlock(&kprobe_mutex);
2507
2508        /* Wait for disarming all kprobes by optimizer */
2509        wait_for_kprobe_optimizer();
2510
2511        return ret;
2512}
2513
2514/*
2515 * XXX: The debugfs bool file interface doesn't allow for callbacks
2516 * when the bool state is switched. We can reuse that facility when
2517 * it becomes available.
2518 */
2519static ssize_t read_enabled_file_bool(struct file *file,
2520               char __user *user_buf, size_t count, loff_t *ppos)
2521{
2522        char buf[3];
2523
2524        if (!kprobes_all_disarmed)
2525                buf[0] = '1';
2526        else
2527                buf[0] = '0';
2528        buf[1] = '\n';
2529        buf[2] = 0x00;
2530        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2531}
2532
2533static ssize_t write_enabled_file_bool(struct file *file,
2534               const char __user *user_buf, size_t count, loff_t *ppos)
2535{
2536        char buf[32];
2537        size_t buf_size;
2538        int ret = 0;
2539
2540        buf_size = min(count, (sizeof(buf)-1));
2541        if (copy_from_user(buf, user_buf, buf_size))
2542                return -EFAULT;
2543
2544        buf[buf_size] = '\0';
2545        switch (buf[0]) {
2546        case 'y':
2547        case 'Y':
2548        case '1':
2549                ret = arm_all_kprobes();
2550                break;
2551        case 'n':
2552        case 'N':
2553        case '0':
2554                ret = disarm_all_kprobes();
2555                break;
2556        default:
2557                return -EINVAL;
2558        }
2559
2560        if (ret)
2561                return ret;
2562
2563        return count;
2564}
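/*
 * From user space the global switch is driven through debugfs, e.g.:
 *
 *   echo 0 > /sys/kernel/debug/kprobes/enabled	 (disarm all kprobes)
 *   echo 1 > /sys/kernel/debug/kprobes/enabled	 (re-arm them)
 */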
2565
2566static const struct file_operations fops_kp = {
2567        .read =         read_enabled_file_bool,
2568        .write =        write_enabled_file_bool,
2569        .llseek =       default_llseek,
2570};
2571
2572static int __init debugfs_kprobe_init(void)
2573{
2574        struct dentry *dir;
2575        unsigned int value = 1;
2576
2577        dir = debugfs_create_dir("kprobes", NULL);
2578
2579        debugfs_create_file("list", 0400, dir, NULL,
2580                            &debugfs_kprobes_operations);
2581
2582        debugfs_create_file("enabled", 0600, dir, &value, &fops_kp);
2583
2584        debugfs_create_file("blacklist", 0400, dir, NULL,
2585                            &debugfs_kprobe_blacklist_ops);
2586
2587        return 0;
2588}
2589
2590late_initcall(debugfs_kprobe_init);
2591#endif /* CONFIG_DEBUG_FS */
2592