linux/arch/mips/kernel/kprobes.c
/*
 *  Kernel Probes (KProbes)
 *  arch/mips/kernel/kprobes.c
 *
 *  Copyright 2006 Sony Corp.
 *  Copyright 2010 Cavium Networks
 *
 *  Some portions copied from the powerpc version.
 *
 *   Copyright (C) IBM Corporation, 2002, 2004
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>

#include "probes-common.h"

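/*
 * Two distinct "break" encodings are used so that the trap handler can
 * tell the two stages apart: BRK_KPROBE_BP marks the probed address
 * itself (raising DIE_BREAK), while BRK_KPROBE_SSTEPBP follows the
 * out-of-line copy of the instruction and signals that the single step
 * is complete (raising DIE_SSTEPBP).
 */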
static const union mips_instruction breakpoint_insn = {
        .b_format = {
                .opcode = spec_op,
                .code = BRK_KPROBE_BP,
                .func = break_op
        }
};

static const union mips_instruction breakpoint2_insn = {
        .b_format = {
                .opcode = spec_op,
                .code = BRK_KPROBE_SSTEPBP,
                .func = break_op
        }
};

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
        return __insn_has_delay_slot(insn);
}

/*
 * insn_has_ll_or_sc() checks whether an instruction is an ll or sc
 * variant. Placing a breakpoint on top of an atomic ll/sc pair is a
 * bad idea: the break takes an exception, which clears LLbit, so the
 * sc would never succeed and the loop would never terminate. We
 * therefore refuse kprobe insertion on such instructions. We cannot
 * do much about a breakpoint placed in the middle of an ll/sc pair;
 * it is up to the user to avoid those places.
 */
static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
{
        int ret = 0;

        switch (insn.i_format.opcode) {
        case ll_op:
        case lld_op:
        case sc_op:
        case scd_op:
                ret = 1;
                break;
        default:
                break;
        }
        return ret;
}
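/*
 * For illustration (an assumed example, not part of this file): a
 * typical ll/sc retry loop that must not be probed at the ll, e.g.
 *
 *	1:	ll	t0, 0(a0)	# load linked, sets LLbit
 *		addiu	t0, t0, 1
 *		sc	t0, 0(a0)	# writes 0 if LLbit was cleared
 *		beqz	t0, 1b		# retry on failure
 *
 * A kprobe on the ll would take an exception on every iteration,
 * clearing LLbit before the sc, so the loop would spin forever.
 */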

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        union mips_instruction insn;
        union mips_instruction prev_insn;
        int ret = 0;

        insn = p->addr[0];

        if (insn_has_ll_or_sc(insn)) {
                pr_notice("Kprobes for ll and sc instructions are not supported\n");
                ret = -EINVAL;
                goto out;
        }

        if ((probe_kernel_read(&prev_insn, p->addr - 1,
                                sizeof(mips_instruction)) == 0) &&
                                insn_has_delayslot(prev_insn)) {
                pr_notice("Kprobes for branch delayslot are not supported\n");
                ret = -EINVAL;
                goto out;
        }

        if (__insn_is_compact_branch(insn)) {
                pr_notice("Kprobes for compact branches are not supported\n");
                ret = -EINVAL;
                goto out;
        }

        /* insn: must be on special executable page on mips. */
        p->ainsn.insn = get_insn_slot();
        if (!p->ainsn.insn) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * In the kprobe->ainsn.insn[] array we store the original
         * instruction at index zero and a break trap instruction at
         * index one.
         *
         * On MIPS, if the instruction at the probed address is a branch
         * instruction, we need to execute the instruction in its branch
         * delay slot (BD) at the time of the probe hit. As MIPS has no
         * hardware single-step support, the BD instruction cannot be
         * executed in-line; it is executed on the SSOL (single-step
         * out-of-line) slot, with a normal breakpoint instruction in the
         * next slot. So, read the instruction and save it for later
         * execution.
         */
        if (insn_has_delayslot(insn))
                memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
        else
                memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));

        p->ainsn.insn[1] = breakpoint2_insn;
        p->opcode = *p->addr;

out:
        return ret;
}
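/*
 * For illustration (an assumed example, not from this file): probing a
 * branch such as "beq a0, a1, target" whose delay slot holds
 * "addiu sp, sp, -32" leaves the SSOL slot as
 *
 *	ainsn.insn[0] = addiu sp, sp, -32	(delay-slot instruction)
 *	ainsn.insn[1] = break BRK_KPROBE_SSTEPBP
 *
 * while for a non-branch instruction insn[0] is simply a copy of the
 * probed instruction itself.
 */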

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = breakpoint_insn;
        flush_insn_slot(p);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flush_insn_slot(p);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.insn) {
                free_insn_slot(p->ainsn.insn, 0);
                p->ainsn.insn = NULL;
        }
}

static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
        kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
        kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
        kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
        kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}

static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                               struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
        kcb->kprobe_saved_epc = regs->cp0_epc;
}
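/*
 * Only the ST0_IE (interrupt enable) bit of CP0 Status is saved here:
 * prepare_singlestep() clears it so no interrupt can fire between the
 * two breakpoint traps, and post_kprobe_handler() ORs the saved value
 * back in once the step has completed.
 */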

/**
 * evaluate_branch_instruction -
 *
 * Evaluate the branch instruction at the probed address during the probe
 * hit. The result of the evaluation is the updated epc. The instruction
 * in the delay slot is then actually single stepped (using a normal
 * breakpoint) on the SSOL slot.
 *
 * The result is also saved in the kprobe control block for later use,
 * in case we need to execute the delay-slot instruction. The latter will
 * be false for a NOP instruction in the delay slot and for branch-likely
 * instructions when the branch is taken. For those cases we set the
 * SKIP_DELAYSLOT flag in the kprobe control block.
 */
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
                                        struct kprobe_ctlblk *kcb)
{
        union mips_instruction insn = p->opcode;
        long epc;
        int ret = 0;

        epc = regs->cp0_epc;
        if (epc & 3)
                goto unaligned;

        if (p->ainsn.insn->word == 0)
                kcb->flags |= SKIP_DELAYSLOT;
        else
                kcb->flags &= ~SKIP_DELAYSLOT;

        ret = __compute_return_epc_for_insn(regs, insn);
        if (ret < 0)
                return ret;

        if (ret == BRANCH_LIKELY_TAKEN)
                kcb->flags |= SKIP_DELAYSLOT;

        kcb->target_epc = regs->cp0_epc;

        return 0;

unaligned:
        pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
        force_sig(SIGBUS, current);
        return -EFAULT;
}
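/*
 * For illustration (an assumed example, not from this file): if the
 * probed instruction is "bne t0, t1, target" and the branch is taken,
 * __compute_return_epc_for_insn() leaves regs->cp0_epc (and hence
 * kcb->target_epc) at "target"; if not taken, epc points past the
 * delay slot (probed address + 8). Either way, the delay-slot
 * instruction is still stepped from the SSOL slot unless
 * SKIP_DELAYSLOT was set above.
 */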

static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
                                                struct kprobe_ctlblk *kcb)
{
        int ret = 0;

        regs->cp0_status &= ~ST0_IE;

        /* single step inline if the instruction is a break */
        if (p->opcode.word == breakpoint_insn.word ||
            p->opcode.word == breakpoint2_insn.word)
                regs->cp0_epc = (unsigned long)p->addr;
        else if (insn_has_delayslot(p->opcode)) {
                ret = evaluate_branch_instruction(p, regs, kcb);
                if (ret < 0) {
                        pr_notice("Kprobes: Error in evaluating branch\n");
                        return;
                }
        }
        regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction which has been replaced by the break instruction.  To
 * avoid the SMP problems that can occur when we temporarily put back
 * the original opcode to single-step, we single-stepped a copy of the
 * instruction.  The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap. In the case of branch instructions, the target
 * epc is restored instead.
 */
static void __kprobes resume_execution(struct kprobe *p,
                                       struct pt_regs *regs,
                                       struct kprobe_ctlblk *kcb)
{
        if (insn_has_delayslot(p->opcode))
                regs->cp0_epc = kcb->target_epc;
        else {
                unsigned long orig_epc = kcb->kprobe_saved_epc;
                regs->cp0_epc = orig_epc + 4;
        }
}
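/*
 * Overall flow (a summary, derived from the handlers below): hitting
 * the break at the probed address raises DIE_BREAK and enters
 * kprobe_handler(), which runs the pre_handler and redirects epc to
 * the SSOL slot. Executing the slot then hits the second break, which
 * raises DIE_SSTEPBP and enters post_kprobe_handler(), which runs the
 * post_handler and resumes at the real next instruction (or the
 * branch target).
 */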

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        kprobe_opcode_t *addr;
        struct kprobe_ctlblk *kcb;

        addr = (kprobe_opcode_t *) regs->cp0_epc;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing.
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        /* Check that we're not actually recursing. */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS &&
                            p->ainsn.insn->word == breakpoint_insn.word) {
                                regs->cp0_status &= ~ST0_IE;
                                regs->cp0_status |= kcb->kprobe_saved_SR;
                                goto no_kprobe;
                        }
                        /*
                         * We have reentered kprobe_handler() because
                         * another probe was hit while within the handler.
                         * Save the original kprobe variables and just
                         * single step on the instruction of the new probe
                         * without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        prepare_singlestep(p, regs, kcb);
                        kcb->kprobe_status = KPROBE_REENTER;
                        if (kcb->flags & SKIP_DELAYSLOT) {
                                resume_execution(p, regs, kcb);
                                restore_previous_kprobe(kcb);
                                preempt_enable_no_resched();
                        }
                        return 1;
                } else {
                        if (addr->word != breakpoint_insn.word) {
                                /*
                                 * The breakpoint instruction was removed by
                                 * another cpu right after we hit it; no
                                 * further handling of this interrupt is
                                 * appropriate.
                                 */
                                ret = 1;
                                goto no_kprobe;
                        }
                        p = __this_cpu_read(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (addr->word != breakpoint_insn.word) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let the kernel handle it. */
                goto no_kprobe;
        }

        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

        if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
                return 1;
        }

ss_probe:
        prepare_singlestep(p, regs, kcb);
        if (kcb->flags & SKIP_DELAYSLOT) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                if (p->post_handler)
                        p->post_handler(p, regs, 0);
                resume_execution(p, regs, kcb);
                preempt_enable_no_resched();
        } else
                kcb->kprobe_status = KPROBE_HIT_SS;

        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

static inline int post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs, kcb);

        regs->cp0_status |= kcb->kprobe_saved_SR;

        /* Restore back the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        return 1;
}

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                return 1;

        if (kcb->kprobe_status & KPROBE_HIT_SS) {
                resume_execution(cur, regs, kcb);
                regs->cp0_status |= kcb->kprobe_old_SR;

                reset_current_kprobe();
                preempt_enable_no_resched();
        }
        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_BREAK:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEPBP:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;

        case DIE_PAGE_FAULT:
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();

                if (kprobe_running()
                    && kprobe_fault_handler(args->regs, args->trapnr))
                        ret = NOTIFY_STOP;
                preempt_enable();
                break;
        default:
                break;
        }
        return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        kcb->jprobe_saved_regs = *regs;
        kcb->jprobe_saved_sp = regs->regs[29];

        memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
               MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

        regs->cp0_epc = (unsigned long)(jp->entry);

        return 1;
}

/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
        /* Assembler quirk necessitates this '0,code' business.  */
        asm volatile(
                "break 0,%0\n\t"
                ".globl jprobe_return_end\n"
                "jprobe_return_end:\n"
                : : "n" (BRK_KPROBE_BP) : "memory");
}
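/*
 * The break above uses the BRK_KPROBE_BP code, so it re-enters
 * kprobe_handler() while a kprobe is already active; the break_handler
 * path there dispatches to longjmp_break_handler() below, which
 * restores the registers and stack saved by setjmp_pre_handler().
 */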

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (regs->cp0_epc >= (unsigned long)jprobe_return &&
            regs->cp0_epc <= (unsigned long)jprobe_return_end) {
                *regs = kcb->jprobe_saved_regs;
                memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
                       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
                preempt_enable_no_resched();

                return 1;
        }
        return 0;
}

/*
 * Function return probe trampoline:
 *      - init_kprobes() establishes a probepoint here
 *      - When the probed function returns, this probe causes the
 *        handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
        asm volatile(
                ".set push\n\t"
                /* Keep the assembler from reordering and placing JR here. */
                ".set noreorder\n\t"
                "nop\n\t"
                ".global kretprobe_trampoline\n"
                "kretprobe_trampoline:\n\t"
                "nop\n\t"
                ".set pop"
                : : : "memory");
}

void kretprobe_trampoline(void);

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];

        /* Replace the return addr with trampoline addr */
        regs->regs[31] = (unsigned long)kretprobe_trampoline;
}
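/*
 * For illustration (an assumed example, not from this file): with a
 * kretprobe active on foo(), entry to foo() fires the probe that calls
 * arch_prepare_kretprobe(), saving the caller's address from $ra
 * (regs[31]) and replacing it with kretprobe_trampoline. When foo()
 * later executes "jr ra", control lands on the trampoline's breakpoint
 * and trampoline_probe_handler() below runs the user handler, then
 * resumes at the saved return address.
 */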

/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
                                                struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack.
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        instruction_pointer(regs) = orig_ret_address;

        reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption).
         */
        return 1;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
                return 1;

        return 0;
}

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *)kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}
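/*
 * Usage sketch (an illustration, not part of this file): a minimal
 * client that exercises the arch hooks above by probing a kernel
 * function, e.g.
 *
 *	static int pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit at epc=%lx\n", regs->cp0_epc);
 *		return 0;	// let single-stepping proceed
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_fork",	// hypothetical target
 *		.pre_handler = pre,
 *	};
 *
 *	register_kprobe(&kp);	// calls arch_prepare_kprobe() and
 *				// arch_arm_kprobe() defined above
 */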