/* arch/sparc64/kernel/kprobes.c
 *
 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>

/* We do not have hardware single-stepping on sparc64.
 * So we implement software single-stepping with breakpoint
 * traps.  The top-level scheme is similar to that used
 * in the x86 kprobes implementation.
 *
 * In the kprobe->ainsn.insn[] array we store the original
 * instruction at index zero and a break instruction at
 * index one.
 *
 * When we hit a kprobe we:
 * - Run the pre-handler
 * - Remember "regs->tnpc" and the interrupt level stored in
 *   "regs->tstate" so we can restore them later
 * - Disable PIL interrupts
 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
 * - Mark that we are actively in a kprobe
 *
 * At this point we wait for the second breakpoint at
 * kprobe->ainsn.insn[1] to hit.  When it does we:
 * - Run the post-handler
 * - Set regs->tpc to the "remembered" regs->tnpc stored above,
 *   and restore the PIL interrupt level in "regs->tstate" as well
 * - Make any adjustments necessary to regs->tnpc in order
 *   to handle relative branches correctly.  See below.
 * - Mark that we are no longer actively in a kprobe.
 */
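
/* Illustrative layout sketch (not consulted by the code below),
 * assuming BREAKPOINT_INSTRUCTION is "ta 0x70" and
 * BREAKPOINT_INSTRUCTION_2 is "ta 0x71", the two trap levels
 * decoded by kprobe_trap() at the bottom of this file:
 *
 *	*p->addr          : ta 0x70              (armed probe site)
 *	p->ainsn.insn[0]  : original instruction (single-stepped copy)
 *	p->ainsn.insn[1]  : ta 0x71              (post-single-step trap)
 */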

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        if ((unsigned long) p->addr & 0x3UL)
                return -EILSEQ;

        p->ainsn.insn[0] = *p->addr;
        flushi(&p->ainsn.insn[0]);

        p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
        flushi(&p->ainsn.insn[1]);

        p->opcode = *p->addr;
        return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = BREAKPOINT_INSTRUCTION;
        flushi(p->addr);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flushi(p->addr);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
        kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
        kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = p;
        kcb->kprobe_orig_tnpc = regs->tnpc;
        kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
                        struct kprobe_ctlblk *kcb)
{
        regs->tstate |= TSTATE_PIL;

        /* Single step inline if it is a breakpoint instruction. */
        if (p->opcode == BREAKPOINT_INSTRUCTION) {
                regs->tpc = (unsigned long) p->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
        } else {
                regs->tpc = (unsigned long) &p->ainsn.insn[0];
                regs->tnpc = (unsigned long) &p->ainsn.insn[1];
        }
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        void *addr = (void *) regs->tpc;
        int ret = 0;
        struct kprobe_ctlblk *kcb;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing.
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS) {
                                regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                                        kcb->kprobe_orig_tstate_pil);
                                goto no_kprobe;
                        }
                        /* We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * We here save the original kprobe variables and
                         * just single step on the instruction of the new probe
                         * without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        kcb->kprobe_status = KPROBE_REENTER;
                        prepare_singlestep(p, regs, kcb);
                        return 1;
                } else {
                        if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                                /* The breakpoint instruction was removed by
                                 * another cpu right after we hit it; no further
                                 * handling of this interrupt is appropriate.
                                 */
                                ret = 1;
                                goto no_kprobe;
                        }
                        p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let the kernel handle it. */
                goto no_kprobe;
        }

        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        if (p->pre_handler && p->pre_handler(p, regs))
                return 1;

ss_probe:
        prepare_singlestep(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/* If INSN is a relative control transfer instruction,
 * return the corrected branch destination value.
 *
 * regs->tpc and regs->tnpc still hold the values of the
 * program counters at the time of the trap due to the execution
 * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1].
 */
static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
                                               struct pt_regs *regs)
{
        unsigned long real_pc = (unsigned long) p->addr;

        /* Branch not taken, no mods necessary.  */
        if (regs->tnpc == regs->tpc + 0x4UL)
                return real_pc + 0x8UL;

        /* The three cases are call, branch w/prediction,
         * and traditional branch.
         */
        if ((insn & 0xc0000000) == 0x40000000 ||
            (insn & 0xc1c00000) == 0x00400000 ||
            (insn & 0xc1c00000) == 0x00800000) {
                unsigned long ainsn_addr;

                ainsn_addr = (unsigned long) &p->ainsn.insn[0];

                /* The instruction did all the work for us
                 * already, just apply the offset to the correct
                 * instruction location.
                 */
                return (real_pc + (regs->tnpc - ainsn_addr));
        }

        /* It is jmpl or some other absolute PC modification instruction,
         * leave NPC as-is.
         */
        return regs->tnpc;
}
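
/* Worked example (illustrative): if the probed instruction is a taken
 * "ba" with displacement +0x40, single-stepping the copy leaves
 * regs->tnpc == ainsn_addr + 0x40 at the second trap, and the branch
 * case above maps that back to real_pc + 0x40, i.e. the target
 * relative to the original probe address.
 */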

/* If INSN is an instruction which writes its PC location
 * into a destination register, fix that up.
 */
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
                                  unsigned long real_pc)
{
        unsigned long *slot = NULL;

        /* The simplest case is 'call', which always uses %o7. */
        if ((insn & 0xc0000000) == 0x40000000) {
                slot = &regs->u_regs[UREG_I7];
        }

        /* 'jmpl' encodes the destination register inside of the opcode. */
        if ((insn & 0xc1f80000) == 0x81c00000) {
                unsigned long rd = ((insn >> 25) & 0x1f);

                if (rd <= 15) {
                        slot = &regs->u_regs[rd];
                } else {
                        /* Hard case, it goes onto the stack. */
                        flushw_all();

                        rd -= 16;
                        slot = (unsigned long *)
                                (regs->u_regs[UREG_FP] + STACK_BIAS);
                        slot += rd;
                }
        }
        if (slot != NULL)
                *slot = real_pc;
}
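
/* Worked example (illustrative): for "jmpl %g1 + %g0, %o7" the rd
 * field ((insn >> 25) & 0x1f) is 15, i.e. %o7, so the stepped copy
 * deposited &p->ainsn.insn[0] there and retpc_fixup() rewrites it
 * with the real probe address.  For rd >= 16 (%l0-%i7) the value
 * lives in the register window save area on the stack, hence the
 * flushw_all() and the STACK_BIAS'd frame pointer arithmetic above.
 */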

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction which has been replaced by the breakpoint
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is &p->ainsn.insn[0].
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
                struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
        u32 insn = p->ainsn.insn[0];

        regs->tnpc = relbranch_fixup(insn, p, regs);

        /* This assignment must occur after relbranch_fixup(),
         * which still needs the trap-time tpc/tnpc values.
         */
        regs->tpc = kcb->kprobe_orig_tnpc;

        retpc_fixup(regs, insn, (unsigned long) p->addr);

        regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                        kcb->kprobe_orig_tstate_pil);
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs, kcb);

        /* Restore the original saved kprobe variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault.  We reset the current
                 * kprobe, point tpc back to the probe address, and
                 * allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->tpc = (unsigned long)cur->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
                regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                                kcb->kprobe_orig_tstate_pil);
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * we can also use the npre/npostfault counts for
                 * accounting these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault.  This could happen
                 * if the handler tries to access user space via
                 * copy_from_user(), get_user(), etc.  Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return 1;
                }

                /*
                 * There is no exception table entry for this address
                 * either, so let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }

        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        if (args->regs && user_mode(args->regs))
                return ret;

        switch (val) {
        case DIE_DEBUG:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_DEBUG_2:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}

asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
                                      struct pt_regs *regs)
{
        BUG_ON(trap_level != 0x170 && trap_level != 0x171);

        if (user_mode(regs)) {
                local_irq_enable();
                bad_trap(regs, trap_level);
                return;
        }

        /* trap_level == 0x170 --> ta 0x70
         * trap_level == 0x171 --> ta 0x71
         */
        if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
                       (trap_level == 0x170) ? "debug" : "debug_2",
                       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
                bad_trap(regs, trap_level);
}
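
/* To tie the pieces together: "ta 0x70" (trap level 0x170) is reported
 * as DIE_DEBUG and lands in kprobe_handler() to begin the single-step,
 * while "ta 0x71" (trap level 0x171) is reported as DIE_DEBUG_2 and
 * lands in post_kprobe_handler() to finish it, both dispatched via
 * kprobe_exceptions_notify() above.
 */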

/* Jprobes support.  */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));

        regs->tpc  = (unsigned long) jp->entry;
        regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
        regs->tstate |= TSTATE_PIL;

        return 1;
}

void __kprobes jprobe_return(void)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        register unsigned long orig_fp asm("g1");

        orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];

        /* Pop register windows with 'restore' until the stack pointer
         * is back at the frame that took the jprobe hit, then trap at
         * a recognizable address so longjmp_break_handler() can put
         * back the register state saved in setjmp_pre_handler().
         */
        __asm__ __volatile__("\n"
"1:     cmp             %%sp, %0\n\t"
        "blu,a,pt       %%xcc, 1b\n\t"
        " restore\n\t"
        ".globl         jprobe_return_trap_instruction\n"
"jprobe_return_trap_instruction:\n\t"
        "ta             0x70"
        : /* no outputs */
        : "r" (orig_fp));
}

extern void jprobe_return_trap_instruction(void);

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        u32 *addr = (u32 *) regs->tpc;
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (addr == (u32 *) jprobe_return_trap_instruction) {
                memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
                preempt_enable_no_resched();
                return 1;
        }
        return 0;
}

/* The value stored in the return address register is actually 2
 * instructions before where the callee will return to.
 * Sequences usually look something like this:
 *
 *              call    some_function   <--- return register points here
 *               nop                    <--- call delay slot
 *              whatever                <--- where callee returns to
 *
 * To keep trampoline_probe_handler() logic simpler, we normalize the
 * value kept in ri->ret_addr so we don't need to keep adjusting it
 * back and forth.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);

        /* Replace the return addr with trampoline addr */
        regs->u_regs[UREG_RETPC] =
                ((unsigned long)kretprobe_trampoline) - 8;
}
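
/* Worked example (illustrative): if the probed function was entered
 * via a call at address A, %o7 (UREG_RETPC) holds A and the callee
 * will return to A + 8.  We record A + 8 in ri->ret_addr, and by
 * planting kretprobe_trampoline - 8 in %o7 the function's normal
 * return sequence transfers control straight to the trampoline.
 */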

/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path
         * have a return probe installed on them, and/or more than one
         * return probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address.  Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack.
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        regs->tpc = orig_ret_address;
        regs->tnpc = orig_ret_address + 4;

        reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption).
         */
        return 1;
}
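
/* Illustrative instance list (hypothetical functions): if fn_a() calls
 * fn_b() and both have return probes, instances are pushed at the list
 * head as each function is entered, so when fn_b() returns first, the
 * head-first walk above finds its instance (whose ret_addr is the real
 * return address, not the trampoline) before fn_a()'s, matching the
 * order in which the calls unwind.
 */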

void kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline:\n"
                     "\tnop\n"
                     "\tnop\n");
}

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
                return 1;

        return 0;
}