linux/arch/s390/kernel/kprobes.c
/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/dis.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(dmainsn);

static void *alloc_dmainsn_page(void)
{
	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
}

static void free_dmainsn_page(void *page)
{
	free_page((unsigned long)page);
}

struct kprobe_insn_cache kprobe_dmainsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
	.alloc = alloc_dmainsn_page,
	.free = free_dmainsn_page,
	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
};

static void copy_instruction(struct kprobe *p)
{
	unsigned long ip = (unsigned long) p->addr;
	s64 disp, new_disp;
	u64 addr, new_addr;

	if (ftrace_location(ip) == ip) {
		/*
		 * If kprobes patches the instruction that is morphed by
		 * ftrace, make sure that kprobes always sees the branch
		 * "jg .+24" that skips the mcount block or the "brcl 0,0"
		 * in case of hotpatch.
		 */
		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
		p->ainsn.is_ftrace_insn = 1;
	} else
		memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
	p->opcode = p->ainsn.insn[0];
	if (!probe_is_insn_relative_long(p->ainsn.insn))
		return;
	/*
	 * For pc-relative instructions in RIL-b or RIL-c format, patch the
	 * RI2 displacement field. We have already made sure that the insn
	 * slot for the patched instruction is within the same 2GB area
	 * as the original instruction (either kernel image or module area).
	 * Therefore the new displacement will always fit.
	 */
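	/*
	 * Worked example (hypothetical addresses): a "brasl %r14,target"
	 * at address 0x10000 with RI2 displacement 0x100 branches to
	 * 0x10000 + 0x100 * 2 = 0x10200. If the insn slot copy lives at
	 * 0x30000, the patched displacement becomes
	 * (0x10200 - 0x30000) / 2 = -0xff00, so the copy still reaches
	 * the original target.
	 */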
	disp = *(s32 *)&p->ainsn.insn[1];
	addr = (u64)(unsigned long)p->addr;
	new_addr = (u64)(unsigned long)p->ainsn.insn;
	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
	*(s32 *)&p->ainsn.insn[1] = new_disp;
}
NOKPROBE_SYMBOL(copy_instruction);

static inline int is_kernel_addr(void *addr)
{
	return addr < (void *)_end;
}

static int s390_get_insn_slot(struct kprobe *p)
{
	/*
	 * Get an insn slot that is within the same 2GB area as the original
	 * instruction. That way instructions with a 32bit signed displacement
	 * field can be patched and executed within the insn slot.
	 */
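	/*
	 * Note: on s390 GFP_DMA pages are allocated below 2GB, the same
	 * area that holds the kernel image, which is why the DMA insn
	 * slot cache satisfies this requirement for probes on kernel
	 * addresses.
	 */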
	p->ainsn.insn = NULL;
	if (is_kernel_addr(p->addr))
		p->ainsn.insn = get_dmainsn_slot();
	else if (is_module_addr(p->addr))
		p->ainsn.insn = get_insn_slot();
	return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
	if (!p->ainsn.insn)
		return;
	if (is_kernel_addr(p->addr))
		free_dmainsn_slot(p->ainsn.insn, 0);
	else
		free_insn_slot(p->ainsn.insn, 0);
	p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long) p->addr & 0x01)
		return -EINVAL;
	/* Make sure the probe isn't placed on a prohibited instruction */
	if (probe_is_prohibited_opcode(p->addr))
		return -EINVAL;
	if (s390_get_insn_slot(p))
		return -ENOMEM;
	copy_instruction(p);
	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

int arch_check_ftrace_location(struct kprobe *p)
{
	return 0;
}

struct swap_insn_args {
	struct kprobe *p;
	unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct swap_insn_args *args = data;
	struct ftrace_insn new_insn, *insn;
	struct kprobe *p = args->p;
	size_t len;

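	/*
	 * By default only the 2-byte opcode is swapped. On an ftrace
	 * call site the whole 6-byte ftrace instruction is rewritten
	 * below, so that opcode and displacement stay consistent.
	 */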
	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
	len = sizeof(new_insn.opc);
	if (!p->ainsn.is_ftrace_insn)
		goto skip_ftrace;
	len = sizeof(new_insn);
	insn = (struct ftrace_insn *) p->addr;
	if (args->arm_kprobe) {
		if (is_ftrace_nop(insn))
			new_insn.disp = KPROBE_ON_FTRACE_NOP;
		else
			new_insn.disp = KPROBE_ON_FTRACE_CALL;
	} else {
		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
		if (insn->disp == KPROBE_ON_FTRACE_NOP)
			ftrace_generate_nop_insn(&new_insn);
	}
skip_ftrace:
	kcb->kprobe_status = KPROBE_SWAP_INST;
	s390_kernel_write(p->addr, &new_insn, len);
	kcb->kprobe_status = status;
	return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

	stop_machine(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

	stop_machine(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

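/*
 * Single stepping on s390 uses the PER (Program Event Recording)
 * facility: an instruction-fetch event with start == end == ip raises
 * a PER program interrupt when the instruction at ip is executed,
 * i.e. right after the copied probe instruction completes.
 */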
static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	struct per_regs per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control = PER_EVENT_IFETCH;
	per_kprobe.start = ip;
	per_kprobe.end = ip;

	/* Save control regs and psw mask */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__ctl_load(per_kprobe, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip | PSW_ADDR_AMODE;
}
NOKPROBE_SYMBOL(disable_singlestep);

/*
 * Activate a kprobe by storing its pointer in current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

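/*
 * The s390 calling convention keeps the return address in %r14, so
 * replacing gprs[14] at function entry makes the probed function
 * "return" into the kretprobe trampoline below.
 */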
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
	default:
		/*
		 * A kprobe on the code path to single step an instruction
		 * is a BUG. The code path resides in the .kprobes.text
		 * section and is executed with interrupts disabled.
		 */
		printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
	}
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
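	/*
	 * The psw address points behind the 2-byte breakpoint
	 * instruction, so step back two bytes to find the probe.
	 */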
	p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe saves and restores
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs))
				return 1;
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			/*
			 * Continuation after the jprobe completed and
			 * caused the jprobe_return trap. The jprobe
			 * break_handler "returns" to the original
			 * function that still has the kprobe breakpoint
			 * installed. We continue with single stepping.
			 */
			kcb->kprobe_status = KPROBE_HIT_SS;
			enable_singlestep(kcb, regs,
					  (unsigned long) p->ainsn.insn);
			return 1;
		} /* else:
		   * No kprobe at this address and the current kprobe
		   * has no break handler (no jprobe!). The kernel just
		   * exploded, let the standard trap handler pick up the
		   * pieces.
		   */
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address;
	unsigned long trampoline_address;
	kprobe_opcode_t *correct_ret_addr;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	ri = NULL;
	orig_ret_address = 0;
	correct_ret_addr = NULL;
	trampoline_address = (unsigned long) &kretprobe_trampoline;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long) ri->ret_addr;

		if (ri->rp && ri->rp->handler) {
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	pop_kprobe(get_kprobe_ctlblk());
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	/* Check if the kprobes location is an enabled ftrace caller */
	if (p->ainsn.is_ftrace_insn) {
		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
		struct ftrace_insn call_insn;

		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
		/*
		 * A kprobe on an enabled ftrace call site actually single
		 * stepped an unconditional branch (ftrace nop equivalent).
		 * Now we need to fixup things and pretend that a brasl r0,...
		 * was executed instead.
		 */
		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
		}
	}

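	/*
	 * Continue behind the copied instruction: translate the psw
	 * address from the insn slot back into the original
	 * instruction stream.
	 */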
	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

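	/*
	 * A conditional branch that was not taken ended up directly
	 * behind the copy in the insn slot; resume behind the original
	 * instruction instead.
	 */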
	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);
		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

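	/*
	 * A branch-and-save instruction (e.g. brasl, see the ftrace
	 * comment above) stored the address behind its insn slot copy
	 * in a register; rebase that value onto the original
	 * instruction address.
	 */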
	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);

static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}

	resume_execution(p, regs);
	pop_kprobe(kcb);
	preempt_enable_no_resched();

	/*
	 * If somebody else is single stepping across a probe point, the
	 * psw mask will have PER set. In that case continue the remaining
	 * processing of do_single_step, as if this were not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts can also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(p);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if a handler tries to access user space, e.g. via
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (p->fault_handler && p->fault_handler(p, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

	/* r15 is the stack pointer */
	stack = (unsigned long) regs->gprs[15];

	memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

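/*
 * ".word 0x0002" below is the s390 kprobe breakpoint instruction
 * (BREAKPOINT_INSTRUCTION, the same opcode written by
 * swap_instruction() above). Executing it re-enters kprobe_handler(),
 * whose break_handler path (longjmp_break_handler) restores the
 * context saved by setjmp_pre_handler().
 */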
void jprobe_return(void)
{
	asm volatile(".word 0x0002");
}
NOKPROBE_SYMBOL(jprobe_return);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack;

	stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* Put the stack back */
	memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
	preempt_enable_no_resched();
	return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

static struct kprobe trampoline = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);