linux/arch/s390/kernel/kprobes.c
// SPDX-License-Identifier: GPL-2.0+
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/dis.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(dmainsn);

static void *alloc_dmainsn_page(void)
{
        void *page;

        page = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
        if (page)
                set_memory_x((unsigned long) page, 1);
        return page;
}

static void free_dmainsn_page(void *page)
{
        set_memory_nx((unsigned long) page, 1);
        free_page((unsigned long)page);
}

struct kprobe_insn_cache kprobe_dmainsn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
        .alloc = alloc_dmainsn_page,
        .free = free_dmainsn_page,
        .pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
        .insn_size = MAX_INSN_SIZE,
};
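/*
 * On s390 GFP_DMA memory lies below 2GB, as does the kernel image
 * itself, so a slot allocated from this cache is always within +-2GB
 * of a probed kernel-image address. That is what makes the
 * pc-relative displacement rewrite in copy_instruction() below safe.
 */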
  55
static void copy_instruction(struct kprobe *p)
{
        unsigned long ip = (unsigned long) p->addr;
        s64 disp, new_disp;
        u64 addr, new_addr;

        if (ftrace_location(ip) == ip) {
                /*
                 * If kprobes patches the instruction that is morphed by
                 * ftrace, make sure that kprobes always sees the branch
                 * "jg .+24" that skips the mcount block or the "brcl 0,0"
                 * in case of hotpatch.
                 */
                ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
                p->ainsn.is_ftrace_insn = 1;
        } else
                memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
        p->opcode = p->ainsn.insn[0];
        if (!probe_is_insn_relative_long(p->ainsn.insn))
                return;
        /*
         * For pc-relative instructions in RIL-b or RIL-c format patch the
         * RI2 displacement field. We have already made sure that the insn
         * slot for the patched instruction is within the same 2GB area
         * as the original instruction (either kernel image or module area).
         * Therefore the new displacement will always fit.
         */
        disp = *(s32 *)&p->ainsn.insn[1];
        addr = (u64)(unsigned long)p->addr;
        new_addr = (u64)(unsigned long)p->ainsn.insn;
        new_disp = ((addr + (disp * 2)) - new_addr) / 2;
        *(s32 *)&p->ainsn.insn[1] = new_disp;
}
NOKPROBE_SYMBOL(copy_instruction);
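/*
 * Worked example with made-up addresses: a relative-long instruction
 * at 0x10000 with RI2 displacement 0x100 targets
 * 0x10000 + 0x100 * 2 = 0x10200 (the displacement counts halfwords).
 * If its copy sits in a slot at 0x3000, the rewritten displacement is
 * (0x10200 - 0x3000) / 2 = 0x6900, so the copy still reaches 0x10200.
 */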

static inline int is_kernel_addr(void *addr)
{
        return addr < (void *)_end;
}

static int s390_get_insn_slot(struct kprobe *p)
{
        /*
         * Get an insn slot that is within the same 2GB area as the
         * original instruction. That way instructions with a 32-bit
         * signed displacement field can be patched and executed within
         * the insn slot.
         */
        p->ainsn.insn = NULL;
        if (is_kernel_addr(p->addr))
                p->ainsn.insn = get_dmainsn_slot();
        else if (is_module_addr(p->addr))
                p->ainsn.insn = get_insn_slot();
        return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
        if (!p->ainsn.insn)
                return;
        if (is_kernel_addr(p->addr))
                free_dmainsn_slot(p->ainsn.insn, 0);
        else
                free_insn_slot(p->ainsn.insn, 0);
        p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int arch_prepare_kprobe(struct kprobe *p)
{
        if ((unsigned long) p->addr & 0x01)
                return -EINVAL;
        /* Make sure the probe isn't going on a difficult instruction */
        if (probe_is_prohibited_opcode(p->addr))
                return -EINVAL;
        if (s390_get_insn_slot(p))
                return -ENOMEM;
        copy_instruction(p);
        return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

int arch_check_ftrace_location(struct kprobe *p)
{
        return 0;
}

struct swap_insn_args {
        struct kprobe *p;
        unsigned int arm_kprobe : 1;
};

static int swap_instruction(void *data)
{
        struct swap_insn_args *args = data;
        struct ftrace_insn new_insn, *insn;
        struct kprobe *p = args->p;
        size_t len;

        new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
        len = sizeof(new_insn.opc);
        if (!p->ainsn.is_ftrace_insn)
                goto skip_ftrace;
        len = sizeof(new_insn);
        insn = (struct ftrace_insn *) p->addr;
        if (args->arm_kprobe) {
                if (is_ftrace_nop(insn))
                        new_insn.disp = KPROBE_ON_FTRACE_NOP;
                else
                        new_insn.disp = KPROBE_ON_FTRACE_CALL;
        } else {
                ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
                if (insn->disp == KPROBE_ON_FTRACE_NOP)
                        ftrace_generate_nop_insn(&new_insn);
        }
skip_ftrace:
        s390_kernel_write(p->addr, &new_insn, len);
        return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
        struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

        stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
        struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

        stop_machine_cpuslocked(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
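/*
 * Both arm and disarm go through stop_machine(): every other CPU is
 * parked while swap_instruction() rewrites the instruction bytes, so
 * no CPU can fetch a half-updated instruction, and the breakpoint
 * vs. removal race noted in kprobe_handler() below cannot occur.
 */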

void arch_remove_kprobe(struct kprobe *p)
{
        s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static void enable_singlestep(struct kprobe_ctlblk *kcb,
                              struct pt_regs *regs,
                              unsigned long ip)
{
        struct per_regs per_kprobe;

        /* Set up the PER control registers %cr9-%cr11 */
        per_kprobe.control = PER_EVENT_IFETCH;
        per_kprobe.start = ip;
        per_kprobe.end = ip;

        /* Save control regs and psw mask */
        __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
        kcb->kprobe_saved_imask = regs->psw.mask &
                (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

        /* Set PER control regs, turns on single step for the given address */
        __ctl_load(per_kprobe, 9, 11);
        regs->psw.mask |= PSW_MASK_PER;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
        regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);
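/*
 * PER (Program Event Recording) acts as the hardware single-step
 * here: %cr9 selects instruction-fetch events, %cr10/%cr11 limit the
 * range to exactly the one instruction at "ip" (the insn slot), and
 * I/O and external interrupts are masked off so nothing can intervene
 * during the step. The resulting PER event arrives as DIE_SSTEP and
 * is handled in post_kprobe_handler().
 */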

static void disable_singlestep(struct kprobe_ctlblk *kcb,
                               struct pt_regs *regs,
                               unsigned long ip)
{
        /* Restore control regs and psw mask, set new psw address */
        __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
        regs->psw.mask &= ~PSW_MASK_PER;
        regs->psw.mask |= kcb->kprobe_saved_imask;
        regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
        kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
        kcb->prev_kprobe.status = kcb->kprobe_status;
        __this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

        /* Replace the return addr with trampoline addr */
        regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
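/*
 * In the s390 ELF ABI %r14 holds the caller's return address, so
 * swapping gprs[14] here is the architectural equivalent of replacing
 * the on-stack return address on other architectures.
 */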

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
        case KPROBE_HIT_ACTIVE:
                kprobes_inc_nmissed_count(p);
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
        default:
                /*
                 * A kprobe on the code path to single step an instruction
                 * is a BUG. The code path resides in the .kprobes.text
                 * section and is executed with interrupts disabled.
                 */
                pr_err("Invalid kprobe detected.\n");
                dump_kprobe(p);
                BUG();
        }
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

static int kprobe_handler(struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb;
        struct kprobe *p;

        /*
         * We want to disable preemption for the entire duration of kprobe
         * processing. That includes the calls to the pre/post handlers
         * and single stepping the kprobe instruction.
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();
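        /*
         * On the breakpoint trap the PSW already points past the
         * 2-byte breakpoint instruction, hence the probe lookup at
         * psw.addr - 2.
         */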
        p = get_kprobe((void *)(regs->psw.addr - 2));

        if (p) {
                if (kprobe_running()) {
                        /*
                         * We have hit a kprobe while another is still
                         * active. This can happen in the pre and post
                         * handler. Single step the instruction of the
                         * new probe but do not call any handler function
                         * of this secondary kprobe.
                         * push_kprobe and pop_kprobe save and restore
                         * the currently active kprobe.
                         */
                        kprobe_reenter_check(kcb, p);
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_REENTER;
                } else {
                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with single stepping. If we have a
                         * pre-handler and it returned non-zero, it prepped
                         * for changing execution path, so get out doing
                         * nothing more here.
                         */
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                        if (p->pre_handler && p->pre_handler(p, regs)) {
                                pop_kprobe(kcb);
                                preempt_enable_no_resched();
                                return 1;
                        }
                        kcb->kprobe_status = KPROBE_HIT_SS;
                }
                enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
                return 1;
        } /* else:
           * No kprobe at this address and no active kprobe. The trap has
           * not been caused by a kprobe breakpoint. The race of breakpoint
           * vs. kprobe removal does not exist, because on s390 we use
           * stop_machine to arm/disarm the breakpoints.
           */
        preempt_enable_no_resched();
        return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *      - init_kprobes() establishes a probepoint here
 *      - When the probed function returns, this probe
 *              causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline: bcr 0,0\n");
}
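/*
 * "bcr 0,0" is a 2-byte no-op: the trampoline only needs to be a
 * valid, probeable instruction so that arch_init_kprobes() can plant
 * a breakpoint on it and trampoline_probe_handler() can redirect the
 * PSW to the saved return address.
 */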

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address;
        unsigned long trampoline_address;
        kprobe_opcode_t *correct_ret_addr;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        ri = NULL;
        orig_ret_address = 0;
        correct_ret_addr = NULL;
        trampoline_address = (unsigned long) &kretprobe_trampoline;
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long) ri->ret_addr;

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);

        correct_ret_addr = ri->ret_addr;
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long) ri->ret_addr;

                if (ri->rp && ri->rp->handler) {
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
                }

                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        regs->psw.addr = orig_ret_address;

        kretprobe_hash_unlock(current, &flags);

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption)
         */
        return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long ip = regs->psw.addr;
        int fixup = probe_get_fixup_type(p->ainsn.insn);

        /* Check if the kprobes location is an enabled ftrace caller */
        if (p->ainsn.is_ftrace_insn) {
                struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
                struct ftrace_insn call_insn;

                ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
                /*
                 * A kprobe on an enabled ftrace call site actually single
                 * stepped an unconditional branch (ftrace nop equivalent).
                 * Now we need to fixup things and pretend that a brasl r0,...
                 * was executed instead.
                 */
                if (insn->disp == KPROBE_ON_FTRACE_CALL) {
                        ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
                        regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
                }
        }

        if (fixup & FIXUP_PSW_NORMAL)
                ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

        if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
                int ilen = insn_length(p->ainsn.insn[0] >> 8);
                if (ip - (unsigned long) p->ainsn.insn == ilen)
                        ip = (unsigned long) p->addr + ilen;
        }

        if (fixup & FIXUP_RETURN_REGISTER) {
                int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
                regs->gprs[reg] += (unsigned long) p->addr -
                                   (unsigned long) p->ainsn.insn;
        }

        disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);
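/*
 * Fixup example with a made-up register: single stepping a copy of
 * "basr %r14,%r1" leaves %r14 pointing just past the insn slot. The
 * FIXUP_RETURN_REGISTER case extracts the R1 field (here 14) from the
 * opcode and adds p->addr - p->ainsn.insn, so %r14 ends up as
 * p->addr + 2, exactly as if the instruction had run in place.
 */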

static int post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        struct kprobe *p = kprobe_running();

        if (!p)
                return 0;

        if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                p->post_handler(p, regs, 0);
        }

        resume_execution(p, regs);
        pop_kprobe(kcb);
        preempt_enable_no_resched();

        /*
         * If somebody else is single stepping across a probe point, the
         * psw mask will have PER set, in which case we continue the
         * remaining processing of do_single_step, as if this is not a
         * probe hit.
         */
        if (regs->psw.mask & PSW_MASK_PER)
                return 0;

        return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        struct kprobe *p = kprobe_running();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe, point the psw back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                disable_singlestep(kcb, regs, (unsigned long) p->addr);
                pop_kprobe(kcb);
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting; the
                 * npre/npostfault counts could also be used to account
                 * for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(p);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault. This could happen if
                 * the handler tries to access user space via
                 * copy_from_user(), get_user() etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (p->fault_handler && p->fault_handler(p, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                entry = search_exception_tables(regs->psw.addr);
                if (entry) {
                        regs->psw.addr = extable_fixup(entry);
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it,
                 * let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        int ret;

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_disable();
        ret = kprobe_trap_handler(regs, trapnr);
        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
        return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
                             unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *) data;
        struct pt_regs *regs = args->regs;
        int ret = NOTIFY_DONE;

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_disable();

        switch (val) {
        case DIE_BPT:
                if (kprobe_handler(regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEP:
                if (post_kprobe_handler(regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_TRAP:
                if (!preemptible() && kprobe_running() &&
                    kprobe_trap_handler(regs, args->trapnr))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

        return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct kprobe trampoline = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
        return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
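
/*
 * For reference, a minimal sketch of how the arch hooks above are
 * exercised from a module (not part of this file; "kernel_clone" is
 * only an example symbol name and varies by kernel version):
 *
 *      static int demo_pre(struct kprobe *p, struct pt_regs *regs)
 *      {
 *              pr_info("kprobe hit at %p\n", p->addr);
 *              return 0;       // 0 means: continue with single stepping
 *      }
 *
 *      static struct kprobe demo_kp = {
 *              .symbol_name = "kernel_clone",
 *              .pre_handler = demo_pre,
 *      };
 *
 *      static int __init demo_init(void)
 *      {
 *              // reaches arch_prepare_kprobe() and arch_arm_kprobe()
 *              return register_kprobe(&demo_kp);
 *      }
 *
 *      static void __exit demo_exit(void)
 *      {
 *              // reaches arch_disarm_kprobe() and arch_remove_kprobe()
 *              unregister_kprobe(&demo_kp);
 *      }
 *
 *      module_init(demo_init);
 *      module_exit(demo_exit);
 *      MODULE_LICENSE("GPL");
 */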