linux/arch/mn10300/kernel/kprobes.c
/* MN10300 Kernel probes implementation
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by Mark Salter (msalter@redhat.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public Licence as published by
 * the Free Software Foundation; either version 2 of the Licence, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public Licence for more details.
 *
 * You should have received a copy of the GNU General Public Licence
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>

struct kretprobe_blackpoint kretprobe_blacklist[] = { { NULL, NULL } };
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
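
/*
 * No functions are blacklisted for kretprobes on this architecture, but the
 * generic kretprobe code still expects the (empty) table and its size to be
 * defined.
 */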

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE       0x00000001
#define KPROBE_HIT_SS           0x00000002
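
/*
 * State for the probe currently being handled, kept in file-scope variables
 * (the code evidently deals with one probe at a time):
 *  - current_kprobe_orig_pc:  address of the probed instruction
 *  - current_kprobe_next_pc:  address at which execution should resume once
 *                             the single-step has completed
 *  - current_kprobe_ss_flags: SINGLESTEP_* flags describing the instruction
 *  - current_kprobe_ss_buf:   out-of-line copy of the instruction that is
 *                             actually executed, followed by a temporary
 *                             breakpoint
 *  - current_kprobe_bp_addr:  address of that temporary breakpoint
 */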

static struct kprobe *current_kprobe;
static unsigned long current_kprobe_orig_pc;
static unsigned long current_kprobe_next_pc;
static int current_kprobe_ss_flags;
static unsigned long kprobe_status;
static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2];
static unsigned long current_kprobe_bp_addr;

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;


/* singlestep flag bits */
#define SINGLESTEP_BRANCH 1
#define SINGLESTEP_PCREL  2
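/*
 * SINGLESTEP_PCREL: the next-PC value computed by find_nextpc() is relative
 * to the single-step buffer and must be rebased onto the original probe
 * address by prepare_singlestep().
 * SINGLESTEP_BRANCH: the instruction transfers control, so it needs
 * singlestep_branch_setup() before the step and the fix-ups in
 * resume_execution() afterwards.
 */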

#define READ_BYTE(p, valp) \
        do { *(u8 *)(valp) = *(u8 *)(p); } while (0)

#define READ_WORD16(p, valp)                                    \
        do {                                                    \
                READ_BYTE((p), (valp));                         \
                READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1);     \
        } while (0)

#define READ_WORD32(p, valp)                                    \
        do {                                                    \
                READ_BYTE((p), (valp));                         \
                READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1);     \
                READ_BYTE((u8 *)(p) + 2, (u8 *)(valp) + 2);     \
                READ_BYTE((u8 *)(p) + 3, (u8 *)(valp) + 3);     \
        } while (0)
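
/*
 * The READ_* helpers above assemble operand fields a byte at a time; the
 * instruction stream is only byte-aligned, so this avoids depending on
 * unaligned 16/32-bit loads when picking displacements out of probed code.
 */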


static const u8 mn10300_insn_sizes[256] =
{
        /* 0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f */
        1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, /* 0 */
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 1 */
        2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, /* 2 */
        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, /* 3 */
        1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, /* 4 */
        1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, /* 5 */
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 7 */
        2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 8 */
        2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 9 */
        2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* a */
        2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* b */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2, /* c */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */
        0, 2, 2, 2, 2, 2, 2, 4, 0, 3, 0, 4, 0, 6, 7, 1  /* f */
};
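
/*
 * The table above gives the length in bytes of an instruction, indexed by
 * its first opcode byte.  A zero entry means the length (and the next PC)
 * cannot be derived from that byte alone (branches, returns, the Lxx/SETLB
 * group and the multi-byte 0xf0/0xf8/0xfa/0xfc forms); find_nextpc() decodes
 * those opcodes explicitly.
 */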

#define LT (1 << 0)
#define GT (1 << 1)
#define GE (1 << 2)
#define LE (1 << 3)
#define CS (1 << 4)
#define HI (1 << 5)
#define CC (1 << 6)
#define LS (1 << 7)
#define EQ (1 << 8)
#define NE (1 << 9)
#define RA (1 << 10)
#define VC (1 << 11)
#define VS (1 << 12)
#define NC (1 << 13)
#define NS (1 << 14)

static const u16 cond_table[] = {
        /*  V  C  N  Z  */
        /*  0  0  0  0  */ (NE | NC | CC | VC | GE | GT | HI),
        /*  0  0  0  1  */ (EQ | NC | CC | VC | GE | LE | LS),
        /*  0  0  1  0  */ (NE | NS | CC | VC | LT | LE | HI),
        /*  0  0  1  1  */ (EQ | NS | CC | VC | LT | LE | LS),
        /*  0  1  0  0  */ (NE | NC | CS | VC | GE | GT | LS),
        /*  0  1  0  1  */ (EQ | NC | CS | VC | GE | LE | LS),
        /*  0  1  1  0  */ (NE | NS | CS | VC | LT | LE | LS),
        /*  0  1  1  1  */ (EQ | NS | CS | VC | LT | LE | LS),
        /*  1  0  0  0  */ (NE | NC | CC | VS | LT | LE | HI),
        /*  1  0  0  1  */ (EQ | NC | CC | VS | LT | LE | LS),
        /*  1  0  1  0  */ (NE | NS | CC | VS | GE | GT | HI),
        /*  1  0  1  1  */ (EQ | NS | CC | VS | GE | LE | LS),
        /*  1  1  0  0  */ (NE | NC | CS | VS | LT | LE | LS),
        /*  1  1  0  1  */ (EQ | NC | CS | VS | LT | LE | LS),
        /*  1  1  1  0  */ (NE | NS | CS | VS | GE | GT | LS),
        /*  1  1  1  1  */ (EQ | NS | CS | VS | GE | LE | LS),
};
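
/*
 * cond_table[] is indexed by the V, C, N and Z flags from the low nibble of
 * EPSW; each entry is the set of condition codes that hold for that flag
 * combination.  For a one-byte Bxx at opcode 0xc0 + cc the branch is taken
 * when bit cc of the entry is set, e.g.:
 *
 *      taken = cond_table[regs->epsw & 0xf] & (1 << (opc & 0xf));
 *
 * The three-byte 0xf8 0xe8-0xeb forms use the same table with their
 * condition number offset by 3 (the VC/VS/NC/NS bits).
 */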

/*
 * Calculate what the PC will be after executing the next instruction
 */
static unsigned find_nextpc(struct pt_regs *regs, int *flags)
{
        unsigned size;
        s8  x8;
        s16 x16;
        s32 x32;
        u8 opc, *pc, *sp, *next;

        next = 0;
        *flags = SINGLESTEP_PCREL;

        pc = (u8 *) regs->pc;
        sp = (u8 *) (regs + 1);
        opc = *pc;

        size = mn10300_insn_sizes[opc];
        if (size > 0) {
                next = pc + size;
        } else {
                switch (opc) {
                        /* Bxx (d8,PC) */
                case 0xc0 ... 0xca:
                        x8 = 2;
                        if (cond_table[regs->epsw & 0xf] & (1 << (opc & 0xf)))
                                x8 = (s8)pc[1];
                        next = pc + x8;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                        /* JMP (d16,PC) or CALL (d16,PC) */
                case 0xcc:
                case 0xcd:
                        READ_WORD16(pc + 1, &x16);
                        next = pc + x16;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                        /* JMP (d32,PC) or CALL (d32,PC) */
                case 0xdc:
                case 0xdd:
                        READ_WORD32(pc + 1, &x32);
                        next = pc + x32;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                        /* RETF */
                case 0xde:
                        next = (u8 *)regs->mdr;
                        *flags &= ~SINGLESTEP_PCREL;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                        /* RET */
                case 0xdf:
                        sp += pc[2];
                        READ_WORD32(sp, &x32);
                        next = (u8 *)x32;
                        *flags &= ~SINGLESTEP_PCREL;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                case 0xf0:
                        next = pc + 2;
                        opc = pc[1];
                        if (opc >= 0xf0 && opc <= 0xf7) {
                                /* JMP (An) / CALLS (An) */
                                switch (opc & 3) {
                                case 0:
                                        next = (u8 *)regs->a0;
                                        break;
                                case 1:
                                        next = (u8 *)regs->a1;
                                        break;
                                case 2:
                                        next = (u8 *)regs->a2;
                                        break;
                                case 3:
                                        next = (u8 *)regs->a3;
                                        break;
                                }
                                *flags &= ~SINGLESTEP_PCREL;
                                *flags |= SINGLESTEP_BRANCH;
                        } else if (opc == 0xfc) {
                                /* RETS */
                                READ_WORD32(sp, &x32);
                                next = (u8 *)x32;
                                *flags &= ~SINGLESTEP_PCREL;
                                *flags |= SINGLESTEP_BRANCH;
                        } else if (opc == 0xfd) {
                                /* RTI */
                                READ_WORD32(sp + 4, &x32);
                                next = (u8 *)x32;
                                *flags &= ~SINGLESTEP_PCREL;
                                *flags |= SINGLESTEP_BRANCH;
                        }
                        break;

                        /* potential 3-byte conditional branches */
                case 0xf8:
                        next = pc + 3;
                        opc = pc[1];
                        if (opc >= 0xe8 && opc <= 0xeb &&
                            (cond_table[regs->epsw & 0xf] &
                             (1 << ((opc & 0xf) + 3)))
                            ) {
                                READ_BYTE(pc+2, &x8);
                                next = pc + x8;
                                *flags |= SINGLESTEP_BRANCH;
                        }
                        break;

                case 0xfa:
                        if (pc[1] == 0xff) {
                                /* CALLS (d16,PC) */
                                READ_WORD16(pc + 2, &x16);
                                next = pc + x16;
                        } else
                                next = pc + 4;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                case 0xfc:
                        x32 = 6;
                        if (pc[1] == 0xff) {
                                /* CALLS (d32,PC) */
                                READ_WORD32(pc + 2, &x32);
                        }
                        next = pc + x32;
                        *flags |= SINGLESTEP_BRANCH;
                        break;
                        /* LXX (d8,PC) */
                        /* SETLB - loads the next four bytes into the LIR reg */
                case 0xd0 ... 0xda:
                case 0xdb:
                        panic("Can't singlestep Lxx/SETLB\n");
                        break;
                }
        }
        return (unsigned)next;

}
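
/*
 * A branch cannot simply be stepped from the single-step buffer: it would
 * compute its target relative to the buffer, or push a return address that
 * points into it.  singlestep_branch_setup() therefore either places the
 * breakpoint directly over the copied branch (plain Bxx/JMP forms, which
 * never need to execute) or rewrites the copy and the return address/MDR it
 * uses so that control lands on the temporary breakpoint just past the copy.
 * The real target has already been computed by find_nextpc(), and
 * resume_execution() installs it afterwards, fixing up MDR and any return
 * address pushed by a rewritten call.
 */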

/*
 * set up out of place singlestep of some branching instructions
 */
static unsigned __kprobes singlestep_branch_setup(struct pt_regs *regs)
{
        u8 opc, *pc, *sp, *next;

        next = NULL;
        pc = (u8 *) regs->pc;
        sp = (u8 *) (regs + 1);

        switch (pc[0]) {
        case 0xc0 ... 0xca:     /* Bxx (d8,PC) */
        case 0xcc:              /* JMP (d16,PC) */
        case 0xdc:              /* JMP (d32,PC) */
        case 0xf8:              /* Bxx (d8,PC)  3-byte version */
                /* don't really need to do anything except cause trap  */
                next = pc;
                break;

        case 0xcd:              /* CALL (d16,PC) */
                pc[1] = 5;
                pc[2] = 0;
                next = pc + 5;
                break;

        case 0xdd:              /* CALL (d32,PC) */
                pc[1] = 7;
                pc[2] = 0;
                pc[3] = 0;
                pc[4] = 0;
                next = pc + 7;
                break;

        case 0xde:              /* RETF */
                next = pc + 3;
                regs->mdr = (unsigned) next;
                break;

        case 0xdf:              /* RET */
                sp += pc[2];
                next = pc + 3;
                *(unsigned *)sp = (unsigned) next;
                break;

        case 0xf0:
                next = pc + 2;
                opc = pc[1];
                if (opc >= 0xf0 && opc <= 0xf3) {
                        /* CALLS (An) */
                        /* use CALLS (d16,PC) to avoid mucking with An */
                        pc[0] = 0xfa;
                        pc[1] = 0xff;
                        pc[2] = 4;
                        pc[3] = 0;
                        next = pc + 4;
                } else if (opc >= 0xf4 && opc <= 0xf7) {
                        /* JMP (An) */
                        next = pc;
                } else if (opc == 0xfc) {
                        /* RETS */
                        next = pc + 2;
                        *(unsigned *) sp = (unsigned) next;
                } else if (opc == 0xfd) {
                        /* RTI */
                        next = pc + 2;
                        *(unsigned *)(sp + 4) = (unsigned) next;
                }
                break;

        case 0xfa:      /* CALLS (d16,PC) */
                pc[2] = 4;
                pc[3] = 0;
                next = pc + 4;
                break;

        case 0xfc:      /* CALLS (d32,PC) */
                pc[2] = 6;
                pc[3] = 0;
                pc[4] = 0;
                pc[5] = 0;
                next = pc + 6;
                break;

        case 0xd0 ... 0xda:     /* LXX (d8,PC) */
        case 0xdb:              /* SETLB */
                panic("Can't singlestep Lxx/SETLB\n");
        }

        return (unsigned) next;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        return 0;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
        memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = BREAKPOINT_INSTRUCTION;
        flush_icache_range((unsigned long) p->addr,
                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        mn10300_dcache_flush();
        mn10300_icache_inv();
}

void arch_remove_kprobe(struct kprobe *p)
{
}

static inline
void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
        *p->addr = p->opcode;
        regs->pc = (unsigned long) p->addr;
        mn10300_dcache_flush();
        mn10300_icache_inv();
}

static inline
void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long nextpc;

        current_kprobe_orig_pc = regs->pc;
        memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
        regs->pc = (unsigned long) current_kprobe_ss_buf;

        nextpc = find_nextpc(regs, &current_kprobe_ss_flags);
        if (current_kprobe_ss_flags & SINGLESTEP_PCREL)
                current_kprobe_next_pc =
                        current_kprobe_orig_pc + (nextpc - regs->pc);
        else
                current_kprobe_next_pc = nextpc;

        /* branching instructions need special handling */
        if (current_kprobe_ss_flags & SINGLESTEP_BRANCH)
                nextpc = singlestep_branch_setup(regs);

        current_kprobe_bp_addr = nextpc;

        *(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
        mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf,
                                    sizeof(current_kprobe_ss_buf));
        mn10300_icache_inv();
}
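
/*
 * The breakpoint byte written at nextpc above traps back into the kprobes
 * code as soon as the copied instruction has executed (or immediately, for
 * plain branches).  kprobe_exceptions_notify() compares the trapping PC with
 * current_kprobe_bp_addr to tell this single-step completion apart from a
 * fresh probe hit.
 */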

static inline int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        unsigned int *addr = (unsigned int *) regs->pc;

        /* We're in an interrupt, but this is clear and BUG()-safe. */
        preempt_disable();

        /* Check we're not actually recursing */
        if (kprobe_running()) {
                /* We *are* holding lock here, so this is safe.
                   Disarm the probe we just hit, and ignore it. */
                p = get_kprobe(addr);
                if (p) {
                        disarm_kprobe(p, regs);
                        ret = 1;
                } else {
                        p = current_kprobe;
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                /* If it's not ours, it can't be a delete race (we hold the lock). */
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*addr != BREAKPOINT_INSTRUCTION) {
                        /* The breakpoint instruction was removed right after
                         * we hit it.  Another cpu has removed either a
                         * probepoint or a debugger breakpoint at this address.
                         * In either case, no further handling of this
                         * interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }

        kprobe_status = KPROBE_HIT_ACTIVE;
        current_kprobe = p;
        if (p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
                return 1;
        }

ss_probe:
        prepare_singlestep(p, regs);
        kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        /* we may need to fixup regs/stack after singlestepping a call insn */
        if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) {
                regs->pc = current_kprobe_orig_pc;
                switch (p->ainsn.insn[0]) {
                case 0xcd:      /* CALL (d16,PC) */
                        *(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
                        break;
                case 0xdd:      /* CALL (d32,PC) */
                        /* fixup mdr and return address on stack */
                        *(unsigned *) regs->sp = regs->mdr = regs->pc + 7;
                        break;
                case 0xf0:
                        if (p->ainsn.insn[1] >= 0xf0 &&
                            p->ainsn.insn[1] <= 0xf3) {
                                /* CALLS (An) */
                                /* fixup MDR and return address on stack */
                                regs->mdr = regs->pc + 2;
                                *(unsigned *) regs->sp = regs->mdr;
                        }
                        break;

                case 0xfa:      /* CALLS (d16,PC) */
                        /* fixup MDR and return address on stack */
                        *(unsigned *) regs->sp = regs->mdr = regs->pc + 4;
                        break;

                case 0xfc:      /* CALLS (d32,PC) */
                        /* fixup MDR and return address on stack */
                        *(unsigned *) regs->sp = regs->mdr = regs->pc + 6;
                        break;
                }
        }

        regs->pc = current_kprobe_next_pc;
        current_kprobe_bp_addr = 0;
}

static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        if (!kprobe_running())
                return 0;

        if (current_kprobe->post_handler)
                current_kprobe->post_handler(current_kprobe, regs, 0);

        resume_execution(current_kprobe, regs);
        reset_current_kprobe();
        preempt_enable_no_resched();
        return 1;
}

/* Interrupts disabled, kprobe_lock held. */
static inline
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        if (current_kprobe->fault_handler &&
            current_kprobe->fault_handler(current_kprobe, regs, trapnr))
                return 1;

        if (kprobe_status & KPROBE_HIT_SS) {
                resume_execution(current_kprobe, regs);
                reset_current_kprobe();
                preempt_enable_no_resched();
        }
        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = data;

        switch (val) {
        case DIE_BREAKPOINT:
                if (current_kprobe_bp_addr != args->regs->pc) {
                        if (kprobe_handler(args->regs))
                                return NOTIFY_STOP;
                } else {
                        if (post_kprobe_handler(args->regs))
                                return NOTIFY_STOP;
                }
                break;
        case DIE_GPF:
                if (kprobe_running() &&
                    kprobe_fault_handler(args->regs, args->trapnr))
                        return NOTIFY_STOP;
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

/* Jprobes support.  */
static struct pt_regs jprobe_saved_regs;
static struct pt_regs *jprobe_saved_regs_location;
static kprobe_opcode_t jprobe_saved_stack[MAX_STACK_SIZE];

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);

        jprobe_saved_regs_location = regs;
        memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));

        /* Save a whole stack frame; this captures arguments that were
         * pushed onto the stack after the argument registers were
         * used up.
         */
        memcpy(&jprobe_saved_stack, regs + 1, sizeof(jprobe_saved_stack));

        /* setup return addr to the jprobe handler routine */
        regs->pc = (unsigned long) jp->entry;
        return 1;
}

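/*
 * jprobe_return() switches the stack pointer back to the frame that was
 * current when the jprobe fired and then executes 0xff, the breakpoint
 * instruction, at the exported label jprobe_return_bp_addr.  The resulting
 * trap is caught by longjmp_break_handler(), which restores the saved
 * register and stack state; the probed instruction is then single-stepped
 * as for an ordinary kprobe.
 */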
void __kprobes jprobe_return(void)
{
        void *orig_sp = jprobe_saved_regs_location + 1;

        preempt_enable_no_resched();
        asm volatile("          mov     %0,sp\n"
                     ".globl    jprobe_return_bp_addr\n"
                     "jprobe_return_bp_addr:\n\t"
                     "          .byte   0xff\n"
                     : : "d" (orig_sp));
}

extern void jprobe_return_bp_addr(void);

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        u8 *addr = (u8 *) regs->pc;

        if (addr == (u8 *) jprobe_return_bp_addr) {
                if (jprobe_saved_regs_location != regs) {
                        printk(KERN_ERR"JPROBE:"
                               " Current regs (%p) does not match saved regs"
                               " (%p).\n",
                               regs, jprobe_saved_regs_location);
                        BUG();
                }

                /* Restore old register state. */
                memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));

                memcpy(regs + 1, &jprobe_saved_stack,
                       sizeof(jprobe_saved_stack));
                return 1;
        }
        return 0;
}

int __init arch_init_kprobes(void)
{
        return 0;
}