linux/arch/mn10300/kernel/kprobes.c
/* MN10300 Kernel probes implementation
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by Mark Salter (msalter@redhat.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public Licence as published by
 * the Free Software Foundation; either version 2 of the Licence, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public Licence for more details.
 *
 * You should have received a copy of the GNU General Public Licence
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>

struct kretprobe_blackpoint kretprobe_blacklist[] = { { NULL, NULL } };
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE       0x00000001
#define KPROBE_HIT_SS           0x00000002

static struct kprobe *cur_kprobe;
static unsigned long cur_kprobe_orig_pc;
static unsigned long cur_kprobe_next_pc;
static int cur_kprobe_ss_flags;
static unsigned long kprobe_status;
static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
static unsigned long cur_kprobe_bp_addr;

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;


/* singlestep flag bits:
 * SINGLESTEP_PCREL  - the next-PC value computed by find_nextpc() is
 *                     PC-relative to the single-step buffer and must be
 *                     rebased onto the original probe address
 * SINGLESTEP_BRANCH - the instruction transfers control and needs the
 *                     fix-ups done in singlestep_branch_setup() and
 *                     resume_execution()
 */
#define SINGLESTEP_BRANCH 1
#define SINGLESTEP_PCREL  2

#define READ_BYTE(p, valp) \
        do { *(u8 *)(valp) = *(u8 *)(p); } while (0)

#define READ_WORD16(p, valp)                                    \
        do {                                                    \
                READ_BYTE((p), (valp));                         \
                READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1);     \
        } while (0)

#define READ_WORD32(p, valp)                                    \
        do {                                                    \
                READ_BYTE((p), (valp));                         \
                READ_BYTE((u8 *)(p) + 1, (u8 *)(valp) + 1);     \
                READ_BYTE((u8 *)(p) + 2, (u8 *)(valp) + 2);     \
                READ_BYTE((u8 *)(p) + 3, (u8 *)(valp) + 3);     \
        } while (0)


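/*
 * Total instruction length in bytes, indexed by the first opcode byte.  A
 * size of zero marks opcodes that need further decoding in find_nextpc()
 * (branches, returns and the extended 0xf0/0xf8/0xfa/0xfc forms).
 */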
static const u8 mn10300_insn_sizes[256] =
{
        /* 0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f */
        1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, 1, 3, 3, 3, /* 0 */
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 1 */
        2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, /* 2 */
        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, /* 3 */
        1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, /* 4 */
        1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, /* 5 */
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 7 */
        2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 8 */
        2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* 9 */
        2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* a */
        2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, /* b */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 2, /* c */
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d */
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* e */
        0, 2, 2, 2, 2, 2, 2, 4, 0, 3, 0, 4, 0, 6, 7, 1  /* f */
};

#define LT (1 << 0)
#define GT (1 << 1)
#define GE (1 << 2)
#define LE (1 << 3)
#define CS (1 << 4)
#define HI (1 << 5)
#define CC (1 << 6)
#define LS (1 << 7)
#define EQ (1 << 8)
#define NE (1 << 9)
#define RA (1 << 10)
#define VC (1 << 11)
#define VS (1 << 12)
#define NC (1 << 13)
#define NS (1 << 14)

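/*
 * For each combination of the V, C, N and Z flags (the low four bits of
 * EPSW), the table entry lists which branch conditions are satisfied.
 * find_nextpc() indexes it with (epsw & 0xf) and tests the bit for the
 * condition encoded in the Bxx opcode.
 */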
static const u16 cond_table[] = {
        /*  V  C  N  Z  */
        /*  0  0  0  0  */ (NE | NC | CC | VC | GE | GT | HI),
        /*  0  0  0  1  */ (EQ | NC | CC | VC | GE | LE | LS),
        /*  0  0  1  0  */ (NE | NS | CC | VC | LT | LE | HI),
        /*  0  0  1  1  */ (EQ | NS | CC | VC | LT | LE | LS),
        /*  0  1  0  0  */ (NE | NC | CS | VC | GE | GT | LS),
        /*  0  1  0  1  */ (EQ | NC | CS | VC | GE | LE | LS),
        /*  0  1  1  0  */ (NE | NS | CS | VC | LT | LE | LS),
        /*  0  1  1  1  */ (EQ | NS | CS | VC | LT | LE | LS),
        /*  1  0  0  0  */ (NE | NC | CC | VS | LT | LE | HI),
        /*  1  0  0  1  */ (EQ | NC | CC | VS | LT | LE | LS),
        /*  1  0  1  0  */ (NE | NS | CC | VS | GE | GT | HI),
        /*  1  0  1  1  */ (EQ | NS | CC | VS | GE | LE | LS),
        /*  1  1  0  0  */ (NE | NC | CS | VS | LT | LE | LS),
        /*  1  1  0  1  */ (EQ | NC | CS | VS | LT | LE | LS),
        /*  1  1  1  0  */ (NE | NS | CS | VS | GE | GT | LS),
        /*  1  1  1  1  */ (EQ | NS | CS | VS | GE | LE | LS),
};

/*
 * Calculate what the PC will be after executing the next instruction
 */
static unsigned find_nextpc(struct pt_regs *regs, int *flags)
{
        unsigned size;
        s8  x8;
        s16 x16;
        s32 x32;
        u8 opc, *pc, *sp, *next;

        next = 0;
        *flags = SINGLESTEP_PCREL;

        pc = (u8 *) regs->pc;
        sp = (u8 *) (regs + 1);
        opc = *pc;

        size = mn10300_insn_sizes[opc];
        if (size > 0) {
                next = pc + size;
        } else {
                switch (opc) {
                        /* Bxx (d8,PC) */
                case 0xc0 ... 0xca:
                        x8 = 2;
                        if (cond_table[regs->epsw & 0xf] & (1 << (opc & 0xf)))
                                x8 = (s8)pc[1];
                        next = pc + x8;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                        /* JMP (d16,PC) or CALL (d16,PC) */
                case 0xcc:
                case 0xcd:
                        READ_WORD16(pc + 1, &x16);
                        next = pc + x16;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                        /* JMP (d32,PC) or CALL (d32,PC) */
                case 0xdc:
                case 0xdd:
                        READ_WORD32(pc + 1, &x32);
                        next = pc + x32;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                        /* RETF */
                case 0xde:
                        next = (u8 *)regs->mdr;
                        *flags &= ~SINGLESTEP_PCREL;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                        /* RET */
                case 0xdf:
                        sp += pc[2];
                        READ_WORD32(sp, &x32);
                        next = (u8 *)x32;
                        *flags &= ~SINGLESTEP_PCREL;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                case 0xf0:
                        next = pc + 2;
                        opc = pc[1];
                        if (opc >= 0xf0 && opc <= 0xf7) {
                                /* JMP (An) / CALLS (An) */
                                switch (opc & 3) {
                                case 0:
                                        next = (u8 *)regs->a0;
                                        break;
                                case 1:
                                        next = (u8 *)regs->a1;
                                        break;
                                case 2:
                                        next = (u8 *)regs->a2;
                                        break;
                                case 3:
                                        next = (u8 *)regs->a3;
                                        break;
                                }
                                *flags &= ~SINGLESTEP_PCREL;
                                *flags |= SINGLESTEP_BRANCH;
                        } else if (opc == 0xfc) {
                                /* RETS */
                                READ_WORD32(sp, &x32);
                                next = (u8 *)x32;
                                *flags &= ~SINGLESTEP_PCREL;
                                *flags |= SINGLESTEP_BRANCH;
                        } else if (opc == 0xfd) {
                                /* RTI */
                                READ_WORD32(sp + 4, &x32);
                                next = (u8 *)x32;
                                *flags &= ~SINGLESTEP_PCREL;
                                *flags |= SINGLESTEP_BRANCH;
                        }
                        break;

                        /* potential 3-byte conditional branches */
                case 0xf8:
                        next = pc + 3;
                        opc = pc[1];
                        if (opc >= 0xe8 && opc <= 0xeb &&
                            (cond_table[regs->epsw & 0xf] &
                             (1 << ((opc & 0xf) + 3)))
                            ) {
                                READ_BYTE(pc + 2, &x8);
                                next = pc + x8;
                                *flags |= SINGLESTEP_BRANCH;
                        }
                        break;

                case 0xfa:
                        if (pc[1] == 0xff) {
                                /* CALLS (d16,PC) */
                                READ_WORD16(pc + 2, &x16);
                                next = pc + x16;
                        } else
                                next = pc + 4;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                case 0xfc:
                        x32 = 6;
                        if (pc[1] == 0xff) {
                                /* CALLS (d32,PC) */
                                READ_WORD32(pc + 2, &x32);
                        }
                        next = pc + x32;
                        *flags |= SINGLESTEP_BRANCH;
                        break;

                        /* LXX (d8,PC) */
                        /* SETLB - loads the next four bytes into the LIR reg */
                case 0xd0 ... 0xda:
                case 0xdb:
                        panic("Can't singlestep Lxx/SETLB\n");
                        break;
                }
        }
        return (unsigned)next;
}

/*
 * Set up out-of-line single-stepping of some branching instructions
 */
static unsigned __kprobes singlestep_branch_setup(struct pt_regs *regs)
{
        u8 opc, *pc, *sp, *next;

        next = NULL;
        pc = (u8 *) regs->pc;
        sp = (u8 *) (regs + 1);

        switch (pc[0]) {
        case 0xc0 ... 0xca:     /* Bxx (d8,PC) */
        case 0xcc:              /* JMP (d16,PC) */
        case 0xdc:              /* JMP (d32,PC) */
        case 0xf8:              /* Bxx (d8,PC)  3-byte version */
                /* don't really need to do anything except cause trap  */
                next = pc;
                break;

        case 0xcd:              /* CALL (d16,PC) */
                pc[1] = 5;
                pc[2] = 0;
                next = pc + 5;
                break;

        case 0xdd:              /* CALL (d32,PC) */
                pc[1] = 7;
                pc[2] = 0;
                pc[3] = 0;
                pc[4] = 0;
                next = pc + 7;
                break;

        case 0xde:              /* RETF */
                next = pc + 3;
                regs->mdr = (unsigned) next;
                break;

        case 0xdf:              /* RET */
                sp += pc[2];
                next = pc + 3;
                *(unsigned *)sp = (unsigned) next;
                break;

        case 0xf0:
                next = pc + 2;
                opc = pc[1];
                if (opc >= 0xf0 && opc <= 0xf3) {
                        /* CALLS (An) */
                        /* use CALLS (d16,PC) to avoid mucking with An */
                        pc[0] = 0xfa;
                        pc[1] = 0xff;
                        pc[2] = 4;
                        pc[3] = 0;
                        next = pc + 4;
                } else if (opc >= 0xf4 && opc <= 0xf7) {
                        /* JMP (An) */
                        next = pc;
                } else if (opc == 0xfc) {
                        /* RETS */
                        next = pc + 2;
                        *(unsigned *) sp = (unsigned) next;
                } else if (opc == 0xfd) {
                        /* RTI */
                        next = pc + 2;
                        *(unsigned *)(sp + 4) = (unsigned) next;
                }
                break;

        case 0xfa:      /* CALLS (d16,PC) */
                pc[2] = 4;
                pc[3] = 0;
                next = pc + 4;
                break;

        case 0xfc:      /* CALLS (d32,PC) */
                pc[2] = 6;
                pc[3] = 0;
                pc[4] = 0;
                pc[5] = 0;
                next = pc + 6;
                break;

        case 0xd0 ... 0xda:     /* LXX (d8,PC) */
        case 0xdb:              /* SETLB */
                panic("Can't singlestep Lxx/SETLB\n");
        }

        return (unsigned) next;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        return 0;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
        memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = BREAKPOINT_INSTRUCTION;
        flush_icache_range((unsigned long) p->addr,
                           (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        /* restore the original first byte of the probed instruction */
        *p->addr = p->opcode;
#ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush();
        mn10300_icache_inv();
#endif
}

void arch_remove_kprobe(struct kprobe *p)
{
}

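/*
 * Temporarily knock out a probe that has been hit recursively: put back the
 * original first byte and rewind the PC so that the original instruction
 * executes without re-triggering the probe.
 */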
static inline
void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
        *p->addr = p->opcode;
        regs->pc = (unsigned long) p->addr;
#ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush();
        mn10300_icache_inv();
#endif
}

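/*
 * Set up an out-of-line single step: copy the probed instruction into
 * cur_kprobe_ss_buf, point the PC at the copy and plant a breakpoint at the
 * address execution will reach next, then flush the caches so the modified
 * buffer is what actually gets executed.
 */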
static inline
void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long nextpc;

        cur_kprobe_orig_pc = regs->pc;
        memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
        regs->pc = (unsigned long) cur_kprobe_ss_buf;

        nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
        if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
                cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
        else
                cur_kprobe_next_pc = nextpc;

        /* branching instructions need special handling */
        if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
                nextpc = singlestep_branch_setup(regs);

        cur_kprobe_bp_addr = nextpc;

        *(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
        mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
                                    sizeof(cur_kprobe_ss_buf));
        mn10300_icache_inv();
}

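/*
 * Breakpoint exception handler for kprobes, called via
 * kprobe_exceptions_notify().  Deals with recursive hits, runs the probe's
 * pre-handler and then arranges the out-of-line single step.
 */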
static inline int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        kprobe_opcode_t *addr = (kprobe_opcode_t *) regs->pc;

        /* We don't want to be preempted for the entire
         * duration of kprobe processing */
        preempt_disable();

        /* Check we're not actually recursing */
        if (kprobe_running()) {
                /* We *are* holding lock here, so this is safe.
                   Disarm the probe we just hit, and ignore it. */
                p = get_kprobe(addr);
                if (p) {
                        disarm_kprobe(p, regs);
                        ret = 1;
                } else {
                        p = cur_kprobe;
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                /* If it's not ours, it can't be a delete race (we hold the lock). */
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*addr != BREAKPOINT_INSTRUCTION) {
                        /* The breakpoint instruction was removed right after
                         * we hit it.  Another cpu has removed either a
                         * probepoint or a debugger breakpoint at this address.
                         * In either case, no further handling of this
                         * interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }

        kprobe_status = KPROBE_HIT_ACTIVE;
        cur_kprobe = p;
        if (p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
                return 1;
        }

ss_probe:
        prepare_singlestep(p, regs);
        kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        /* we may need to fixup regs/stack after singlestepping a call insn */
        if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
                regs->pc = cur_kprobe_orig_pc;
                switch (p->ainsn.insn[0]) {
                case 0xcd:      /* CALL (d16,PC) */
                        *(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
                        break;
                case 0xdd:      /* CALL (d32,PC) */
                        /* fixup mdr and return address on stack */
                        *(unsigned *) regs->sp = regs->mdr = regs->pc + 7;
                        break;
                case 0xf0:
                        if (p->ainsn.insn[1] >= 0xf0 &&
                            p->ainsn.insn[1] <= 0xf3) {
                                /* CALLS (An) */
                                /* fixup MDR and return address on stack */
                                regs->mdr = regs->pc + 2;
                                *(unsigned *) regs->sp = regs->mdr;
                        }
                        break;

                case 0xfa:      /* CALLS (d16,PC) */
                        /* fixup MDR and return address on stack */
                        *(unsigned *) regs->sp = regs->mdr = regs->pc + 4;
                        break;

                case 0xfc:      /* CALLS (d32,PC) */
                        /* fixup MDR and return address on stack */
                        *(unsigned *) regs->sp = regs->mdr = regs->pc + 6;
                        break;
                }
        }

        regs->pc = cur_kprobe_next_pc;
        cur_kprobe_bp_addr = 0;
}

static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        if (!kprobe_running())
                return 0;

        if (cur_kprobe->post_handler)
                cur_kprobe->post_handler(cur_kprobe, regs, 0);

        resume_execution(cur_kprobe, regs);
        reset_current_kprobe();
        preempt_enable_no_resched();
        return 1;
}

/* Interrupts disabled, kprobe_lock held. */
static inline
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        if (cur_kprobe->fault_handler &&
            cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
                return 1;

        if (kprobe_status & KPROBE_HIT_SS) {
                resume_execution(cur_kprobe, regs);
                reset_current_kprobe();
                preempt_enable_no_resched();
        }
        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = data;

        switch (val) {
        case DIE_BREAKPOINT:
                if (cur_kprobe_bp_addr != args->regs->pc) {
                        if (kprobe_handler(args->regs))
                                return NOTIFY_STOP;
                } else {
                        if (post_kprobe_handler(args->regs))
                                return NOTIFY_STOP;
                }
                break;
        case DIE_GPF:
                if (kprobe_running() &&
                    kprobe_fault_handler(args->regs, args->trapnr))
                        return NOTIFY_STOP;
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

/* Jprobes support.  */
static struct pt_regs jprobe_saved_regs;
static struct pt_regs *jprobe_saved_regs_location;
static kprobe_opcode_t jprobe_saved_stack[MAX_STACK_SIZE];

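/*
 * Jprobe entry point: save the register frame and a copy of the top of the
 * probed function's stack, then divert the PC to the jprobe handler.
 */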
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);

        jprobe_saved_regs_location = regs;
        memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));

        /* Save a whole stack frame; this gets the arguments that were
         * pushed onto the stack after all the argument registers had
         * been used up.
         */
        memcpy(&jprobe_saved_stack, regs + 1, sizeof(jprobe_saved_stack));

        /* setup return addr to the jprobe handler routine */
        regs->pc = (unsigned long) jp->entry;
        return 1;
}

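/*
 * Return from the jprobe handler: restore the probed function's stack
 * pointer (saved by setjmp_pre_handler()) and execute a breakpoint byte at
 * jprobe_return_bp_addr so that longjmp_break_handler() can put back the
 * saved register and stack state.
 */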
void __kprobes jprobe_return(void)
{
        void *orig_sp = jprobe_saved_regs_location + 1;

        preempt_enable_no_resched();
        asm volatile("          mov     %0,sp\n"
                     ".globl    jprobe_return_bp_addr\n"
                     "jprobe_return_bp_addr:\n\t"
                     "          .byte   0xff\n"
                     : : "d" (orig_sp));
}

extern void jprobe_return_bp_addr(void);

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        u8 *addr = (u8 *) regs->pc;

        if (addr == (u8 *) jprobe_return_bp_addr) {
                if (jprobe_saved_regs_location != regs) {
                        printk(KERN_ERR "JPROBE:"
                               " Current regs (%p) does not match saved regs"
                               " (%p).\n",
                               regs, jprobe_saved_regs_location);
                        BUG();
                }

                /* Restore old register state. */
                memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));

                memcpy(regs + 1, &jprobe_saved_stack,
                       sizeof(jprobe_saved_stack));
                return 1;
        }
        return 0;
}

int __init arch_init_kprobes(void)
{
        return 0;
}