linux/arch/powerpc/kernel/kprobes.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes contributions from
 *              Rusty Russell).
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2004-Nov     Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *              for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

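/*
 * The kprobes blacklist covers the dedicated __kprobes text section as
 * well as everything from _stext to __head_end, i.e. the exception
 * vectors and early boot code at the head of the kernel image, which
 * cannot safely take a trap.
 */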
bool arch_within_kprobe_blacklist(unsigned long addr)
{
        return  (addr >= (unsigned long)__kprobes_text_start &&
                 addr < (unsigned long)__kprobes_text_end) ||
                (addr >= (unsigned long)_stext &&
                 addr < (unsigned long)__head_end);
}

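/*
 * Resolve a symbol name to a probeable address. On ABIv2 this means
 * using the local entry point (or the ftrace location, if present);
 * on ABIv1 it means looking through the function descriptor to the
 * dot-prefixed symbol that holds the actual code.
 */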
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
        kprobe_opcode_t *addr = NULL;

#ifdef PPC64_ELF_ABI_v2
        /* PPC64 ABIv2 needs local entry point */
        addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
        if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
                unsigned long faddr;
                /*
                 * Per livepatch.h, ftrace location is always within the first
                 * 16 bytes of a function on powerpc with -mprofile-kernel.
                 */
                faddr = ftrace_location_range((unsigned long)addr,
                                              (unsigned long)addr + 16);
                if (faddr)
                        addr = (kprobe_opcode_t *)faddr;
                else
#endif
                        addr = (kprobe_opcode_t *)ppc_function_entry(addr);
        }
#elif defined(PPC64_ELF_ABI_v1)
        /*
         * 64bit powerpc ABIv1 uses function descriptors:
         * - Check for the dot variant of the symbol first.
         * - If that fails, try looking up the symbol provided.
         *
         * This ensures we always get to the actual symbol and not
         * the descriptor.
         *
         * Also handle <module:symbol> format.
         */
        char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
        bool dot_appended = false;
        const char *c;
        ssize_t ret = 0;
        int len = 0;

        if ((c = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
                c++;
                len = c - name;
                memcpy(dot_name, name, len);
        } else
                c = name;

        if (*c != '\0' && *c != '.') {
                dot_name[len++] = '.';
                dot_appended = true;
        }
        ret = strscpy(dot_name + len, c, KSYM_NAME_LEN);
        if (ret > 0)
                addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);

        /* Fallback to the original non-dot symbol lookup */
        if (!addr && dot_appended)
                addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#else
        addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

        return addr;
}

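/*
 * Validate the probe address and copy the probed instruction into an
 * executable instruction slot for later single-stepping. We refuse
 * unaligned addresses, mtmsr[d]/rfi[d] instructions, and the suffix
 * word of a prefixed instruction.
 */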
int arch_prepare_kprobe(struct kprobe *p)
{
        int ret = 0;
        struct kprobe *prev;
        struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
        struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));

        if ((unsigned long)p->addr & 0x03) {
                printk("Attempt to register kprobe at an unaligned address\n");
                ret = -EINVAL;
        } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
                printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
                ret = -EINVAL;
        } else if (ppc_inst_prefixed(prefix)) {
                printk("Cannot register a kprobe on the second word of prefixed instruction\n");
                ret = -EINVAL;
        }
        preempt_disable();
        prev = get_kprobe(p->addr - 1);
        preempt_enable_no_resched();
        if (prev &&
            ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)prev->ainsn.insn))) {
                printk("Cannot register a kprobe on the second word of prefixed instruction\n");
                ret = -EINVAL;
        }

        /* insn must be on a special executable page on ppc64.  This is
         * not explicitly required on ppc32 (right now), but it doesn't hurt */
        if (!ret) {
                p->ainsn.insn = get_insn_slot();
                if (!p->ainsn.insn)
                        ret = -ENOMEM;
        }

        if (!ret) {
                patch_instruction((struct ppc_inst *)p->ainsn.insn, insn);
                p->opcode = ppc_inst_val(insn);
        }

        p->ainsn.boostable = 0;
        return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

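/*
 * Arming a kprobe patches a trap over the probed address; disarming
 * restores the original opcode saved by arch_prepare_kprobe().
 */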
void arch_arm_kprobe(struct kprobe *p)
{
        patch_instruction((struct ppc_inst *)p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
        patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.insn) {
                free_insn_slot(p->ainsn.insn, 0);
                p->ainsn.insn = NULL;
        }
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        enable_single_step(regs);

        /*
         * On powerpc we should single step on the original
         * instruction even if the probed insn is a trap
         * variant as values in regs could play a part in
         * determining whether the trap is taken or not.
         */
        regs->nip = (unsigned long)p->ainsn.insn;
}

static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_msr = regs->msr;
}

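/*
 * On ABIv2, the first 8 bytes of a function are the global entry point
 * and TOC setup, with the local entry point following; with
 * KPROBES_ON_FTRACE the ftrace location can be up to 16 bytes in (see
 * kprobe_lookup_name() above). Probes at such offsets still count as
 * being on function entry. Elsewhere, only offset 0 qualifies.
 */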
bool arch_kprobe_on_func_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
#ifdef CONFIG_KPROBES_ON_FTRACE
        return offset <= 16;
#else
        return offset <= 8;
#endif
#else
        return !offset;
#endif
}

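/*
 * Divert the probed function's return path through the trampoline by
 * hijacking the link register; the real return address is saved in
 * the kretprobe instance.
 */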
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)regs->link;
        ri->fp = NULL;

        /* Replace the return addr with trampoline addr */
        regs->link = (unsigned long)kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

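/*
 * Attempt to emulate the probed instruction rather than single-stepping
 * it. Returns > 0 if emulation succeeded (the probe is "boosted"),
 * 0 if the instruction must be single-stepped this time around, and
 * BUG()s on instructions a probe should never have been allowed on.
 */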
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
        int ret;
        struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);

        /* regs->nip is also adjusted if emulate_step returns 1 */
        ret = emulate_step(regs, insn);
        if (ret > 0) {
                /*
                 * Once this instruction has been boosted
                 * successfully, set the boostable flag
                 */
                if (unlikely(p->ainsn.boostable == 0))
                        p->ainsn.boostable = 1;
        } else if (ret < 0) {
                /*
                 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
                 * So, we should never get here... but, it's still
                 * good to catch them, just in case...
                 */
                printk("Can't step on instruction %s\n", ppc_inst_as_str(insn));
                BUG();
        } else {
                /*
                 * If we haven't previously emulated this instruction, then it
                 * can't be boosted. Note it down so we don't try to do so again.
                 *
                 * If, however, we had emulated this instruction in the past,
                 * then this is just an error with the current run (for
                 * instance, exceptions due to a load/store). We return 0 so
                 * that this is now single-stepped, but continue to try
                 * emulating it in subsequent probe hits.
                 */
                if (unlikely(p->ainsn.boostable != 1))
                        p->ainsn.boostable = -1;
        }

        return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);

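/*
 * Main trap-time entry point: determine whether the breakpoint we hit
 * belongs to a kprobe, deal with reentrancy, run the pre_handler, and
 * then either emulate the probed instruction or set up a single step
 * over its copy.
 */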
int kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        unsigned int *addr = (unsigned int *)regs->nip;
        struct kprobe_ctlblk *kcb;

        if (user_mode(regs))
                return 0;

        if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
                return 0;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        p = get_kprobe(addr);
        if (!p) {
                unsigned int instr;

                if (get_kernel_nofault(instr, addr))
                        goto no_kprobe;

                if (instr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * PowerPC has multiple variants of the "trap"
                         * instruction. If the current instruction is a
                         * trap variant, it could belong to someone else
                         */
                        if (is_trap(instr))
                                goto no_kprobe;
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }

        /* Check we're not actually recursing */
        if (kprobe_running()) {
                kprobe_opcode_t insn = *p->ainsn.insn;
                if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
                        /* Turn off 'trace' bits */
                        regs->msr &= ~MSR_SINGLESTEP;
                        regs->msr |= kcb->kprobe_saved_msr;
                        goto no_kprobe;
                }

                /*
                 * We have reentered the kprobe_handler(), since another probe
                 * was hit while within the handler. Here we save the original
                 * kprobes variables and just single step on the instruction of
                 * the new probe without calling any user handlers.
                 */
                save_previous_kprobe(kcb);
                set_current_kprobe(p, regs, kcb);
                kprobes_inc_nmissed_count(p);
                kcb->kprobe_status = KPROBE_REENTER;
                if (p->ainsn.boostable >= 0) {
                        ret = try_to_emulate(p, regs);

                        if (ret > 0) {
                                restore_previous_kprobe(kcb);
                                preempt_enable_no_resched();
                                return 1;
                        }
                }
                prepare_singlestep(p, regs);
                return 1;
        }

        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        set_current_kprobe(p, regs, kcb);
        if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler changed execution path, so skip ss setup */
                reset_current_kprobe();
                preempt_enable_no_resched();
                return 1;
        }

        if (p->ainsn.boostable >= 0) {
                ret = try_to_emulate(p, regs);

                if (ret > 0) {
                        if (p->post_handler)
                                p->post_handler(p, regs, 0);

                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                        reset_current_kprobe();
                        preempt_enable_no_resched();
                        return 1;
                }
        }
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *      - init_kprobes() establishes a probepoint here
 *      - When the probed function returns, this probe
 *              causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
        ".type kretprobe_trampoline, @function\n"
        "kretprobe_trampoline:\n"
        "nop\n"
        "blr\n"
        ".size kretprobe_trampoline, .-kretprobe_trampoline\n");

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long orig_ret_address;

        orig_ret_address = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
        /*
         * We get here through one of two paths:
         * 1. by taking a trap -> kprobe_handler() -> here
         * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
         *
         * When going back through (1), we need regs->nip to be set up properly
         * as it is used to determine the return address from the trap.
         * For (2), since nip is not honoured with optprobes, we instead set up
         * the link register properly so that the subsequent 'blr' in
         * kretprobe_trampoline jumps back to the right instruction.
         *
         * For nip, we should set the address to the previous instruction since
         * we end up emulating it in kprobe_handler(), which increments the nip
         * again.
         */
        regs->nip = orig_ret_address - 4;
        regs->link = orig_ret_address;

        return 0;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
        int len;
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur || user_mode(regs))
                return 0;

        len = ppc_inst_len(ppc_inst_read((struct ppc_inst *)cur->ainsn.insn));
        /* make sure we got here for instruction we have a kprobe on */
        if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        /* Adjust nip to after the single-stepped instruction */
        regs->nip = (unsigned long)cur->addr + len;
        regs->msr |= kcb->kprobe_saved_msr;

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        /*
         * If somebody else is single-stepping across a probe point, msr
         * will have DE/SE set, in which case continue the remaining processing
         * of do_debug, as if this is not a probe hit.
         */
        if (regs->msr & MSR_SINGLESTEP)
                return 0;

        return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);

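/*
 * Called when a fault is taken while a kprobe is active: either the
 * single-stepped instruction copy faulted, or the user's pre/post
 * handler did.
 */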
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single-
                 * stepped caused a page fault. We reset the current
                 * kprobe, point nip back to the probe address,
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->nip = (unsigned long)cur->addr;
                regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
                regs->msr |= kcb->kprobe_saved_msr;
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * we can also use the npre/npostfault counts to account
                 * for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault. This could happen
                 * if the handler tries to access user space via
                 * copy_from_user(), get_user(), etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                if ((entry = search_exception_tables(regs->nip)) != NULL) {
                        regs->nip = extable_fixup(entry);
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it.
                 * Let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

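/*
 * On ABIv1, 'entry' may be a function descriptor rather than a text
 * address; look through it to find the actual entry point.
 */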
unsigned long arch_deref_entry_point(void *entry)
{
#ifdef PPC64_ELF_ABI_v1
        if (!kernel_text_address((unsigned long)entry))
                return ppc_global_function_entry(entry);
        else
#endif
                return (unsigned long)entry;
}
NOKPROBE_SYMBOL(arch_deref_entry_point);

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

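/* Register the probe on kretprobe_trampoline at boot. */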
int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}

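/*
 * Let the generic kprobes code identify our kretprobe trampoline probe,
 * which it needs to treat specially.
 */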
int arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
                return 1;

        return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);