// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 */
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/daifflags.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/sections.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);

static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
        void *addrs[1];
        u32 insns[1];

        addrs[0] = addr;
        insns[0] = opcode;

        return aarch64_insn_patch_text(addrs, insns, 1);
}

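/*
 * Copy the probed instruction into its out-of-line (XOL) slot, make the
 * I-cache coherent with the new slot contents, and record where the PC
 * must be restored to once the slot has been single-stepped.
 */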
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
        /* prepare insn slot */
        patch_text(p->ainsn.api.insn, p->opcode);

        flush_icache_range((uintptr_t) (p->ainsn.api.insn),
                           (uintptr_t) (p->ainsn.api.insn) +
                           MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

        /*
         * The PC needs to be restored to the instruction following the
         * probe after the copy has been stepped out of line (XOL).
         */
        p->ainsn.api.restore = (unsigned long) p->addr +
          sizeof(kprobe_opcode_t);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
        /* This instruction is simulated, not executed XOL. No need to adjust the PC */
        p->ainsn.api.restore = 0;
}

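/*
 * Emulate the probed instruction via the simulation handler chosen at
 * decode time, then run the usual post-handling as if it had been
 * single-stepped.
 */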
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (p->ainsn.api.handler)
                p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

        /* single step simulated, now go for post processing */
        post_kprobe_handler(kcb, regs);
}

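/*
 * Validate and decode the instruction at the probe address: reject
 * unaligned addresses and instructions covered by the exception tables,
 * then either allocate an XOL slot or mark the instruction for simulation.
 */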
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        unsigned long probe_addr = (unsigned long)p->addr;

        if (probe_addr & 0x3)
                return -EINVAL;

        /* copy instruction */
        p->opcode = le32_to_cpu(*p->addr);

        if (search_exception_tables(probe_addr))
                return -EINVAL;

        /* decode instruction */
        switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
        case INSN_REJECTED:     /* insn not supported */
                return -EINVAL;

        case INSN_GOOD_NO_SLOT: /* insn needs simulation */
                p->ainsn.api.insn = NULL;
                break;

        case INSN_GOOD: /* instruction uses slot */
                p->ainsn.api.insn = get_insn_slot();
                if (!p->ainsn.api.insn)
                        return -ENOMEM;
                break;
        }

        /* prepare the instruction */
        if (p->ainsn.api.insn)
                arch_prepare_ss_slot(p);
        else
                arch_prepare_simulate(p);

        return 0;
}

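/*
 * Allocate a page for XOL slots. The page is mapped executable and then
 * made read-only; its permissions are reset when the mapping is freed.
 */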
void *alloc_insn_page(void)
{
        void *page;

        page = vmalloc_exec(PAGE_SIZE);
        if (page) {
                set_memory_ro((unsigned long)page, 1);
                set_vm_flush_reset_perms(page);
        }

        return page;
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        patch_text(p->addr, BRK64_OPCODE_KPROBES);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.api.insn) {
                free_insn_slot(p->ainsn.api.insn, 0);
                p->ainsn.api.insn = NULL;
        }
}

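/*
 * The following helpers save and restore the per-CPU kprobe state so that
 * a probe hit while another probe is being handled (a re-entrant hit) can
 * be processed and the original state recovered afterwards.
 */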
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
        __this_cpu_write(current_kprobe, p);
}

/*
 * Interrupts need to be disabled before single-step mode is enabled, and
 * must not be re-enabled until single-step mode ends.
 * Without masking interrupts on the local CPU, an interrupt could be taken
 * between the exception return and the start of the out-of-line single
 * step, which would result in wrongly single-stepping into the interrupt
 * handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
                                                struct pt_regs *regs)
{
        kcb->saved_irqflag = regs->pstate & DAIF_MASK;
        regs->pstate |= PSR_I_BIT;
        /* Unmask PSTATE.D for enabling software step exceptions. */
        regs->pstate &= ~PSR_D_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
                                                struct pt_regs *regs)
{
        regs->pstate &= ~DAIF_MASK;
        regs->pstate |= kcb->saved_irqflag;
}

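/*
 * Track the pending single step: record the address at which the step
 * exception is expected to land (the instruction after the XOL slot).
 */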
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
        kcb->ss_ctx.ss_pending = true;
        kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
        kcb->ss_ctx.ss_pending = false;
        kcb->ss_ctx.match_addr = 0;
}

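/*
 * Start handling of a probed instruction: either single-step the copy in
 * the XOL slot using the hardware step facility, or simulate the
 * instruction directly when no slot is used.
 */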
static void __kprobes setup_singlestep(struct kprobe *p,
                                       struct pt_regs *regs,
                                       struct kprobe_ctlblk *kcb, int reenter)
{
        unsigned long slot;

        if (reenter) {
                save_previous_kprobe(kcb);
                set_current_kprobe(p);
                kcb->kprobe_status = KPROBE_REENTER;
        } else {
                kcb->kprobe_status = KPROBE_HIT_SS;
        }

        if (p->ainsn.api.insn) {
                /* prepare for single stepping */
                slot = (unsigned long)p->ainsn.api.insn;

                set_ss_context(kcb, slot);      /* mark pending ss */

                /* IRQs and single stepping do not mix well. */
                kprobes_save_local_irqflag(kcb, regs);
                kernel_enable_single_step(regs);
                instruction_pointer_set(regs, slot);
        } else {
                /* insn simulation */
                arch_simulate_insn(p, regs);
        }
}

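/*
 * Handle a probe hit while another probe is already being handled on this
 * CPU. A hit from a pre/post handler is stepped as a nested probe; a hit
 * while single-stepping indicates an unrecoverable state.
 */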
static int __kprobes reenter_kprobe(struct kprobe *p,
                                    struct pt_regs *regs,
                                    struct kprobe_ctlblk *kcb)
{
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
        case KPROBE_HIT_ACTIVE:
                kprobes_inc_nmissed_count(p);
                setup_singlestep(p, regs, kcb, 1);
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                pr_warn("Unrecoverable kprobe detected.\n");
                dump_kprobe(p);
                BUG();
                break;
        default:
                WARN_ON(1);
                return 0;
        }

        return 1;
}

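/*
 * Called once the probed instruction has been executed out of line or
 * simulated: fix up the PC, then either pop the previous kprobe state
 * (for a nested hit) or invoke any post_handler and clear the current
 * kprobe.
 */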
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();

        if (!cur)
                return;

        /* restore the resume address for a non-branching insn */
        if (cur->ainsn.api.restore != 0)
                instruction_pointer_set(regs, cur->ainsn.api.restore);

        /* restore the previously saved kprobe state and continue */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                return;
        }
        /* call post handler */
        kcb->kprobe_status = KPROBE_HIT_SSDONE;
        if (cur->post_handler) {
                /*
                 * post_handler can hit a breakpoint and single step
                 * again, so we enable the D-flag for the recursive
                 * exception.
                 */
                cur->post_handler(cur, regs, 0);
        }

        reset_current_kprobe();
}

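/*
 * Called from the fault handling code when a fault is taken while a
 * kprobe is active, e.g. while single-stepping the copied instruction or
 * from within a user-registered handler.
 */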
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe, point the PC back at the probe address,
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                instruction_pointer_set(regs, (unsigned long) cur->addr);
                if (!instruction_pointer(regs))
                        BUG();

                kernel_disable_single_step();

                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();

                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * the npre/npostfault counts could also be used to
                 * account for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because an instruction in the pre/post
                 * handler caused the page fault. This could happen if
                 * the handler tries to access user space, e.g. via
                 * copy_from_user() or get_user(). Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                if (fixup_exception(regs))
                        return 1;
        }
        return 0;
}

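/*
 * Main entry point from the BRK exception: look up the kprobe registered
 * at the faulting PC and dispatch it, handling re-entrant hits as needed.
 */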
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p, *cur_kprobe;
        struct kprobe_ctlblk *kcb;
        unsigned long addr = instruction_pointer(regs);

        kcb = get_kprobe_ctlblk();
        cur_kprobe = kprobe_running();

        p = get_kprobe((kprobe_opcode_t *) addr);

        if (p) {
                if (cur_kprobe) {
                        if (reenter_kprobe(p, regs, kcb))
                                return;
                } else {
                        /* Probe hit */
                        set_current_kprobe(p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing.  If we have a
                         * pre-handler and it returned non-zero, it has
                         * modified the execution path, so there is no need
                         * to single step. Just reset the current kprobe
                         * and exit.
                         *
                         * The pre_handler can itself hit a breakpoint and
                         * single step through it before returning, so keep
                         * the PSTATE D-flag enabled until the pre_handler
                         * returns.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs)) {
                                setup_singlestep(p, regs, kcb, 0);
                        } else
                                reset_current_kprobe();
                }
        }
        /*
         * The breakpoint instruction was removed right
         * after we hit it.  Another cpu has removed
         * either a probepoint or a debugger breakpoint
         * at this address.  In either case, no further
         * handling of this exception is appropriate.
         * Return to the original instruction and continue.
         */
}

static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
        if ((kcb->ss_ctx.ss_pending)
            && (kcb->ss_ctx.match_addr == addr)) {
                clear_ss_context(kcb);  /* clear pending ss */
                return DBG_HOOK_HANDLED;
        }
        /* not ours, kprobes should ignore it */
        return DBG_HOOK_ERROR;
}

static int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        int retval;

        /* return error if this is not our step */
        retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

        if (retval == DBG_HOOK_HANDLED) {
                kprobes_restore_local_irqflag(kcb, regs);
                kernel_disable_single_step();

                post_kprobe_handler(kcb, regs);
        }

        return retval;
}

static struct step_hook kprobes_step_hook = {
        .fn = kprobe_single_step_handler,
};

static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
        kprobe_handler(regs);
        return DBG_HOOK_HANDLED;
}

static struct break_hook kprobes_break_hook = {
        .imm = KPROBES_BRK_IMM,
        .fn = kprobe_breakpoint_handler,
};

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
        int ret;

        ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
                                        (unsigned long)__entry_text_end);
        if (ret)
                return ret;
        ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
                                        (unsigned long)__irqentry_text_end);
        if (ret)
                return ret;
        ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
                                        (unsigned long)__exception_text_end);
        if (ret)
                return ret;
        ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
                                        (unsigned long)__idmap_text_end);
        if (ret)
                return ret;
        ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
                                        (unsigned long)__hyp_text_end);
        if (ret || is_kernel_in_hyp_mode())
                return ret;
        ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
                                        (unsigned long)__hyp_idmap_text_end);
        return ret;
}

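/*
 * Called when a kretprobed function returns through kretprobe_trampoline:
 * find the recorded return address for the current task, run the
 * registered kretprobe handlers, and hand the original return address
 * back to the trampoline.
 */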
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address =
                (unsigned long)&kretprobe_trampoline;
        kprobe_opcode_t *correct_ret_addr = NULL;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * return probes installed on them, and/or more than one
         * return probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always pushed into the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the (chronologically) first instance's ret_addr
         *       will be the real return address, and all the rest will
         *       point to kretprobe_trampoline.
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long)ri->ret_addr;

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);

        correct_ret_addr = ri->ret_addr;
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
                        __this_cpu_write(current_kprobe, &ri->rp->kp);
                        get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
                        __this_cpu_write(current_kprobe, NULL);
                }

                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_hash_unlock(current, &flags);

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        return (void *)orig_ret_address;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

        /* replace return addr (x30) with trampoline */
        regs->regs[30] = (long)&kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        return 0;
}

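/*
 * Register the break and single-step exception hooks that drive the
 * kprobe and single-step handlers above.
 */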
int __init arch_init_kprobes(void)
{
        register_kernel_break_hook(&kprobes_break_hook);
        register_kernel_step_hook(&kprobes_step_hook);

        return 0;
}