linux/arch/s390/kernel/ftrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *              Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount
 * -mfentry flags (since gcc 9 / clang 10) is used.
 * In both cases the original and also the disabled function prologue
 * contain only a single six-byte instruction and look like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched as shown above and afterwards
 * looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction will be patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump directly to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */

void *ftrace_func __read_mostly = ftrace_stub;
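/*
 * Both the "brcl 0,0" nop and the "brasl %r0,<target>" call are six-byte
 * RIL-format instructions: a 16-bit opcode/mask part followed by a signed
 * 32-bit displacement counted in halfwords (hence the divisions by two
 * below).
 */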
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

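/*
 * Common code shared by all per-location trampolines. Each trampoline
 * starts with "brasl %r1,<this code>" (see ftrace_init_nop() below), so
 * on entry %r1 points right behind that brasl. The lmg then loads,
 * skipping two bytes of padding, %r0 with the resume address inside the
 * traced function and %r1 with the interceptor (ftrace_caller), which is
 * finally branched to.
 */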
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);

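/*
 * Expoline variants of the shared code, used when the Spectre v2
 * mitigation is active (!nospec_disable): instead of a predictable
 * indirect "br %r1" they execute the branch via ex/exrl. The ex variant
 * executes a "br %r1" kept in lowcore (__LC_BR_R1); the exrl variant,
 * used if the execute-relative-long facility (35) is installed, executes
 * the local "br %r1" at label 0.
 */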
#ifdef CONFIG_EXPOLINE
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_ex:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	ex	%r0," __stringify(__LC_BR_R1) "(%r0)\n"
	"	j	.\n"
	"ftrace_shared_hotpatch_trampoline_ex_end:\n"
);

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_exrl:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	.insn	ril,0xc60000000000,%r0,0f\n" /* exrl */
	"	j	.\n"
	"0:	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */

#ifdef CONFIG_MODULES
static char *ftrace_plt;

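/*
 * PLT used for modules when the shared hotpatch trampolines are not
 * available. basr %r1,%r0 only saves the address of the following
 * instruction in %r1 (it does not branch); the lg then loads the
 * ftrace_caller address from the literal at label 0, and br jumps there.
 */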
asm(
	"	.data\n"
	"ftrace_plt_template:\n"
	"	basr	%r1,%r0\n"
	"	lg	%r1,0f-.(%r1)\n"
	"	br	%r1\n"
	"0:	.quad	ftrace_caller\n"
	"ftrace_plt_template_end:\n"
	"	.previous\n"
);
#endif /* CONFIG_MODULES */

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_ex;
		tend = ftrace_shared_hotpatch_trampoline_ex_end;
		if (test_facility(35)) { /* exrl */
			tstart = ftrace_shared_hotpatch_trampoline_exrl;
			tend = ftrace_shared_hotpatch_trampoline_exrl_end;
		}
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

bool ftrace_need_init_nop(void)
{
	return ftrace_shared_hotpatch_trampoline(NULL);
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

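	/*
	 * Pick a trampoline data block: kernel image locations consume the
	 * linker-provided array between __ftrace_hotpatch_trampolines_start
	 * and _end, while each module brings its own array and uses the
	 * shared code copied to ftrace_plt.
	 */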
	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015;	/* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/*
	 * Generate a jump to the trampoline: only the displacement is
	 * written, so the brcl remains a nop (mask zero) until the call
	 * site is enabled via brcl_enable().
	 */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

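/*
 * Nothing to patch when the tracer callback changes: every enabled call
 * site branches to the common ftrace_caller, which dispatches through
 * the ftrace_func pointer.
 */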
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

static void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
}

static void ftrace_generate_call_insn(struct ftrace_insn *insn,
				      unsigned long ip)
{
	unsigned long target;

	/* brasl r0,ftrace_caller */
	target = FTRACE_ADDR;
#ifdef CONFIG_MODULES
	if (is_module_addr((void *)ip))
		target = (unsigned long)ftrace_plt;
#endif /* CONFIG_MODULES */
	insn->opc = 0xc005;
	insn->disp = (target - ip) / 2;
}

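/*
 * The mask field of brcl occupies the upper four bits of the second
 * instruction byte; the lower four bits (0x4) are part of the opcode.
 * A mask of all zeroes turns the brcl into a nop, a mask of all ones
 * into an unconditional branch, so a call site can be enabled or
 * disabled with a single one-byte write.
 */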
static void brcl_disable(void *brcl)
{
	u8 op = 0x04;	/* set mask field to zero */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		brcl_disable((void *)rec->ip);
		return 0;
	}

	if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	/* Replace ftrace call with a nop. */
	ftrace_generate_call_insn(&orig, rec->ip);
	ftrace_generate_nop_insn(&new);

	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

static void brcl_enable(void *brcl)
{
	u8 op = 0xf4;	/* set mask field to all ones */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		brcl_enable((void *)rec->ip);
		return 0;
	}

	if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	/* Replace nop with an ftrace call. */
	ftrace_generate_nop_insn(&orig);
	ftrace_generate_call_insn(&new, rec->ip);

	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

void arch_ftrace_update_code(int command)
{
	if (ftrace_shared_hotpatch_trampoline(NULL))
		ftrace_modify_all_code(command);
	else
		ftrace_run_stop_machine(command);
}

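/*
 * Dummy IPI handler: the interruption on the remote CPU is enough of a
 * serializing operation to make it pick up the modified instructions.
 */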
static void __ftrace_sync(void *dummy)
{
}

int ftrace_arch_code_modify_post_process(void)
{
	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		/* Send SIGP to the other CPUs, so they see the new code. */
		smp_call_function(__ftrace_sync, NULL, 1);
	}
	return 0;
}

#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	if (!start) {
		start = ftrace_plt_template;
		end = ftrace_plt_template_end;
	}
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
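	/* ip points behind the patched instruction; adjust it back to it. */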
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative on condition long (brcl). To
 * enable the ftrace graph code block, we simply patch the mask field of
 * the instruction to zero, which turns the instruction into a nop.
 * To disable the ftrace graph code block, the mask field is patched to
 * all ones, which turns the instruction into an unconditional branch
 * that skips the block.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	brcl_disable(ftrace_graph_caller);
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	brcl_enable(ftrace_graph_caller);
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
		struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	preempt_disable_notrace();
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

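	/*
	 * There is no breakpoint here; pretend the PSW points at the probed
	 * instruction for the pre handler and, if it did not change the
	 * flow, continue behind the ftrace instruction as if it had been
	 * single-stepped.
	 */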
	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif