linux/arch/s390/kernel/ftrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *              Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include "entry.h"

/*
 * The function prologue is generated either with gcc's hotpatch feature
 * (since gcc 4.8) or with a combination of the -pg -mrecord-mcount
 * -mnop-mcount -mfentry flags (since gcc 9 / clang 10).
 * In both cases the original as well as the disabled function prologue
 * contains only a single six byte instruction and looks like this:
 * >    brcl    0,0                     # offset 0
 * To enable ftrace this instruction gets patched and afterwards looks
 * like this:
 * >    brasl   %r0,ftrace_caller       # offset 0
 *
 * The instruction is patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump to the ftrace caller directly, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */
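
/*
 * Both instruction variants are six bytes long: brcl 0,0 encodes as the two
 * byte opcode 0xc004 followed by a zero displacement, while
 * brasl %r0,target encodes as 0xc005 followed by the signed 32-bit halfword
 * offset from the patch site to the target. The struct ftrace_insn used
 * below presumably mirrors exactly this layout (a 16-bit opcode plus a
 * 32-bit displacement), which ftrace_generate_nop_insn() and
 * ftrace_generate_call_insn() fill in accordingly.
 */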

void *ftrace_func __read_mostly = ftrace_stub;
unsigned long ftrace_plt;

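/*
 * Every patched call site branches to the same ftrace_caller entry code (or
 * to the ftrace_plt trampoline for modules); which tracer callback actually
 * runs is selected through the ftrace_func pointer above. Switching a call
 * site between tracer trampolines therefore presumably never requires
 * rewriting the instruction itself, which is why ftrace_modify_call() below
 * can simply report success without doing anything.
 */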
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        return 0;
}

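/*
 * ftrace_make_nop() and ftrace_make_call() follow the same pattern: read the
 * current six byte instruction with copy_from_kernel_nofault(), verify that
 * it is exactly the instruction we expect to find (the brasl when disabling,
 * the brcl nop when enabling), and only then overwrite it with
 * s390_kernel_write(), which is able to patch otherwise write-protected
 * kernel text.
 */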
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        struct ftrace_insn orig, new, old;

        if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace the ftrace call with a nop. */
        ftrace_generate_call_insn(&orig, rec->ip);
        ftrace_generate_nop_insn(&new);

        /* Verify that the code to be replaced matches what we expect. */
        if (memcmp(&orig, &old, sizeof(old)))
                return -EINVAL;
        s390_kernel_write((void *) rec->ip, &new, sizeof(new));
        return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        struct ftrace_insn orig, new, old;

        if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace the nop with an ftrace call. */
        ftrace_generate_nop_insn(&orig);
        ftrace_generate_call_insn(&new, rec->ip);

        /* Verify that the code to be replaced matches what we expect. */
        if (memcmp(&orig, &old, sizeof(old)))
                return -EINVAL;
        s390_kernel_write((void *) rec->ip, &new, sizeof(new));
        return 0;
}

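/*
 * ftrace_func is the pointer through which the ftrace_caller entry code
 * (presumably in mcount.S) reaches the currently installed tracer callback,
 * so updating it is all that is needed to switch callbacks at runtime.
 */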
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        ftrace_func = func;
        return 0;
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}

#ifdef CONFIG_MODULES

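/*
 * Modules may be loaded too far away from the kernel image for the six byte
 * brasl to reach ftrace_caller directly, so a small trampoline page is
 * allocated with module_alloc(), i.e. within branch range of module code.
 * Its layout is:
 *
 * >    basr    %r1,0           # r1 = address of the following lg
 * >    lg      %r1,10(%r1)     # load the FTRACE_ADDR literal at offset 12
 * >    br      %r1             # branch to ftrace_caller
 * >    .quad   FTRACE_ADDR     # 8-byte literal
 *
 * This is also why the trampoline clobbers r1 in addition to r0, as noted
 * in the comment at the top of this file.
 */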
static int __init ftrace_plt_init(void)
{
        unsigned int *ip;

        ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
        if (!ftrace_plt)
                panic("cannot allocate ftrace plt\n");
        ip = (unsigned int *) ftrace_plt;
        ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
        ip[1] = 0x100a0004;
        ip[2] = 0x07f10000;
        ip[3] = FTRACE_ADDR >> 32;
        ip[4] = FTRACE_ADDR & 0xffffffff;
        set_memory_ro(ftrace_plt, 1);
        return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return addresses
 * of the current task.
 */
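
/*
 * Called from the patched function's prologue path (presumably from the
 * ftrace_graph_caller code in mcount.S) with the original return address ra,
 * the caller's stack pointer sp, and an instruction pointer ip that
 * apparently points just past the patched instruction, hence the
 * MCOUNT_INSN_SIZE adjustment below. If function_graph_enter() accepts the
 * frame, the returned address is redirected to return_to_handler so that
 * the function's exit can be traced as well.
 */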
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
                                    unsigned long ip)
{
        if (unlikely(ftrace_graph_is_dead()))
                goto out;
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        ip -= MCOUNT_INSN_SIZE;
        if (!function_graph_enter(ra, ip, 0, (void *) sp))
                ra = (unsigned long) return_to_handler;
out:
        return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The instruction
 * there is a branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero, which
 * turns the instruction into a nop.
 * To disable the ftrace graph code the mask field is patched to all ones,
 * which turns the instruction into an unconditional branch.
 */
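
/*
 * The brc instruction starts with the byte 0xa7 and carries the condition
 * mask in the upper nibble of its second byte, so writing 0x04 at offset 1
 * yields "brc 0,..." (never taken, i.e. a nop) while 0xf4 yields
 * "brc 15,..." (always taken). That single byte is all the two helpers
 * below need to patch with s390_kernel_write().
 */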
int ftrace_enable_ftrace_graph_caller(void)
{
        u8 op = 0x04; /* set mask field to zero */

        s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
        return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
        u8 op = 0xf4; /* set mask field to all ones */

        s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
        return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
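/*
 * With CONFIG_KPROBES_ON_FTRACE a kprobe placed on a function's ftrace
 * location is not implemented with a breakpoint; instead this handler runs
 * as an ftrace callback and emulates a breakpoint hit: it looks up the
 * kprobe registered for the traced address, points the saved instruction
 * pointer at the probe address for the pre-handler and past the patched
 * instruction for the post-handler, mirroring what a single-stepped
 * breakpoint would have produced.
 */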
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
        struct kprobe_ctlblk *kcb;
        struct pt_regs *regs;
        struct kprobe *p;
        int bit;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        regs = ftrace_get_regs(fregs);
        preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto out;

        if (kprobe_running()) {
                kprobes_inc_nmissed_count(p);
                goto out;
        }

        __this_cpu_write(current_kprobe, p);

        kcb = get_kprobe_ctlblk();
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

        instruction_pointer_set(regs, ip);

        if (!p->pre_handler || !p->pre_handler(p, regs)) {

                instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

                if (unlikely(p->post_handler)) {
                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                        p->post_handler(p, regs, 0);
                }
        }
        __this_cpu_write(current_kprobe, NULL);
out:
        preempt_enable_notrace();
        ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

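/*
 * A kprobe on an ftrace location never single-steps the probed instruction,
 * so no out-of-line instruction slot is required; setting ainsn.insn to
 * NULL records that.
 */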
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
        p->ainsn.insn = NULL;
        return 0;
}
#endif