/* linux/arch/riscv/kernel/ftrace.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2013 Linaro Limited
   4 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
   5 * Copyright (C) 2017 Andes Technology Corporation
   6 */
   7
   8#include <linux/ftrace.h>
   9#include <linux/uaccess.h>
  10#include <linux/memory.h>
  11#include <asm/cacheflush.h>
  12#include <asm/patch.h>
  13
  14#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Serialize all kernel text patching: ftrace core calls this before any
 * batch of code modifications.  Takes text_mutex, released by
 * ftrace_arch_code_modify_post_process() below.
 */
int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
	mutex_lock(&text_mutex);
	return 0;
}
  20
/* Counterpart of ftrace_arch_code_modify_prepare(): drop text_mutex. */
int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
	mutex_unlock(&text_mutex);
	return 0;
}
  26
  27static int ftrace_check_current_call(unsigned long hook_pos,
  28                                     unsigned int *expected)
  29{
  30        unsigned int replaced[2];
  31        unsigned int nops[2] = {NOP4, NOP4};
  32
  33        /* we expect nops at the hook position */
  34        if (!expected)
  35                expected = nops;
  36
  37        /*
  38         * Read the text we want to modify;
  39         * return must be -EFAULT on read error
  40         */
  41        if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
  42                        MCOUNT_INSN_SIZE))
  43                return -EFAULT;
  44
  45        /*
  46         * Make sure it is what we expect it to be;
  47         * return must be -EINVAL on failed comparison
  48         */
  49        if (memcmp(expected, replaced, sizeof(replaced))) {
  50                pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
  51                       (void *)hook_pos, expected[0], expected[1], replaced[0],
  52                       replaced[1]);
  53                return -EINVAL;
  54        }
  55
  56        return 0;
  57}
  58
  59static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
  60                                bool enable)
  61{
  62        unsigned int call[2];
  63        unsigned int nops[2] = {NOP4, NOP4};
  64
  65        make_call(hook_pos, target, call);
  66
  67        /* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
  68        if (patch_text_nosync
  69            ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
  70                return -EPERM;
  71
  72        return 0;
  73}
  74
/*
 * Put 4 instructions (16 bytes) at the front of the function within
 * the patchable function entry nops' area:
 *
 * 0: REG_S  ra, -SZREG(sp)
 * 1: auipc  ra, 0x?
 * 2: jalr   -?(ra)
 * 3: REG_L  ra, -SZREG(sp)
 *
 * So the opcodes are:
 * 0: 0xfe113c23 (sd)/0xfe112e23 (sw)
 * 1: 0x???????? -> auipc
 * 2: 0x???????? -> jalr
 * 3: 0xff813083 (ld)/0xffc12083 (lw)
 *
 * Instructions 0 and 3 (the ra save/restore) are fixed per XLEN; only the
 * auipc/jalr pair in the middle is computed at patch time.
 */
#if __riscv_xlen == 64
#define INSN0   0xfe113c23
#define INSN3   0xff813083
#elif __riscv_xlen == 32
#define INSN0   0xfe112e23
#define INSN3   0xffc12083
#endif

/* Total patched area (4 insns) and offset of the auipc within it. */
#define FUNC_ENTRY_SIZE 16
#define FUNC_ENTRY_JMP  4
 100
 101int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 102{
 103        unsigned int call[4] = {INSN0, 0, 0, INSN3};
 104        unsigned long target = addr;
 105        unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
 106
 107        call[1] = to_auipc_insn((unsigned int)(target - caller));
 108        call[2] = to_jalr_insn((unsigned int)(target - caller));
 109
 110        if (patch_text_nosync((void *)rec->ip, call, FUNC_ENTRY_SIZE))
 111                return -EPERM;
 112
 113        return 0;
 114}
 115
 116int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 117                    unsigned long addr)
 118{
 119        unsigned int nops[4] = {NOP4, NOP4, NOP4, NOP4};
 120
 121        if (patch_text_nosync((void *)rec->ip, nops, FUNC_ENTRY_SIZE))
 122                return -EPERM;
 123
 124        return 0;
 125}
 126
 127
/*
 * This is called early on, and isn't wrapped by
 * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
 * text_mutex, which triggers a lockdep failure.  SMP isn't running so we could
 * just directly poke the text, but it's simpler to just take the lock
 * ourselves.
 */
 135int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
 136{
 137        int out;
 138
 139        ftrace_arch_code_modify_prepare();
 140        out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
 141        ftrace_arch_code_modify_post_process();
 142
 143        return out;
 144}
 145
 146int ftrace_update_ftrace_func(ftrace_func_t func)
 147{
 148        int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
 149                                       (unsigned long)func, true);
 150        if (!ret) {
 151                ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
 152                                           (unsigned long)func, true);
 153        }
 154
 155        return ret;
 156}
 157
/* No arch-specific initialization is needed for dynamic ftrace on RISC-V. */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
 162#endif
 163
 164#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 165int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 166                       unsigned long addr)
 167{
 168        unsigned int call[2];
 169        unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
 170        int ret;
 171
 172        make_call(caller, old_addr, call);
 173        ret = ftrace_check_current_call(caller, call);
 174
 175        if (ret)
 176                return ret;
 177
 178        return __ftrace_modify_call(caller, addr, true);
 179}
 180#endif
 181
 182#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 183/*
 184 * Most of this function is copied from arm64.
 185 */
 186void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 187                           unsigned long frame_pointer)
 188{
 189        unsigned long return_hooker = (unsigned long)&return_to_handler;
 190        unsigned long old;
 191
 192        if (unlikely(atomic_read(&current->tracing_graph_pause)))
 193                return;
 194
 195        /*
 196         * We don't suffer access faults, so no extra fault-recovery assembly
 197         * is needed here.
 198         */
 199        old = *parent;
 200
 201        if (!function_graph_enter(old, self_addr, frame_pointer, parent))
 202                *parent = return_hooker;
 203}
 204
 205#ifdef CONFIG_DYNAMIC_FTRACE
 206extern void ftrace_graph_call(void);
 207extern void ftrace_graph_regs_call(void);
 208int ftrace_enable_ftrace_graph_caller(void)
 209{
 210        int ret;
 211
 212        ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
 213                                    (unsigned long)&prepare_ftrace_return, true);
 214        if (ret)
 215                return ret;
 216
 217        return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
 218                                    (unsigned long)&prepare_ftrace_return, true);
 219}
 220
 221int ftrace_disable_ftrace_graph_caller(void)
 222{
 223        int ret;
 224
 225        ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
 226                                    (unsigned long)&prepare_ftrace_return, false);
 227        if (ret)
 228                return ret;
 229
 230        return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
 231                                    (unsigned long)&prepare_ftrace_return, false);
 232}
 233#endif /* CONFIG_DYNAMIC_FTRACE */
 234#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 235