/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ftrace.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE
  21/*
  22 * Replace a single instruction, which may be a branch or NOP.
  23 * If @validate == true, a replaced instruction is checked against 'old'.
  24 */
  25static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
  26                              bool validate)
  27{
  28        u32 replaced;
  29
  30        /*
  31         * Note:
  32         * Due to modules and __init, code can disappear and change,
  33         * we need to protect against faulting as well as code changing.
  34         * We do this by aarch64_insn_*() which use the probe_kernel_*().
  35         *
  36         * No lock is held here because all the modifications are run
  37         * through stop_machine().
  38         */
  39        if (validate) {
  40                if (aarch64_insn_read((void *)pc, &replaced))
  41                        return -EFAULT;
  42
  43                if (replaced != old)
  44                        return -EINVAL;
  45        }
  46        if (aarch64_insn_patch_text_nosync((void *)pc, new))
  47                return -EPERM;
  48
  49        return 0;
  50}
  51
  52/*
  53 * Replace tracer function in ftrace_caller()
  54 */
  55int ftrace_update_ftrace_func(ftrace_func_t func)
  56{
  57        unsigned long pc;
  58        u32 new;
  59
  60        pc = (unsigned long)&ftrace_call;
  61        new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
  62                                          AARCH64_INSN_BRANCH_LINK);
  63
  64        return ftrace_modify_code(pc, 0, new, false);
  65}
  66
  67/*
  68 * Turn on the call to ftrace_caller() in instrumented function
  69 */
  70int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  71{
  72        unsigned long pc = rec->ip;
  73        u32 old, new;
  74
  75        old = aarch64_insn_gen_nop();
  76        new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
  77
  78        return ftrace_modify_code(pc, old, new, true);
  79}
  80
  81/*
  82 * Turn off the call to ftrace_caller() in instrumented function
  83 */
  84int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
  85                    unsigned long addr)
  86{
  87        unsigned long pc = rec->ip;
  88        u32 old, new;
  89
  90        old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
  91        new = aarch64_insn_gen_nop();
  92
  93        return ftrace_modify_code(pc, old, new, true);
  94}
  95
  96int __init ftrace_dyn_arch_init(void)
  97{
  98        return 0;
  99}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 103/*
 104 * function_graph tracer expects ftrace_return_to_handler() to be called
 105 * on the way back to parent. For this purpose, this function is called
 106 * in _mcount() or ftrace_caller() to replace return address (*parent) on
 107 * the call stack to return_to_handler.
 108 *
 109 * Note that @frame_pointer is used only for sanity check later.
 110 */
 111void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 112                           unsigned long frame_pointer)
 113{
 114        unsigned long return_hooker = (unsigned long)&return_to_handler;
 115        unsigned long old;
 116        struct ftrace_graph_ent trace;
 117        int err;
 118
 119        if (unlikely(atomic_read(&current->tracing_graph_pause)))
 120                return;
 121
 122        /*
 123         * Note:
 124         * No protection against faulting at *parent, which may be seen
 125         * on other archs. It's unlikely on AArch64.
 126         */
 127        old = *parent;
 128        *parent = return_hooker;
 129
 130        trace.func = self_addr;
 131        trace.depth = current->curr_ret_stack + 1;
 132
 133        /* Only trace if the calling function expects to */
 134        if (!ftrace_graph_entry(&trace)) {
 135                *parent = old;
 136                return;
 137        }
 138
 139        err = ftrace_push_return_trace(old, self_addr, &trace.depth,
 140                                       frame_pointer);
 141        if (err == -EBUSY) {
 142                *parent = old;
 143                return;
 144        }
 145}
 146
#ifdef CONFIG_DYNAMIC_FTRACE
 148/*
 149 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 150 * depending on @enable.
 151 */
 152static int ftrace_modify_graph_caller(bool enable)
 153{
 154        unsigned long pc = (unsigned long)&ftrace_graph_call;
 155        u32 branch, nop;
 156
 157        branch = aarch64_insn_gen_branch_imm(pc,
 158                                             (unsigned long)ftrace_graph_caller,
 159                                             AARCH64_INSN_BRANCH_NOLINK);
 160        nop = aarch64_insn_gen_nop();
 161
 162        if (enable)
 163                return ftrace_modify_code(pc, nop, branch, true);
 164        else
 165                return ftrace_modify_code(pc, branch, nop, true);
 166}
 167
 168int ftrace_enable_ftrace_graph_caller(void)
 169{
 170        return ftrace_modify_graph_caller(true);
 171}
 172
 173int ftrace_disable_ftrace_graph_caller(void)
 174{
 175        return ftrace_modify_graph_caller(false);
 176}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */