linux/arch/mips/kernel/ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
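
/*
 * MCOUNT_OFFSET_INSNS is the branch offset, in instructions, of the "b 1f"
 * that ftrace_make_nop() installs at a module call site; it skips the rest
 * of the _mcount long-call sequence (see the call-site layout comment below).
 */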

/*
 * Check if the address is in kernel space
 *
 * Cloned from core_kernel_text() in kernel/extable.c, but does not call
 * init_kernel_text(), since ftrace does not trace functions in init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
        if (ip >= (unsigned long)_stext &&
            ip <= (unsigned long)_etext)
                return 1;
        return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000          /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff    /*  op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000     /* nop */
#define INSN_JAL(addr)  \
        ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
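/*
 * jal is a J-type instruction: bits 31..26 are the opcode and bits 25..0
 * hold target address bits 27..2, so the jump can only reach within the
 * current 256 MB segment; hence JUMP_RANGE_MASK above.
 */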

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
        u32 *buf;
        unsigned int v1;

        /* lui v1, hi16_mcount */
        v1 = 3;
        buf = (u32 *)&insn_lui_v1_hi16_mcount;
        UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

        /* jal (ftrace_caller + 8), jump over the first two instructions */
        buf = (u32 *)&insn_jal_ftrace_caller;
        uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* j ftrace_graph_caller */
        buf = (u32 *)&insn_j_ftrace_graph_caller;
        uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
        int faulted;

        /* *(unsigned int *)ip = new_code; */
        safe_store_code(new_code, ip, faulted);

        if (unlikely(faulted))
                return -EFAULT;

        flush_icache_range(ip, ip + 8);

        return 0;
}

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount          --> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * move $12, ra_address
 * jalr v1
 *  sub sp, sp, 8
 *                                  1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * jalr v1
 *  nop | move $12, ra_address | sub sp, sp, 8
 *                                  1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
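/*
 * 0x10000000 is "b" (beq zero, zero) with a zero offset; OR-ing in
 * MCOUNT_OFFSET_INSNS produces the "b 1f" shown in the layouts above,
 * branching over the remainder of the module's long-call sequence.
 */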

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

        /*
         * If ip is in kernel space, no long call is used, so a nop is
         * enough; otherwise, branch over the long-call sequence.
         */
        new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;

        return ftrace_modify_code(ip, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

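        /*
         * In kernel space the call site is "jal _mcount", so re-insert the
         * jal to ftrace_caller; in a module the site starts with
         * "lui v1, hi_16bit_of_mcount", so restore that lui.
         */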
        new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
                insn_lui_v1_hi16_mcount;

        return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned int new;

        new = INSN_JAL((unsigned long)func);

        return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
        /* Encode the instructions when booting */
        ftrace_dyn_arch_init_insns();

        /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
        ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

        /* The return code is returned via data */
        *(unsigned long *)data = 0;

        return 0;
}
#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP    ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
                        insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP (0xafbf << 16)  /* s{d,w} ra, offset(sp) */
#define S_R_SP  (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
#define OFFSET_MASK     0xffff  /* stack offset range: 0 ~ PT_SIZE */
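/*
 * 0xafbf0000 is "sw ra, 0(sp)" (opcode sw, base sp, rt ra); matching with a
 * bitwise AND also accepts sd, whose opcode bits are a superset of sw's.
 * S_R_SP is a looser mask used to recognise the prologue's other register
 * saves relative to sp.
 */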

unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
                old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
        unsigned long sp, ip, tmp;
        unsigned int code;
        int faulted;

        /*
         * Move ip back past the mcount call sequence: for a module, to the
         * instruction just below "lui v1, hi_16bit_of_mcount" (offset is 24);
         * for the kernel, to the instruction just below "move at, ra"
         * (offset is 16).
         */
        ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

        /*
         * Search the text backwards until we find either a non-store
         * instruction or the "s{d,w} ra, offset(sp)" instruction.
         */
        do {
                /* get the code at "ip": code = *(unsigned int *)ip; */
                safe_load_code(code, ip, faulted);

                if (unlikely(faulted))
                        return 0;
                /*
                 * If we hit a non-store instruction before finding where ra
                 * is stored, then this is a leaf function and it does not
                 * store ra on the stack.
                 */
                if ((code & S_R_SP) != S_R_SP)
                        return parent_ra_addr;

                /* Move to the next instruction in the backwards scan */
                ip -= 4;
        } while ((code & S_RA_SP) != S_RA_SP);

        sp = fp + (code & OFFSET_MASK);

        /* tmp = *(unsigned long *)sp; */
        safe_load_stack(tmp, sp, faulted);
        if (unlikely(faulted))
                return 0;

        if (tmp == old_parent_ra)
                return sp;
        return 0;
}

#endif  /* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
                           unsigned long fp)
{
        unsigned long old_parent_ra;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
            &return_to_handler;
        int faulted, insns;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * "parent_ra_addr" is the stack address that holds the return
         * address of the caller of _mcount.
         *
         * If gcc < 4.5, a leaf function does not save the return address
         * on the stack, so we "emulate" one in _mcount's stack space and
         * hijack it directly. A non-leaf function saves the return address
         * in its own stack space, so we cannot hijack it directly but need
         * to find the real stack address; ftrace_get_parent_ra_addr() does
         * that.
         *
         * If gcc >= 4.5, with the new -mmcount-ra-address option, the
         * location of the return address of a non-leaf function is saved
         * to $12 for us, and for a leaf function a zero is put into $12
         * instead. This is done in ftrace_graph_caller of mcount.S.
         */

        /* old_parent_ra = *parent_ra_addr; */
        safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
        parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
                        old_parent_ra, (unsigned long)parent_ra_addr, fp);
        /*
         * If getting the stack address of the non-leaf function's ra fails,
         * stop the function graph tracer and return.
         */
        if (parent_ra_addr == 0)
                goto out;
#endif
        /* *parent_ra_addr = return_hooker; */
        safe_store_stack(return_hooker, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;

        if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
            == -EBUSY) {
                *parent_ra_addr = old_parent_ra;
                return;
        }

        /*
         * Get the recorded ip of the current mcount calling site in the
         * __mcount_loc section, which will be used to filter the function
         * entries configured through the tracing/set_graph_function interface.
         */

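        /*
         * self_ra points just past the delay slot of the call to _mcount, so
         * stepping back two instructions in the kernel (to the jal), or
         * MCOUNT_OFFSET_INSNS + 1 instructions in a module (to the lui),
         * gives the address recorded in __mcount_loc.
         */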
        insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
        trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                *parent_ra_addr = old_parent_ra;
        }
        return;
out:
        ftrace_graph_stop();
        WARN_ON(1);
}
#endif  /* CONFIG_FUNCTION_GRAPH_TRACER */