linux/arch/parisc/kernel/ftrace.c
/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 *	- add CONFIG_DYNAMIC_FTRACE
 *	- add CONFIG_STACK_TRACER
 */

#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/sections.h>
#include <asm/ftrace.h>


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Push a function return address onto the return trace stack of the current task. */
static int push_return_trace(unsigned long ret, unsigned long long time,
				unsigned long func, int *depth)
{
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
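	/*
	 * Compiler barrier: claim the slot by updating curr_ret_stack
	 * before the entry below is populated, so the compiler cannot
	 * reorder the two steps.
	 */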
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	*depth = index;

	return 0;
}

/* Pop a function return address off the return trace stack of the current task. */
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)
			dereference_function_descriptor(&panic);
		return;
	}

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
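	/*
	 * Compiler barrier: finish reading the entry before the slot
	 * is released by the decrement below.
	 */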
	barrier();
	current->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long retval0,
				       unsigned long retval1)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	pop_return_trace(&trace, &ret);
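	/* cpu_clock() returns a nanosecond-resolution per-CPU timestamp. */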
	trace.rettime = cpu_clock(raw_smp_processor_id());
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)
			dereference_function_descriptor(&panic);
	}

	/* HACK: we hand over the old function's return values
	   in %r23 and %r24. Assembly in entry.S takes care of
	   moving those to their final registers %ret0 and %ret1 */
	asm( "copy %0, %%r23 \n\t"
	     "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );

	return ret;
}

/*
 * Hook the return address and push it onto the return trace stack
 * of the current task.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
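	/*
	 * On 64-bit parisc a function symbol refers to a function
	 * descriptor; dereference_function_descriptor() resolves it
	 * to the actual code address of return_to_handler.
	 */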
	*parent = (unsigned long)
		  dereference_function_descriptor(&return_to_handler);

	if (unlikely(!__kernel_text_address(old))) {
		ftrace_graph_stop();
		*parent = old;
		WARN_ON(1);
		return;
	}

	calltime = cpu_clock(raw_smp_processor_id());

	if (push_return_trace(old, calltime,
				self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


void ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3)
{
	extern ftrace_func_t ftrace_trace_function;

	if (function_trace_stop)
		return;

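	/*
	 * ftrace_trace_function holds the callback registered by the
	 * function tracer; it points at ftrace_stub while no tracer
	 * is active.
	 */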
	if (ftrace_trace_function != ftrace_stub) {
		ftrace_trace_function(parent, self_addr);
		return;
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_entry && ftrace_graph_return) {
		unsigned long sp;
		unsigned long *parent_rp;

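		/* %r30 is the stack pointer register on parisc. */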
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		/* sanity check: is the stack pointer we got from the
		   assembly stub in entry.S within a reasonable range
		   of the current stack pointer? */
		if ((sp - org_sp_gr3) > 0x400)
			return;
		/* calculate pointer to %rp in stack */
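		/*
		 * Note the pointer arithmetic: this steps back 0x10
		 * unsigned longs (64 bytes on 32-bit, 128 bytes on
		 * 64-bit), presumably where the stub in entry.S saved
		 * the caller's %rp; the check below verifies that
		 * assumption against 'parent'.
		 */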
		parent_rp = (unsigned long *) org_sp_gr3 - 0x10;
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}