linux/arch/sparc/kernel/stacktrace.c
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/ftrace.h>
#include <linux/export.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

#include "kstack.h"

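/*
 * Walk the kernel stack that belongs to @tp and record the return
 * address of each frame in @trace.  With @skip_sched set, frames that
 * fall inside scheduler functions are left out of the trace.
 */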
static void __save_stack_trace(struct thread_info *tp,
                               struct stack_trace *trace,
                               bool skip_sched)
{
        unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        struct task_struct *t;
        int graph = 0;
#endif

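        /*
         * For the current task, flush the register windows to the stack
         * and read the live frame pointer; for any other task, use the
         * stack pointer saved at context-switch time.
         */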
        if (tp == current_thread_info()) {
                stack_trace_flush();
                __asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
        } else {
                ksp = tp->ksp;
        }

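        /*
         * sparc64 stack pointers are offset by STACK_BIAS; add it back
         * to get the real address of the current frame.
         */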
        fp = ksp + STACK_BIAS;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        t = tp->task;
#endif
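        /* Walk the frames until one is invalid or the trace is full. */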
        do {
                struct sparc_stackf *sf;
                struct pt_regs *regs;
                unsigned long pc;

                if (!kstack_valid(tp, fp))
                        break;

                sf = (struct sparc_stackf *) fp;
                regs = (struct pt_regs *) (sf + 1);

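                /*
                 * A trap frame marks an exception boundary: take the PC
                 * and the previous frame from the saved pt_regs, but stop
                 * if the trap came from user space.
                 */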
                if (kstack_is_trap_frame(tp, regs)) {
                        if (!(regs->tstate & TSTATE_PRIV))
                                break;
                        pc = regs->tpc;
                        fp = regs->u_regs[UREG_I6] + STACK_BIAS;
                } else {
                        pc = sf->callers_pc;
                        fp = (unsigned long)sf->fp + STACK_BIAS;
                }

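                /*
                 * Honour the caller's skip count, then record the PC
                 * unless scheduler functions are being filtered out.
                 */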
                if (trace->skip > 0)
                        trace->skip--;
                else if (!skip_sched || !in_sched_functions(pc)) {
                        trace->entries[trace->nr_entries++] = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
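                        /*
                         * If the return address was patched by the
                         * function graph tracer, recover the original
                         * caller from the task's ret_stack so the trace
                         * stays meaningful.
                         */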
                        if ((pc + 8UL) == (unsigned long) &return_to_handler) {
                                int index = t->curr_ret_stack;
                                if (t->ret_stack && index >= graph) {
                                        pc = t->ret_stack[index - graph].ret;
                                        if (trace->nr_entries <
                                            trace->max_entries)
                                                trace->entries[trace->nr_entries++] = pc;
                                        graph++;
                                }
                        }
#endif
                }
        } while (trace->nr_entries < trace->max_entries);
}

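/* Capture a stack trace of the currently running task. */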
void save_stack_trace(struct stack_trace *trace)
{
        __save_stack_trace(current_thread_info(), trace, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

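/*
 * Capture a stack trace of @tsk, filtering out scheduler-internal
 * functions so the trace reflects where the task actually blocked.
 */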
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        struct thread_info *tp = task_thread_info(tsk);

        __save_stack_trace(tp, trace, true);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);