linux/arch/sparc/kernel/stacktrace.c
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/ftrace.h>
#include <linux/export.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

#include "kstack.h"

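/*
 * Walk the kernel stack of @tp by following the frame pointer chain,
 * recording each caller's %pc into @trace.  When @skip_sched is true,
 * frames that fall inside scheduler functions are not recorded.
 */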
static void __save_stack_trace(struct thread_info *tp,
                               struct stack_trace *trace,
                               bool skip_sched)
{
        unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        struct task_struct *t;
        int graph = 0;
#endif

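        /*
         * For the running task, make sure the register windows have been
         * flushed out to the stack and read the live frame pointer straight
         * from %fp; for any other task, use the kernel stack pointer saved
         * at its last context switch.
         */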
        if (tp == current_thread_info()) {
                stack_trace_flush();
                __asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
        } else {
                ksp = tp->ksp;
        }

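        /*
         * sparc64 keeps %sp/%fp biased; add STACK_BIAS to get the real
         * address of the topmost stack frame.
         */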
        fp = ksp + STACK_BIAS;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        t = tp->task;
#endif
        do {
                struct sparc_stackf *sf;
                struct pt_regs *regs;
                unsigned long pc;

                if (!kstack_valid(tp, fp))
                        break;

                sf = (struct sparc_stackf *) fp;
                regs = (struct pt_regs *) (sf + 1);

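                /*
                 * A trap frame keeps a struct pt_regs right after the saved
                 * register window.  Stop if the trap came from user space
                 * (TSTATE_PRIV clear); otherwise resume the walk from the
                 * trapped %pc and the interrupted frame pointer.
                 */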
                if (kstack_is_trap_frame(tp, regs)) {
                        if (!(regs->tstate & TSTATE_PRIV))
                                break;
                        pc = regs->tpc;
                        fp = regs->u_regs[UREG_I6] + STACK_BIAS;
                } else {
                        pc = sf->callers_pc;
                        fp = (unsigned long)sf->fp + STACK_BIAS;
                }

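                /*
                 * Honour the caller's skip count first; beyond that, record
                 * the entry unless scheduler internals were asked to be
                 * hidden.
                 */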
                if (trace->skip > 0)
                        trace->skip--;
                else if (!skip_sched || !in_sched_functions(pc)) {
                        trace->entries[trace->nr_entries++] = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
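                        /*
                         * The function graph tracer rewrites return addresses
                         * on the stack to point at return_to_handler; recover
                         * the real return address from the task's ret_stack
                         * and record it as well.
                         */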
                        if ((pc + 8UL) == (unsigned long) &return_to_handler) {
                                int index = t->curr_ret_stack;
                                if (t->ret_stack && index >= graph) {
                                        pc = t->ret_stack[index - graph].ret;
                                        if (trace->nr_entries <
                                            trace->max_entries)
                                                trace->entries[trace->nr_entries++] = pc;
                                        graph++;
                                }
                        }
#endif
                }
        } while (trace->nr_entries < trace->max_entries);
}

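/* Capture a stack trace of the currently running context. */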
void save_stack_trace(struct stack_trace *trace)
{
        __save_stack_trace(current_thread_info(), trace, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

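/* Capture a stack trace of @tsk, hiding scheduler-internal frames. */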
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        struct thread_info *tp = task_thread_info(tsk);

        __save_stack_trace(tp, trace, true);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

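/*
 * Usage sketch (assumed caller, not part of this file): with the legacy
 * struct stack_trace API, a caller supplies the entry buffer and limits,
 * then walks the recorded entries:
 *
 *        unsigned long entries[16];
 *        struct stack_trace trace = {
 *                .entries     = entries,
 *                .max_entries = ARRAY_SIZE(entries),
 *                .skip        = 0,
 *        };
 *        unsigned int i;
 *
 *        save_stack_trace(&trace);
 *        for (i = 0; i < trace.nr_entries; i++)
 *                pr_info("%pS\n", (void *)entries[i]);
 */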