/* linux/arch/x86/kernel/stacktrace.c */
   1/*
   2 * Stack trace management functions
   3 *
   4 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   5 */
   6#include <linux/sched.h>
   7#include <linux/stacktrace.h>
   8#include <linux/module.h>
   9#include <linux/uaccess.h>
  10#include <asm/stacktrace.h>
  11
/*
 * stacktrace_ops .stack callback, invoked by the unwinder when it crosses
 * a stack boundary (e.g. on to an IRQ/exception stack).  We have nothing
 * to record for boundaries; returning 0 lets the walk continue
 * (NOTE(review): stop-on-nonzero semantics per dump_trace — confirm).
 */
static int save_stack_stack(void *data, char *name)
{
        return 0;
}
  16
  17static void
  18__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
  19{
  20        struct stack_trace *trace = data;
  21#ifdef CONFIG_FRAME_POINTER
  22        if (!reliable)
  23                return;
  24#endif
  25        if (nosched && in_sched_functions(addr))
  26                return;
  27        if (trace->skip > 0) {
  28                trace->skip--;
  29                return;
  30        }
  31        if (trace->nr_entries < trace->max_entries)
  32                trace->entries[trace->nr_entries++] = addr;
  33}
  34
  35static void save_stack_address(void *data, unsigned long addr, int reliable)
  36{
  37        return __save_stack_address(data, addr, reliable, false);
  38}
  39
  40static void
  41save_stack_address_nosched(void *data, unsigned long addr, int reliable)
  42{
  43        return __save_stack_address(data, addr, reliable, true);
  44}
  45
/*
 * Callback table for dump_trace() used by the current-task variants:
 * records every address via save_stack_address().
 */
static const struct stacktrace_ops save_stack_ops = {
        .stack          = save_stack_stack,
        .address        = save_stack_address,
        .walk_stack     = print_context_stack,
};
  51
/*
 * Callback table for dump_trace() used when tracing another task:
 * identical to save_stack_ops except scheduler functions are skipped.
 */
static const struct stacktrace_ops save_stack_ops_nosched = {
        .stack          = save_stack_stack,
        .address        = save_stack_address_nosched,
        .walk_stack     = print_context_stack,
};
  57
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 *
 * Walks the current task's kernel stack; trace->skip leading entries are
 * dropped by the address callback, and the trace is terminated with a
 * ULONG_MAX sentinel when the buffer has room for it.
 */
void save_stack_trace(struct stack_trace *trace)
{
        dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
        /* Terminate the trace if the buffer isn't already full. */
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
  68
/*
 * Like save_stack_trace(), but start the walk from the register state in
 * @regs (e.g. captured at an interrupt/exception) instead of from here.
 * NOTE(review): unlike its siblings this one is not EXPORT_SYMBOL_GPL'd —
 * confirm whether module callers are intended.
 */
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
        /* Terminate the trace if the buffer isn't already full. */
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
  75
/*
 * Save @tsk's kernel stack trace, skipping scheduler internals so the
 * trace starts where the task actually blocked.
 * NOTE(review): nothing here pins @tsk — presumably callers guarantee the
 * task is not concurrently running on another CPU; confirm at call sites.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
        /* Terminate the trace if the buffer isn't already full. */
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
  83
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

/*
 * On-stack layout of one user-space call frame as produced by a
 * frame-pointer-based prologue: the caller's saved frame pointer,
 * followed by the return address into the caller.
 */
struct stack_frame_user {
        const void __user       *next_fp;       /* caller's frame pointer */
        unsigned long           ret_addr;       /* return address */
};
  90
  91static int
  92copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
  93{
  94        int ret;
  95
  96        if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
  97                return 0;
  98
  99        ret = 1;
 100        pagefault_disable();
 101        if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
 102                ret = 0;
 103        pagefault_enable();
 104
 105        return ret;
 106}
 107
/*
 * Walk the current task's user-space stack by chasing the frame-pointer
 * chain, starting from the user register state saved at kernel entry.
 * Assumes the user code maintains a conventional %bp frame chain (built
 * without -fomit-frame-pointer) — TODO confirm for the workloads traced;
 * any malformed frame simply terminates the walk early.
 */
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
        const struct pt_regs *regs = task_pt_regs(current);
        const void __user *fp = (const void __user *)regs->bp;

        /* Record the interrupted user instruction itself first. */
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = regs->ip;

        while (trace->nr_entries < trace->max_entries) {
                struct stack_frame_user frame;

                frame.next_fp = NULL;
                frame.ret_addr = 0;
                if (!copy_stack_frame(fp, &frame))
                        break;
                /* A frame below the stack pointer cannot be valid. */
                if ((unsigned long)fp < regs->sp)
                        break;
                if (frame.ret_addr) {
                        trace->entries[trace->nr_entries++] =
                                frame.ret_addr;
                }
                /* A self-referencing frame would loop forever; stop. */
                if (fp == frame.next_fp)
                        break;
                fp = frame.next_fp;
        }
}
 134
 135void save_stack_trace_user(struct stack_trace *trace)
 136{
 137        /*
 138         * Trace user stack if we are not a kernel thread
 139         */
 140        if (current->mm) {
 141                __save_stack_trace_user(trace);
 142        }
 143        if (trace->nr_entries < trace->max_entries)
 144                trace->entries[trace->nr_entries++] = ULONG_MAX;
 145}
 146
 147