linux/arch/x86/kernel/dumpstack_32.c
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

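/*
 * If @p lies within the interrupt stack that starts at @irq, return a
 * pointer to the end of that stack (irq + THREAD_SIZE); otherwise return
 * NULL.  The helpers below use this to decide whether the walk is
 * currently on this CPU's hardirq or softirq stack.
 */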
static void *is_irq_stack(void *p, void *irq)
{
        if (p < irq || p >= (irq + THREAD_SIZE))
                return NULL;
        return irq + THREAD_SIZE;
}


static void *is_hardirq_stack(unsigned long *stack, int cpu)
{
        void *irq = per_cpu(hardirq_stack, cpu);

        return is_irq_stack(stack, irq);
}

static void *is_softirq_stack(unsigned long *stack, int cpu)
{
        void *irq = per_cpu(softirq_stack, cpu);

        return is_irq_stack(stack, irq);
}

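/*
 * Walk the stack and report each entry to @ops.  The walk starts on
 * whatever stack @stack points into (process, hardirq or softirq stack);
 * when an interrupt stack is exhausted, the previous %esp saved at its
 * bottom is used to continue on the stack it interrupted.
 */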
void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
{
        const unsigned cpu = get_cpu();
        int graph = 0;
        u32 *prev_esp;

        if (!task)
                task = current;

        if (!stack) {
                unsigned long dummy;

                stack = &dummy;
                if (task != current)
                        stack = (unsigned long *)task->thread.sp;
        }

        if (!bp)
                bp = stack_frame(task, regs);

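        /*
         * Walk one stack at a time.  If the current stack turns out to
         * be an interrupt stack, follow the previous %esp saved at its
         * bottom and keep going on the stack that was interrupted.
         */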
        for (;;) {
                struct thread_info *context;
                void *end_stack;

                end_stack = is_hardirq_stack(stack, cpu);
                if (!end_stack)
                        end_stack = is_softirq_stack(stack, cpu);

                context = task_thread_info(task);
                bp = ops->walk_stack(context, stack, bp, ops, data,
                                     end_stack, &graph);

                /* Stop if not on irq stack */
                if (!end_stack)
                        break;

                /* The previous esp is saved on the bottom of the stack */
                prev_esp = (u32 *)(end_stack - THREAD_SIZE);
                stack = (unsigned long *)*prev_esp;
                if (!stack)
                        break;

                if (ops->stack(data, "IRQ") < 0)
                        break;
                touch_nmi_watchdog();
        }
        put_cpu();
}
EXPORT_SYMBOL(dump_trace);

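/*
 * Print the raw contents of the stack (up to kstack_depth_to_print
 * words, STACKSLOTS_PER_LINE per line) followed by the call trace.
 */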
void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                   unsigned long *sp, unsigned long bp, char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
        }

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % STACKSLOTS_PER_LINE) == 0))
                        pr_cont("\n");
                pr_cont(" %08lx", *stack++);
                touch_nmi_watchdog();
        }
        pr_cont("\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}


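/*
 * Dump the register state; for a fault that happened in kernel mode,
 * also print the raw stack and the code bytes surrounding EIP.
 */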
void show_regs(struct pt_regs *regs)
{
        int i;

        show_regs_print_info(KERN_EMERG);
        __show_regs(regs, !user_mode_vm(regs));

        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (!user_mode_vm(regs)) {
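                /*
                 * Dump code_bytes bytes around EIP: roughly two thirds
                 * (43/64) of them before the faulting instruction and
                 * the rest after it.  If the earlier bytes cannot be
                 * read, fall back to starting the dump at EIP itself.
                 */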
                unsigned int code_prologue = code_bytes * 43 / 64;
                unsigned int code_len = code_bytes;
                unsigned char c;
                u8 *ip;

                pr_emerg("Stack:\n");
                show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);

                pr_emerg("Code:");

                ip = (u8 *)regs->ip - code_prologue;
                if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
                        /* try starting at IP */
                        ip = (u8 *)regs->ip;
                        code_len = code_len - code_prologue + 1;
                }
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
                                pr_cont("  Bad EIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
                                pr_cont(" <%02x>", c);
                        else
                                pr_cont(" %02x", c);
                }
        }
        pr_cont("\n");
}

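/*
 * BUG() is implemented with a ud2 instruction (opcode bytes 0x0f 0x0b).
 * Report whether @ip is a valid kernel address pointing at one; the
 * 0x0b0f constant is those two bytes read as a little-endian 16-bit
 * value.
 */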
int is_valid_bugaddr(unsigned long ip)
{
        unsigned short ud2;

        if (ip < PAGE_OFFSET)
                return 0;
        if (probe_kernel_address((unsigned short *)ip, ud2))
                return 0;

        return ud2 == 0x0b0f;
}