/* linux/arch/x86/kernel/dumpstack_32.c — 32-bit x86 stack trace dumping */
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>
  18
  19static void *is_irq_stack(void *p, void *irq)
  20{
  21        if (p < irq || p >= (irq + THREAD_SIZE))
  22                return NULL;
  23        return irq + THREAD_SIZE;
  24}
  25
  26
  27static void *is_hardirq_stack(unsigned long *stack, int cpu)
  28{
  29        void *irq = per_cpu(hardirq_stack, cpu);
  30
  31        return is_irq_stack(stack, irq);
  32}
  33
  34static void *is_softirq_stack(unsigned long *stack, int cpu)
  35{
  36        void *irq = per_cpu(softirq_stack, cpu);
  37
  38        return is_irq_stack(stack, irq);
  39}
  40
/*
 * dump_trace - walk a kernel stack, reporting each frame via @ops
 * @task:  task whose stack to walk; NULL means current
 * @regs:  register state to start from, may be NULL
 * @stack: starting stack pointer; NULL means derive it from @task
 * @bp:    starting frame pointer; 0 means derive it via stack_frame()
 * @ops:   callbacks invoked for each frame and stack transition
 * @data:  opaque cookie passed through to @ops
 *
 * On 32-bit x86 hard- and soft-IRQ contexts run on separate per-cpu
 * stacks.  When the walk exhausts an IRQ stack, the saved previous ESP
 * at its bottom is followed back to the interrupted stack and the walk
 * continues there.
 */
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	/*
	 * get_cpu() disables preemption, keeping the per-cpu IRQ stack
	 * lookups below stable until put_cpu().
	 */
	const unsigned cpu = get_cpu();
	int graph = 0;
	u32 *prev_esp;

	if (!task)
		task = current;

	if (!stack) {
		/*
		 * For the current task any address on our own stack is a
		 * valid starting point; for other tasks use their saved
		 * stack pointer.
		 */
		unsigned long dummy;

		stack = &dummy;
		if (task != current)
			stack = (unsigned long *)task->thread.sp;
	}

	if (!bp)
		bp = stack_frame(task, regs);

	for (;;) {
		struct thread_info *context;
		void *end_stack;

		/*
		 * If we are on one of the per-cpu IRQ stacks, end_stack
		 * marks where that stack ends; otherwise it is NULL.
		 */
		end_stack = is_hardirq_stack(stack, cpu);
		if (!end_stack)
			end_stack = is_softirq_stack(stack, cpu);

		context = task_thread_info(task);
		bp = ops->walk_stack(context, stack, bp, ops, data,
				     end_stack, &graph);

		/* Stop if not on irq stack */
		if (!end_stack)
			break;

		/* The previous esp is saved on the bottom of the stack */
		prev_esp = (u32 *)(end_stack - THREAD_SIZE);
		stack = (unsigned long *)*prev_esp;
		if (!stack)
			break;

		/* Let the caller annotate the transition; <0 aborts. */
		if (ops->stack(data, "IRQ") < 0)
			break;
		touch_nmi_watchdog();
	}
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
  92
  93void
  94show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
  95                   unsigned long *sp, unsigned long bp, char *log_lvl)
  96{
  97        unsigned long *stack;
  98        int i;
  99
 100        if (sp == NULL) {
 101                if (task)
 102                        sp = (unsigned long *)task->thread.sp;
 103                else
 104                        sp = (unsigned long *)&sp;
 105        }
 106
 107        stack = sp;
 108        for (i = 0; i < kstack_depth_to_print; i++) {
 109                if (kstack_end(stack))
 110                        break;
 111                if ((i % STACKSLOTS_PER_LINE) == 0) {
 112                        if (i != 0)
 113                                pr_cont("\n");
 114                        printk("%s %08lx", log_lvl, *stack++);
 115                } else
 116                        pr_cont(" %08lx", *stack++);
 117                touch_nmi_watchdog();
 118        }
 119        pr_cont("\n");
 120        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 121}
 122
 123
/*
 * show_regs - print registers and, for kernel-mode faults, the stack
 * and a hex dump of the code around the faulting EIP.
 * @regs: register state at the time of the fault
 */
void show_regs(struct pt_regs *regs)
{
	int i;

	show_regs_print_info(KERN_EMERG);
	__show_regs(regs, !user_mode(regs));

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		/* Dump mostly bytes before EIP, a few after (43/64 split). */
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		pr_emerg("Stack:\n");
		show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);

		pr_emerg("Code:");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			/* Re-probe every byte: the dump may cross a page. */
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				pr_cont("  Bad EIP value.");
				break;
			}
			/* Mark the byte at the faulting EIP with <..>. */
			if (ip == (u8 *)regs->ip)
				pr_cont(" <%02x>", c);
			else
				pr_cont(" %02x", c);
		}
	}
	pr_cont("\n");
}
 166
 167int is_valid_bugaddr(unsigned long ip)
 168{
 169        unsigned short ud2;
 170
 171        if (ip < PAGE_OFFSET)
 172                return 0;
 173        if (probe_kernel_address((unsigned short *)ip, ud2))
 174                return 0;
 175
 176        return ud2 == 0x0b0f;
 177}
 178