linux/arch/x86/kernel/dumpstack_64.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/sched/debug.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>

static const char * const exception_stack_names[] = {
                [ ESTACK_DF     ]       = "#DF",
                [ ESTACK_NMI    ]       = "NMI",
                [ ESTACK_DB     ]       = "#DB",
                [ ESTACK_MCE    ]       = "#MC",
                [ ESTACK_VC     ]       = "#VC",
                [ ESTACK_VC2    ]       = "#VC2",
};

const char *stack_type_name(enum stack_type type)
{
        BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);

        if (type == STACK_TYPE_IRQ)
                return "IRQ";

        if (type == STACK_TYPE_ENTRY) {
                /*
                 * On 64-bit, we have a generic entry stack that we
                 * use for all the kernel entry points, including
                 * SYSENTER.
                 */
                return "ENTRY_TRAMPOLINE";
        }

        if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
                return exception_stack_names[type - STACK_TYPE_EXCEPTION];

        return NULL;
}
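
/*
 * Note (added for orientation): the names above are what the generic dump
 * code in dumpstack.c prints as begin/end markers when a backtrace crosses
 * onto one of these stacks, e.g. "<NMI>" ... "</NMI>".
 */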

/**
 * struct estack_pages - Page descriptor for exception stacks
 * @offs:       Offset from the start of the exception stack area
 * @size:       Size of the exception stack
 * @type:       Type to store in the stack_info struct
 */
struct estack_pages {
        u32     offs;
        u16     size;
        u16     type;
};

#define EPAGERANGE(st)                                                  \
        [PFN_DOWN(CEA_ESTACK_OFFS(st)) ...                              \
         PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = {   \
                .offs   = CEA_ESTACK_OFFS(st),                          \
                .size   = CEA_ESTACK_SIZE(st),                          \
                .type   = STACK_TYPE_EXCEPTION + ESTACK_ ##st, }

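/*
 * Illustration only, not part of the build: EPAGERANGE() relies on the
 * GNU C designated range initializer syntax. Assuming, purely for
 * example, a stack that covers pages 1 and 2 of the exception stack
 * area, EPAGERANGE(DF) expands along the lines of:
 *
 *      [1 ... 2] = {
 *              .offs   = CEA_ESTACK_OFFS(DF),
 *              .size   = CEA_ESTACK_SIZE(DF),
 *              .type   = STACK_TYPE_EXCEPTION + ESTACK_DF,
 *      },
 *
 * so every page backing a given stack shares one descriptor and any
 * address in the area resolves with a single array lookup.
 */
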
/*
 * Array of exception stack page descriptors. If the stack is larger than
 * PAGE_SIZE, all pages covering a particular stack will have the same
 * info. Entries for the guard pages, which are not mapped, stay zeroed
 * out.
 */
static const
struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
        EPAGERANGE(DF),
        EPAGERANGE(NMI),
        EPAGERANGE(DB),
        EPAGERANGE(MCE),
        EPAGERANGE(VC),
        EPAGERANGE(VC2),
};

static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)
{
        unsigned long begin, end, stk = (unsigned long)stack;
        const struct estack_pages *ep;
        struct pt_regs *regs;
        unsigned int k;

        BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);

        begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
        /*
         * Handle the case where a stack trace is collected _before_
         * cea_exception_stacks has been initialized.
         */
        if (!begin)
                return false;

        end = begin + sizeof(struct cea_exception_stacks);
        /* Bail if @stack is outside the exception stack area. */
        if (stk < begin || stk >= end)
                return false;

        /* Calc page offset from start of exception stacks */
        k = (stk - begin) >> PAGE_SHIFT;
        /* Lookup the page descriptor */
        ep = &estack_pages[k];
        /* Guard page? */
        if (!ep->size)
                return false;

        begin += (unsigned long)ep->offs;
        end = begin + (unsigned long)ep->size;
        regs = (struct pt_regs *)end - 1;
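
        /*
         * The entry code pushed a struct pt_regs at the top of the
         * exception stack, so the saved regs->sp points back into the
         * interrupted context's stack; it becomes @next_sp below so the
         * unwinder can chain to the next stack.
         */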
        info->type      = ep->type;
        info->begin     = (unsigned long *)begin;
        info->end       = (unsigned long *)end;
        info->next_sp   = (unsigned long *)regs->sp;
        return true;
}

static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
{
        unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
        unsigned long *begin;

        /*
         * @end points directly to the topmost stack entry to avoid a -8
         * adjustment in the stack switch hotpath. Adjust it back before
         * calculating @begin.
         */
        end++;
        begin = end - (IRQ_STACK_SIZE / sizeof(long));
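
        /*
         * E.g. with the usual 16K IRQ stack (IRQ_STACK_SIZE without
         * KASAN), @begin lands 2048 longs below the adjusted @end, i.e.
         * at the lowest address of this CPU's IRQ stack.
         */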

        /*
         * Due to the switching logic RSP can never be == @end because the
         * final operation is 'popq %rsp' which means after that RSP points
         * to the original stack and not to @end.
         */
        if (stack < begin || stack >= end)
                return false;

        info->type      = STACK_TYPE_IRQ;
        info->begin     = begin;
        info->end       = end;

        /*
         * The next stack pointer is stored at the top of the irq stack
         * before switching to the irq stack. Actual stack entries are all
         * below that.
         */
        info->next_sp = (unsigned long *)*(end - 1);

        return true;
}

bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
                                    struct stack_info *info)
{
        if (in_task_stack(stack, task, info))
                return true;

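        /* The per-CPU stacks checked below only match for the current task. */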
        if (task != current)
                return false;

        if (in_exception_stack(stack, info))
                return true;

        if (in_irq_stack(stack, info))
                return true;

        if (in_entry_stack(stack, info))
                return true;

        return false;
}

int get_stack_info(unsigned long *stack, struct task_struct *task,
                   struct stack_info *info, unsigned long *visit_mask)
{
        task = task ? : current;

        if (!stack)
                goto unknown;

        if (!get_stack_info_noinstr(stack, task, info))
                goto unknown;

        /*
         * Make sure we don't iterate through any given stack more than once.
         * If a stack type shows up a second time, something is going wrong:
         * just break out and report an unknown stack type.
         */
        if (visit_mask) {
                if (*visit_mask & (1UL << info->type)) {
                        if (task == current)
                                printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
                        goto unknown;
                }
                *visit_mask |= 1UL << info->type;
        }

        return 0;

unknown:
        info->type = STACK_TYPE_UNKNOWN;
        return -EINVAL;
}
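
/*
 * Usage sketch, illustrative rather than part of this file: an unwinder
 * that runs off the end of one stack can hop to the next by feeding
 * @next_sp back in:
 *
 *      unsigned long visit_mask = 0;
 *      struct stack_info info;
 *
 *      while (get_stack_info(stack, task, &info, &visit_mask) == 0) {
 *              // ... process the words between info.begin and info.end ...
 *              stack = info.next_sp;
 *      }
 *
 * which is essentially the loop the generic show_trace_log_lvl() runs.
 */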