/* linux/arch/arm64/kernel/stacktrace.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/pointer_auth.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 * 	sub	sp, sp, #0x10
 *   	stp	x29, x30, [sp]
 *	mov	x29, sp
 *
 * A simple function epilogue looks like this:
 *	mov	sp, x29
 *	ldp	x29, x30, [sp]
 *	add	sp, sp, #0x10
 */

  36void start_backtrace(struct stackframe *frame, unsigned long fp,
  37                     unsigned long pc)
  38{
  39        frame->fp = fp;
  40        frame->pc = pc;
  41#ifdef CONFIG_FUNCTION_GRAPH_TRACER
  42        frame->graph = 0;
  43#endif
  44
  45        /*
  46         * Prime the first unwind.
  47         *
  48         * In unwind_frame() we'll check that the FP points to a valid stack,
  49         * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
  50         * treated as a transition to whichever stack that happens to be. The
  51         * prev_fp value won't be used, but we set it to 0 such that it is
  52         * definitely not an accessible stack address.
  53         */
  54        bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
  55        frame->prev_fp = 0;
  56        frame->prev_type = STACK_TYPE_UNKNOWN;
  57}
  58
/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long fp = frame->fp;
	struct stack_info info;

	if (!tsk)
		tsk = current;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	/* Frame records must be at least 8-byte aligned. */
	if (fp & 0x7)
		return -EINVAL;

	/* The 16-byte {fp, lr} record must lie entirely on a known stack. */
	if (!on_accessible_stack(tsk, fp, 16, &info))
		return -EINVAL;

	/* Never unwind back onto a stack we have already left. */
	if (test_bit(info.type, frame->stacks_done))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == frame->prev_type) {
		if (fp <= frame->prev_fp)
			return -EINVAL;
	} else {
		set_bit(frame->prev_type, frame->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_frame() invocation.
	 */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
	frame->prev_fp = fp;
	frame->prev_type = info.type;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
		(ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
		struct ftrace_ret_stack *ret_stack;
		/*
		 * This is a case where function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it to an original value.
		 */
		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
		if (WARN_ON_ONCE(!ret_stack))
			return -EINVAL;
		frame->pc = ret_stack->ret;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	/* Strip any pointer-authentication code from the saved LR. */
	frame->pc = ptrauth_strip_insn_pac(frame->pc);

	return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
 138
 139void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 140                             bool (*fn)(void *, unsigned long), void *data)
 141{
 142        while (1) {
 143                int ret;
 144
 145                if (!fn(data, frame->pc))
 146                        break;
 147                ret = unwind_frame(tsk, frame);
 148                if (ret < 0)
 149                        break;
 150        }
 151}
 152NOKPROBE_SYMBOL(walk_stackframe);
 153
/*
 * Print a single backtrace entry at log level @loglvl. %pSb prints the
 * symbol+offset for @where (with module build ID — see printk-formats).
 */
static void dump_backtrace_entry(unsigned long where, const char *loglvl)
{
	printk("%s %pSb\n", loglvl, (void *)where);
}
 158
/*
 * Print a "Call trace:" backtrace for @tsk (current if NULL) at log level
 * @loglvl. When @regs is non-NULL (e.g. from an exception handler), entries
 * are suppressed until the walk reaches the frame recorded in regs->regs[29],
 * so the trace starts at the point the exception was taken.
 */
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	struct stackframe frame;
	int skip = 0;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs) {
		/* We cannot unwind a user-mode context from here. */
		if (user_mode(regs))
			return;
		/* Suppress entries until we hit the exception frame. */
		skip = 1;
	}

	if (!tsk)
		tsk = current;

	/* Pin the task's stack so it cannot be freed while we walk it. */
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current) {
		/* Start from our own frame. */
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)dump_backtrace);
	} else {
		/*
		 * task blocked in __switch_to
		 */
		start_backtrace(&frame,
				thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	}

	printk("%sCall trace:\n", loglvl);
	do {
		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(frame.pc, loglvl);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc, loglvl);
		}
	} while (!unwind_frame(tsk, &frame));

	put_task_stack(tsk);
}
 212
/*
 * Generic show_stack() entry point: @sp is unused on arm64, the walk always
 * starts from the frame-pointer chain via dump_backtrace().
 *
 * NOTE(review): the barrier() appears intended to prevent the compiler from
 * tail-calling dump_backtrace(), so show_stack() keeps its own frame — the
 * call to dump_backtrace() above must not be reordered past it.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}
 218
 219#ifdef CONFIG_STACKTRACE
 220
 221noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
 222                              void *cookie, struct task_struct *task,
 223                              struct pt_regs *regs)
 224{
 225        struct stackframe frame;
 226
 227        if (regs)
 228                start_backtrace(&frame, regs->regs[29], regs->pc);
 229        else if (task == current)
 230                start_backtrace(&frame,
 231                                (unsigned long)__builtin_frame_address(1),
 232                                (unsigned long)__builtin_return_address(0));
 233        else
 234                start_backtrace(&frame, thread_saved_fp(task),
 235                                thread_saved_pc(task));
 236
 237        walk_stackframe(task, &frame, consume_entry, cookie);
 238}
 239
 240#endif
 241