linux/arch/arm64/kernel/stacktrace.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 *      sub     sp, sp, #0x10
 *      stp     x29, x30, [sp]
 *      mov     x29, sp
 *
 * A simple function epilogue looks like this:
 *      mov     sp, x29
 *      ldp     x29, x30, [sp]
 *      add     sp, sp, #0x10
 */
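/*
 * Illustrative sketch (not part of the kernel sources): the prologue above
 * leaves a two-word frame record at the address held in x29, which is what
 * unwind_frame() dereferences at fp and fp + 8 below. Expressed as a struct,
 * the layout would look roughly like this (the struct name is hypothetical):
 *
 *      struct frame_record {
 *              u64 fp;         // caller's x29: the next record in the chain
 *              u64 lr;         // saved x30: the return address reported as pc
 *      };
 */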

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
        unsigned long fp = frame->fp;
        struct stack_info info;

        if (fp & 0xf)
                return -EINVAL;

        if (!tsk)
                tsk = current;

        if (!on_accessible_stack(tsk, fp, &info))
                return -EINVAL;

        if (test_bit(info.type, frame->stacks_done))
                return -EINVAL;

        /*
         * As stacks grow downward, any valid record on the same stack must be
         * at a strictly higher address than the prior record.
         *
         * Stacks can nest in several valid orders, e.g.
         *
         * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
         * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
         *
         * ... but the nesting itself is strict. Once we transition from one
         * stack to another, it's never valid to unwind back to that first
         * stack.
         */
        if (info.type == frame->prev_type) {
                if (fp <= frame->prev_fp)
                        return -EINVAL;
        } else {
                set_bit(frame->prev_type, frame->stacks_done);
        }

        /*
         * Record this frame record's values and location. The prev_fp and
         * prev_type are only meaningful to the next unwind_frame() invocation.
         */
        frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
        frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
        frame->prev_fp = fp;
        frame->prev_type = info.type;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (tsk->ret_stack &&
                        (frame->pc == (unsigned long)return_to_handler)) {
                struct ftrace_ret_stack *ret_stack;
                /*
                 * The function graph tracer has modified a return address
                 * (LR) in a stack frame to hook a function return, so
                 * restore the original return address here.
                 */
                ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
                if (WARN_ON_ONCE(!ret_stack))
                        return -EINVAL;
                frame->pc = ret_stack->ret;
        }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

        /*
         * Frames created upon entry from EL0 have NULL FP and PC values, so
         * don't bother reporting these. Frames created by __noreturn functions
         * might have a valid FP even if PC is bogus, so only terminate where
         * both are NULL.
         */
        if (!frame->fp && !frame->pc)
                return -EINVAL;

        return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
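/*
 * Illustrative sketch (not part of this file): a caller can drive
 * unwind_frame() directly rather than going through walk_stackframe(), e.g.
 * to print every return address on the current stack. The helper name and
 * the use of pr_info() are assumptions for illustration only:
 *
 *      static void example_dump_current_stack(void)
 *      {
 *              struct stackframe frame;
 *
 *              start_backtrace(&frame,
 *                              (unsigned long)__builtin_frame_address(0),
 *                              (unsigned long)example_dump_current_stack);
 *              do {
 *                      pr_info(" %pS\n", (void *)frame.pc);
 *              } while (!unwind_frame(current, &frame));
 *      }
 */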

void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                     int (*fn)(struct stackframe *, void *), void *data)
{
        while (1) {
                int ret;

                if (fn(frame, data))
                        break;
                ret = unwind_frame(tsk, frame);
                if (ret < 0)
                        break;
        }
}
NOKPROBE_SYMBOL(walk_stackframe);
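/*
 * Illustrative sketch (not part of this file): walk_stackframe() visits the
 * starting frame first and stops as soon as the callback returns nonzero or
 * unwind_frame() fails. A minimal frame-counting callback (names are
 * hypothetical) would look like:
 *
 *      static int count_frame(struct stackframe *frame, void *data)
 *      {
 *              (*(unsigned int *)data)++;
 *              return 0;       // keep walking
 *      }
 *
 *      // ... after start_backtrace(&frame, ...):
 *      unsigned int depth = 0;
 *      walk_stackframe(current, &frame, count_frame, &depth);
 */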

#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
        struct stack_trace *trace;
        unsigned int no_sched_functions;
        unsigned int skip;
};

static int save_trace(struct stackframe *frame, void *d)
{
        struct stack_trace_data *data = d;
        struct stack_trace *trace = data->trace;
        unsigned long addr = frame->pc;

        if (data->no_sched_functions && in_sched_functions(addr))
                return 0;
        if (data->skip) {
                data->skip--;
                return 0;
        }

        trace->entries[trace->nr_entries++] = addr;

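        /* A nonzero return value tells walk_stackframe() to stop walking. */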
        return trace->nr_entries >= trace->max_entries;
}

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        struct stack_trace_data data;
        struct stackframe frame;

        data.trace = trace;
        data.skip = trace->skip;
        data.no_sched_functions = 0;

        start_backtrace(&frame, regs->regs[29], regs->pc);
        walk_stackframe(current, &frame, save_trace, &data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
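/*
 * Illustrative sketch (not part of this file): save_stack_trace_regs() suits
 * callers that already hold a struct pt_regs, e.g. an exception or interrupt
 * handler, so the trace starts at the interrupted context (regs->regs[29],
 * regs->pc) rather than at the handler itself:
 *
 *      // assuming 'regs' and a caller-initialised 'trace' are in scope
 *      save_stack_trace_regs(regs, &trace);
 */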

static noinline void __save_stack_trace(struct task_struct *tsk,
        struct stack_trace *trace, unsigned int nosched)
{
        struct stack_trace_data data;
        struct stackframe frame;

        if (!try_get_task_stack(tsk))
                return;

        data.trace = trace;
        data.skip = trace->skip;
        data.no_sched_functions = nosched;

        if (tsk != current) {
                start_backtrace(&frame, thread_saved_fp(tsk),
                                thread_saved_pc(tsk));
        } else {
                /* We don't want this function or its caller */
                data.skip += 2;
                start_backtrace(&frame,
                                (unsigned long)__builtin_frame_address(0),
                                (unsigned long)__save_stack_trace);
        }

        walk_stackframe(tsk, &frame, save_trace, &data);

        put_task_stack(tsk);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        __save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
        __save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
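/*
 * Illustrative sketch (not part of this file): a typical consumer captures
 * entries into a caller-provided buffer and prints them afterwards. The
 * buffer size and the use of stack_trace_print() are assumptions for
 * illustration:
 *
 *      unsigned long entries[16];
 *      struct stack_trace trace = {
 *              .entries        = entries,
 *              .max_entries    = ARRAY_SIZE(entries),
 *              .skip           = 0,
 *      };
 *
 *      save_stack_trace(&trace);
 *      stack_trace_print(entries, trace.nr_entries, 0);
 */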
#endif