linux/arch/arm64/include/asm/stacktrace.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	__NR_STACK_TYPES
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};
/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, to which it is no
 *               longer valid to unwind.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
 *               replacement lr value in the ftrace graph stack.
 */
struct stackframe {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;
#endif
};
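
/*
 * For orientation: an AArch64 frame record is a pair of 64-bit values at
 * the address held in fp:
 *
 *	fp + 0:	the caller's fp (the next frame record in the chain)
 *	fp + 8:	the saved lr (the return address into the caller)
 *
 * An illustrative sketch of stepping one record, loosely mirroring what
 * the unwinder in arch/arm64/kernel/stacktrace.c does once the address
 * has been validated with on_accessible_stack():
 *
 *	unsigned long next_fp = READ_ONCE(*(unsigned long *)frame->fp);
 *	unsigned long next_pc = READ_ONCE(*(unsigned long *)(frame->fp + 8));
 */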

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    bool (*fn)(void *, unsigned long), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl);

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

static inline bool on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high,
			    enum stack_type type, struct stack_info *info)
{
	if (!low)
		return false;

	if (sp < low || sp + size < sp || sp + size > high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = type;
	}
	return true;
}
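
/*
 * Note on the bounds check above: the sp + size < sp test catches unsigned
 * wraparound. For example, with sp == 0xfffffffffffffff8 and size == 16,
 * sp + size wraps to 8, which would otherwise slip past the
 * sp + size > high comparison and be accepted as on-stack.
 */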

static inline bool on_irq_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}

static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp, unsigned long size,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				     struct stack_info *info) { return false; }
#endif
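
/*
 * Background note: with CONFIG_VMAP_STACK, task stacks are vmalloc'd with
 * guard pages, so a stack overflow faults rather than silently corrupting
 * adjacent memory. The per-cpu overflow_stack above is what the overflow
 * handler runs on, since the faulting task's own stack is unusable at that
 * point.
 */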

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	if (on_task_stack(tsk, sp, size, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, size, info))
		return true;
	if (on_overflow_stack(sp, size, info))
		return true;
	if (on_sdei_stack(sp, size, info))
		return true;

	return false;
}
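
/*
 * An illustrative sketch (not part of this header) of how the stackframe
 * accounting fields combine with on_accessible_stack() to keep unwinding
 * robust, loosely following the unwinder in arch/arm64/kernel/stacktrace.c.
 * Cycles are broken by marking a stack done when leaving it, and records
 * within one stack must sit at strictly increasing addresses:
 *
 *	struct stack_info info;
 *
 *	if (!on_accessible_stack(tsk, frame->fp, 16, &info))
 *		return -EINVAL;
 *	if (test_bit(info.type, frame->stacks_done))
 *		return -EINVAL;
 *	if (info.type == frame->prev_type) {
 *		if (frame->fp <= frame->prev_fp)
 *			return -EINVAL;
 *	} else {
 *		set_bit(frame->prev_type, frame->stacks_done);
 *	}
 */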

void start_backtrace(struct stackframe *frame, unsigned long fp,
		     unsigned long pc);
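
/*
 * A minimal usage sketch (hypothetical caller; real consumers live in
 * arch/arm64/kernel/stacktrace.c): initialise a stackframe from a known
 * fp/pc pair, then let walk_stackframe() invoke a callback per frame.
 * Returning true from the callback continues the walk; false stops it.
 *
 *	static bool print_entry(void *data, unsigned long pc)
 *	{
 *		pr_info(" %pS\n", (void *)pc);
 *		return true;
 *	}
 *
 *	static void show_my_trace(void)
 *	{
 *		struct stackframe frame;
 *
 *		start_backtrace(&frame,
 *				(unsigned long)__builtin_frame_address(0),
 *				(unsigned long)show_my_trace);
 *		walk_stackframe(current, &frame, print_entry, NULL);
 *	}
 */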

#endif	/* __ASM_STACKTRACE_H */