linux/arch/arm64/include/asm/stacktrace.h
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
  16#ifndef __ASM_STACKTRACE_H
  17#define __ASM_STACKTRACE_H
  18
  19#include <linux/percpu.h>
  20#include <linux/sched.h>
  21#include <linux/sched/task_stack.h>
  22
  23#include <asm/memory.h>
  24#include <asm/ptrace.h>
  25#include <asm/sdei.h>
  26
/*
 * A single record of an in-progress unwind: the frame-pointer chain
 * position and the return address for that frame.
 */
struct stackframe {
	unsigned long fp;	/* frame pointer of the frame being examined */
	unsigned long pc;	/* return address recorded for this frame */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/*
	 * NOTE(review): presumably an index into the graph tracer's saved
	 * return-address stack, used to recover real return addresses that
	 * the tracer has patched -- confirm against unwind_frame()'s user.
	 */
	int graph;
#endif
};
  34
  35extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
  36extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
  37                            int (*fn)(struct stackframe *, void *), void *data);
  38extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
  39
  40DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
  41
  42static inline bool on_irq_stack(unsigned long sp)
  43{
  44        unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
  45        unsigned long high = low + IRQ_STACK_SIZE;
  46
  47        if (!low)
  48                return false;
  49
  50        return (low <= sp && sp < high);
  51}
  52
  53static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
  54{
  55        unsigned long low = (unsigned long)task_stack_page(tsk);
  56        unsigned long high = low + THREAD_SIZE;
  57
  58        return (low <= sp && sp < high);
  59}
  60
  61#ifdef CONFIG_VMAP_STACK
  62DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
  63
  64static inline bool on_overflow_stack(unsigned long sp)
  65{
  66        unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
  67        unsigned long high = low + OVERFLOW_STACK_SIZE;
  68
  69        return (low <= sp && sp < high);
  70}
  71#else
  72static inline bool on_overflow_stack(unsigned long sp) { return false; }
  73#endif
  74
  75/*
  76 * We can only safely access per-cpu stacks from current in a non-preemptible
  77 * context.
  78 */
  79static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
  80{
  81        if (on_task_stack(tsk, sp))
  82                return true;
  83        if (tsk != current || preemptible())
  84                return false;
  85        if (on_irq_stack(sp))
  86                return true;
  87        if (on_overflow_stack(sp))
  88                return true;
  89        if (on_sdei_stack(sp))
  90                return true;
  91
  92        return false;
  93}
  94
  95#endif  /* __ASM_STACKTRACE_H */
  96