linux/arch/arm64/include/asm/processor.h
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
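/*
 * Note: this relies on the GNU C "__label__" local-label declaration and the
 * labels-as-values extension (&&label) to take the address of the current
 * point in the instruction stream.
 */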

#ifdef __KERNEL__

#include <linux/string.h>

#include <asm/alternative.h>
#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>

#define STACK_TOP_MAX           TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE    0xffff0000
#define STACK_TOP               (test_thread_flag(TIF_32BIT) ? \
                                AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP               STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
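/*
 * A 32-bit (compat) task's stack therefore tops out just below the AArch32
 * vectors/kuser page at 0xffff0000, while a native 64-bit task can use the
 * full TASK_SIZE_64 range.
 */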

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT  (arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        /* Have we suspended stepping by a debugger? */
        int                     suspended_step;
        /* Allow breakpoints and watchpoints to be disabled for this thread. */
        int                     bps_disabled;
        int                     wps_disabled;
        /* Hardware breakpoints pinned to this task. */
        struct perf_event       *hbp_break[ARM_MAX_BRP];
        struct perf_event       *hbp_watch[ARM_MAX_WRP];
#endif
};

struct cpu_context {
        unsigned long x19;
        unsigned long x20;
        unsigned long x21;
        unsigned long x22;
        unsigned long x23;
        unsigned long x24;
        unsigned long x25;
        unsigned long x26;
        unsigned long x27;
        unsigned long x28;
        unsigned long fp;
        unsigned long sp;
        unsigned long pc;
};
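/*
 * cpu_context deliberately holds only the callee-saved state (x19-x28, fp,
 * sp, pc): cpu_switch_to() saves and restores just these, since the AAPCS64
 * calling convention lets the compiler spill any live caller-saved registers
 * at the call site before the switch.
 */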

struct thread_struct {
        struct cpu_context      cpu_context;    /* cpu context */
        unsigned long           tp_value;       /* TLS register */
#ifdef CONFIG_COMPAT
        unsigned long           tp2_value;
#endif
        struct fpsimd_state     fpsimd_state;
        unsigned long           fault_address;  /* fault info */
        unsigned long           fault_code;     /* ESR_EL1 value */
        struct debug_info       debug;          /* debugging */
};

#ifdef CONFIG_COMPAT
#define task_user_tls(t)                                                \
({                                                                      \
        unsigned long *__tls;                                           \
        if (is_compat_thread(task_thread_info(t)))                      \
                __tls = &(t)->thread.tp2_value;                         \
        else                                                            \
                __tls = &(t)->thread.tp_value;                          \
        __tls;                                                          \
 })
#else
#define task_user_tls(t)        (&(t)->thread.tp_value)
#endif
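/*
 * task_user_tls() yields the per-thread slot that backs the user-visible
 * TLS register: tp2_value for compat (AArch32) threads, tp_value otherwise.
 */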

#define INIT_THREAD  {  }

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
        memset(regs, 0, sizeof(*regs));
        regs->syscallno = ~0UL;
        regs->pc = pc;
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
{
        start_thread_common(regs, pc);
        regs->pstate = PSR_MODE_EL0t;
        regs->sp = sp;
}

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
                                       unsigned long sp)
{
        start_thread_common(regs, pc);
        regs->pstate = COMPAT_PSR_MODE_USR;
        if (pc & 1)
                regs->pstate |= COMPAT_PSR_T_BIT;

#ifdef __AARCH64EB__
        regs->pstate |= COMPAT_PSR_E_BIT;
#endif

        regs->compat_sp = sp;
}
#endif
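/*
 * start_thread() and compat_start_thread() are used by the binfmt loaders on
 * exec to set up the initial user register state: entry PC, initial stack
 * pointer and an EL0 PSTATE, with the Thumb and big-endian bits for compat
 * tasks derived from the entry point and the build configuration.
 */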
 140
 141/* Forward declaration, a strange C thing */
 142struct task_struct;
 143
 144/* Free all resources held by a thread. */
 145extern void release_thread(struct task_struct *);
 146
 147unsigned long get_wchan(struct task_struct *p);
 148
 149static inline void cpu_relax(void)
 150{
 151        asm volatile("yield" ::: "memory");
 152}
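/*
 * YIELD is an architectural hint: inside a busy-wait loop it lets an SMT
 * core or a hypervisor deprioritise this hardware thread, and the "memory"
 * clobber acts as a compiler barrier so loads are not cached across
 * iterations of the loop.
 */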

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);

#define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)   ((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)   user_stack_pointer(task_pt_regs(tsk))
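/*
 * The user pt_regs frame is saved at the top of the task's kernel stack on
 * entry to the kernel, so task_pt_regs() steps back one struct pt_regs from
 * task_stack_page(p) + THREAD_START_SP. KSTK_EIP()/KSTK_ESP() read the saved
 * PC and user stack pointer from that frame (e.g. for /proc/<pid>/stat).
 */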

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
        asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
        asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}
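/*
 * PLDL1KEEP / PSTL1KEEP are PRFM prefetch-for-load and prefetch-for-store
 * hints targeting the L1 cache with the "keep" (temporal) retention policy.
 */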

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *ptr)
{
        asm volatile(ARM64_LSE_ATOMIC_INSN(
                     "prfm pstl1strm, %a0",
                     "nop") : : "p" (ptr));
}
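/*
 * ARM64_LSE_ATOMIC_INSN() selects its first operand on LL/SC-only systems
 * and its second when LSE atomics are in use, so the streaming
 * prefetch-for-store hint is patched to a NOP on LSE-capable CPUs.
 */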

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif /* __KERNEL__ */

int cpu_enable_pan(void *__unused);
int cpu_enable_cache_maint_trap(void *__unused);

#endif /* __ASM_PROCESSOR_H */