linux/arch/arm64/include/asm/processor.h
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

#define KERNEL_DS               UL(-1)
#define USER_DS                 ((UL(1) << MAX_USER_VA_BITS) - 1)

/*
 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
 * no point in shifting all network buffers by 2 bytes just to make some IP
 * header fields appear aligned in memory, potentially sacrificing some DMA
 * performance on some platforms.
 */
#define NET_IP_ALIGN    0
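
/*
 * Illustrative note (not part of the original header): generic networking
 * code consumes NET_IP_ALIGN through helpers such as
 * netdev_alloc_skb_ip_align(), which does skb_reserve(skb, NET_IP_ALIGN)
 * so the IP header lands on a 4-byte boundary after the 14-byte Ethernet
 * header.  With NET_IP_ALIGN defined as 0, that reservation is a no-op
 * here.
 */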

#ifndef __ASSEMBLY__
#ifdef __KERNEL__

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/types.h>

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */

#define DEFAULT_MAP_WINDOW_64   (UL(1) << VA_BITS)
#define TASK_SIZE_64            (UL(1) << vabits_user)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32            UL(0x100000000)
#define TASK_SIZE               (test_thread_flag(TIF_32BIT) ? \
                                TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)       (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
                                TASK_SIZE_32 : TASK_SIZE_64)
#define DEFAULT_MAP_WINDOW      (test_thread_flag(TIF_32BIT) ? \
                                TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64)
#else
#define TASK_SIZE               TASK_SIZE_64
#define DEFAULT_MAP_WINDOW      DEFAULT_MAP_WINDOW_64
#endif /* CONFIG_COMPAT */
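
/*
 * Worked example (illustrative): with 48-bit VAs and no 52-bit user VA
 * support, VA_BITS == vabits_user == 48, so a native task sees
 * TASK_SIZE == UL(1) << 48.  A compat (AArch32) task always sees
 * TASK_SIZE == TASK_SIZE_32 == 4GiB; the choice is made per-thread via
 * the TIF_32BIT flag.
 */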

#ifdef CONFIG_ARM64_FORCE_52BIT
#define STACK_TOP_MAX           TASK_SIZE_64
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(TASK_SIZE / 4))
#else
#define STACK_TOP_MAX           DEFAULT_MAP_WINDOW_64
#define TASK_UNMAPPED_BASE      (PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4))
#endif /* CONFIG_ARM64_FORCE_52BIT */

#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE    0xffff0000
#define STACK_TOP               (test_thread_flag(TIF_32BIT) ? \
                                AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP               STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

#ifndef CONFIG_ARM64_FORCE_52BIT
#define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\
                                DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
                                        base + TASK_SIZE - DEFAULT_MAP_WINDOW :\
                                        base)
#endif /* CONFIG_ARM64_FORCE_52BIT */
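
/*
 * Illustrative note: these two hooks implement the 52-bit VA opt-in.  By
 * default mmap() searches below DEFAULT_MAP_WINDOW; a caller that passes
 * an address hint above that window, e.g.
 * mmap((void *)(UL(1) << 51), len, ...), has its search limit raised to
 * the full TASK_SIZE, and top-down allocation gets its base shifted up
 * by the same amount.
 */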

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT  (arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        /* Have we suspended stepping by a debugger? */
        int                     suspended_step;
        /* Allow breakpoints and watchpoints to be disabled for this thread. */
        int                     bps_disabled;
        int                     wps_disabled;
        /* Hardware breakpoints pinned to this task. */
        struct perf_event       *hbp_break[ARM_MAX_BRP];
        struct perf_event       *hbp_watch[ARM_MAX_WRP];
#endif
};

struct cpu_context {
        unsigned long x19;
        unsigned long x20;
        unsigned long x21;
        unsigned long x22;
        unsigned long x23;
        unsigned long x24;
        unsigned long x25;
        unsigned long x26;
        unsigned long x27;
        unsigned long x28;
        unsigned long fp;
        unsigned long sp;
        unsigned long pc;
};
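
/*
 * Illustrative note: cpu_context holds only the AAPCS64 callee-saved
 * registers (x19-x28, fp, sp, pc).  cpu_switch_to() spills these for the
 * outgoing task and reloads them for the incoming one; the caller-saved
 * registers x0-x18 need no slots because the procedure call standard
 * already lets the context-switch call clobber them.
 */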

struct thread_struct {
        struct cpu_context      cpu_context;    /* cpu context */

        /*
         * Whitelisted fields for hardened usercopy:
         * Maintainers must ensure manually that this contains no
         * implicit padding.
         */
        struct {
                unsigned long   tp_value;       /* TLS register */
                unsigned long   tp2_value;
                struct user_fpsimd_state fpsimd_state;
        } uw;

        unsigned int            fpsimd_cpu;
        void                    *sve_state;     /* SVE registers, if any */
        unsigned int            sve_vl;         /* SVE vector length */
        unsigned int            sve_vl_onexec;  /* SVE vl after next exec */
        unsigned long           fault_address;  /* fault info */
        unsigned long           fault_code;     /* ESR_EL1 value */
        struct debug_info       debug;          /* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
        struct ptrauth_keys     keys_user;
#endif
};

static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        /* Verify that there is no padding among the whitelisted fields: */
        BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
                     sizeof_field(struct thread_struct, uw.tp_value) +
                     sizeof_field(struct thread_struct, uw.tp2_value) +
                     sizeof_field(struct thread_struct, uw.fpsimd_state));

        *offset = offsetof(struct thread_struct, uw);
        *size = sizeof_field(struct thread_struct, uw);
}
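
/*
 * Illustrative note: with CONFIG_HARDENED_USERCOPY, copies between
 * userspace and a slab object are rejected unless they fall inside a
 * whitelisted region of the object.  The hook above whitelists exactly
 * thread_struct.uw, so e.g. ptrace access to the FP/SIMD registers can
 * copy uw.fpsimd_state while the rest of thread_struct stays off-limits.
 */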

#ifdef CONFIG_COMPAT
#define task_user_tls(t)                                                \
({                                                                      \
        unsigned long *__tls;                                           \
        if (is_compat_thread(task_thread_info(t)))                      \
                __tls = &(t)->thread.uw.tp2_value;                      \
        else                                                            \
                __tls = &(t)->thread.uw.tp_value;                       \
        __tls;                                                          \
 })
#else
#define task_user_tls(t)        (&(t)->thread.uw.tp_value)
#endif
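
/*
 * Illustrative note (cf. tls_thread_switch() in
 * arch/arm64/kernel/process.c): task_user_tls() names the field that
 * backs TPIDR_EL0 for a task.  For a native task that is uw.tp_value;
 * for a compat task TPIDR_EL0 is the AArch32 read/write TPIDRURW, backed
 * by uw.tp2_value, while uw.tp_value holds the read-only TLS pointer
 * exposed through TPIDRRO_EL0.
 */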

/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);

#define INIT_THREAD {                           \
        .fpsimd_cpu = NR_CPUS,                  \
}

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
        memset(regs, 0, sizeof(*regs));
        forget_syscall(regs);
        regs->pc = pc;
}

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
{
        start_thread_common(regs, pc);
        regs->pstate = PSR_MODE_EL0t;

        if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
                regs->pstate |= PSR_SSBS_BIT;

        regs->sp = sp;
}
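
/*
 * Illustrative usage: binary loaders call start_thread() once the new
 * image is mapped; load_elf_binary() in fs/binfmt_elf.c finishes with
 * start_thread(regs, elf_entry, bprm->p), so the task enters userspace
 * at the ELF entry point in EL0t, with SSBS set unless SSB mitigation is
 * forced on.
 */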

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
                                       unsigned long sp)
{
        start_thread_common(regs, pc);
        regs->pstate = PSR_AA32_MODE_USR;
        if (pc & 1)
                regs->pstate |= PSR_AA32_T_BIT;

#ifdef __AARCH64EB__
        regs->pstate |= PSR_AA32_E_BIT;
#endif

        if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
                regs->pstate |= PSR_AA32_SSBS_BIT;

        regs->compat_sp = sp;
}
#endif
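
/*
 * Illustrative note: compat_start_thread() follows the AArch32 entry
 * convention: an odd entry point requests Thumb state, hence the
 * PSR_AA32_T_BIT for (pc & 1), and big-endian kernels also set the E bit
 * so the task's data accesses match the image's endianness.
 */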

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

static inline void cpu_relax(void)
{
        asm volatile("yield" ::: "memory");
}
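
/*
 * Illustrative usage: cpu_relax() is the body of kernel busy-wait loops,
 * e.g. while (!READ_ONCE(flag)) cpu_relax();.  YIELD hints that an SMT
 * sibling or hypervisor may deprioritise this CPU, and the "memory"
 * clobber forces the loop condition to be reloaded on every iteration.
 */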

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);

#define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
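
/*
 * Illustrative note: the user register frame saved on kernel entry sits
 * at the very top of a task's kernel stack, so task_pt_regs() is simply
 * "end of the stack area minus one struct pt_regs".  KSTK_EIP() and
 * KSTK_ESP() below peek into that frame to report a task's user PC and
 * SP.
 */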

#define KSTK_EIP(tsk)   ((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)   user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
        asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
        asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *ptr)
{
        asm volatile(ARM64_LSE_ATOMIC_INSN(
                     "prfm pstl1strm, %a0",
                     "nop") : : "p" (ptr));
}
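
/*
 * Illustrative note: the PRFM hints decode as <type><level><policy>:
 * PLDL1KEEP prefetches for load into L1 and retains the line, PSTL1KEEP
 * prefetches for store, and PSTL1STRM marks the store prefetch as
 * streaming (use once).  ARM64_LSE_ATOMIC_INSN() patches
 * spin_lock_prefetch() to a NOP on LSE-capable CPUs, where the
 * prefetch-for-store brings no benefit to the LSE-based locking
 * primitives.
 */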

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif /* __KERNEL__ */

extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/*
 * Not at the top of the file due to a direct #include cycle between
 * <asm/fpsimd.h> and <asm/processor.h>.  Deferring this #include
 * ensures that contents of processor.h are visible to fpsimd.h even if
 * processor.h is included first.
 *
 * These prctl helpers are the only things in this file that require
 * fpsimd.h.  The core code expects them to be in this header.
 */
#include <asm/fpsimd.h>

/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL()    sve_get_current_vl()
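
/*
 * Illustrative userspace sketch (not part of this header): a process
 * requests a 256-bit SVE vector length with
 *
 *      ret = prctl(PR_SVE_SET_VL, 32); // vl in bytes; rounded to a supported value
 *
 * and reads the resulting configuration back with prctl(PR_SVE_GET_VL).
 * Flags such as PR_SVE_VL_INHERIT and PR_SVE_SET_VL_ONEXEC may be OR'ed
 * into the argument; see Documentation/arm64/sve.txt.
 */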

/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg)        ptrauth_prctl_reset_keys(tsk, arg)

/*
 * For CONFIG_GCC_PLUGIN_STACKLEAK
 *
 * These need to be macros because otherwise we get stuck in a nightmare
 * of header definitions for the use of task_stack_page.
 */

#define current_top_of_stack()                                                  \
({                                                                              \
        struct stack_info _info;                                                \
        BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info));   \
        _info.high;                                                             \
})
#define on_thread_stack()       (on_task_stack(current, current_stack_pointer, NULL))
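
/*
 * Illustrative note: with CONFIG_GCC_PLUGIN_STACKLEAK, stackleak_erase()
 * in kernel/stackleak.c uses on_thread_stack() to decide whether the
 * current SP or current_top_of_stack() bounds the region it poisons
 * before returning to userspace.
 */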

#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */