#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

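/*
 * Default implementation of the macro that returns the current
 * instruction pointer ("program counter").
 */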
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

#ifdef __KERNEL__

#include <linux/string.h>

#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>

#ifdef __KERNEL__
#define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE 0xffff0000
#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
                   AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK
#endif /* __KERNEL__ */

struct debug_info {
        /* Have we suspended stepping by a debugger? */
        int suspended_step;
        /* Allow breakpoints and watchpoints to be disabled for this thread. */
        int bps_disabled;
        int wps_disabled;
        /* Hardware breakpoints pinned to this task. */
        struct perf_event *hbp_break[ARM_MAX_BRP];
        struct perf_event *hbp_watch[ARM_MAX_WRP];
};

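/* Register state saved and restored by cpu_switch_to() when switching tasks. */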
struct cpu_context {
        unsigned long x19;
        unsigned long x20;
        unsigned long x21;
        unsigned long x22;
        unsigned long x23;
        unsigned long x24;
        unsigned long x25;
        unsigned long x26;
        unsigned long x27;
        unsigned long x28;
        unsigned long fp;
        unsigned long sp;
        unsigned long pc;
};

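/* Architecture-specific per-thread state, embedded in struct task_struct. */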
struct thread_struct {
        struct cpu_context cpu_context;   /* cpu context saved across task switches */
        unsigned long tp_value;           /* TLS register */
        struct fpsimd_state fpsimd_state; /* FP/SIMD register state */
        unsigned long fault_address;      /* fault address */
        struct debug_info debug;          /* hardware debug state */
};

#define INIT_THREAD { }

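/*
 * Common initialisation for a new user register set: zero everything,
 * mark no syscall in progress and set the entry point.
 */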
static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
        memset(regs, 0, sizeof(*regs));
        regs->syscallno = ~0UL;
        regs->pc = pc;
}

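/* Set up a new AArch64 (EL0t) user context. */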
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
{
        start_thread_common(regs, pc);
        regs->pstate = PSR_MODE_EL0t;
        regs->sp = sp;
}

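/* Set up a compat (AArch32) user context; bit 0 of the entry point selects Thumb mode. */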
#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
                                       unsigned long sp)
{
        start_thread_common(regs, pc);
        regs->pstate = COMPAT_PSR_MODE_USR;
        if (pc & 1)
                regs->pstate |= COMPAT_PSR_T_BIT;
        regs->compat_sp = sp;
}
#endif

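/* Forward declaration; the full definition lives in <linux/sched.h>. */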
struct task_struct;

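/* Free all resources held by a thread when it exits. */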
extern void release_thread(struct task_struct *);

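/* Nothing to do before copying thread state; prepare_to_copy() is a no-op here. */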
#define prepare_to_copy(tsk) do { } while (0)

unsigned long get_wchan(struct task_struct *p);

#define cpu_relax() barrier()

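/* Low-level task switch: saves prev's CPU context and restores next's. */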
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);

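/* The user pt_regs are located at the top of the task's kernel stack. */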
#define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk) task_pt_regs(tsk)->pc
#define KSTK_ESP(tsk) task_pt_regs(tsk)->sp

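/*
 * Prefetching support.
 */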
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
        asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
        asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *x)
{
        prefetchw(x);
}

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif /* __KERNEL__ */

#endif /* __ASM_PROCESSOR_H */