/*
 * arch/xtensa/kernel/process.c
 *
 * Xtensa process/thread handling: idle, thread creation and teardown,
 * and coprocessor state management.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/hw_breakpoint.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <linux/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/regs.h>
#include <asm/hw_breakpoint.h>

extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);


#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

#if XTENSA_HAVE_COPROCESSORS

void coprocessor_release_all(struct thread_info *ti)
{
	unsigned long cpenable;
	int i;

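	/* Make sure we don't switch tasks during this operation. */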
	preempt_disable();

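	/* Release every coprocessor owned by this thread. */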
	cpenable = ti->cpenable;

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (coprocessor_owner[i] == ti) {
			coprocessor_owner[i] = 0;
			cpenable &= ~(1 << i);
		}
	}

	ti->cpenable = cpenable;
	/* If this is the running thread, also disable them in hardware. */
	if (ti == current_thread_info())
		xtensa_set_sr(0, cpenable);

	preempt_enable();
}

void coprocessor_flush_all(struct thread_info *ti)
{
	unsigned long cpenable, old_cpenable;
	int i;

	preempt_disable();

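	/*
	 * Temporarily enable this thread's coprocessors so their state
	 * can be flushed to memory.
	 */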
	old_cpenable = xtensa_get_sr(cpenable);
	cpenable = ti->cpenable;
	xtensa_set_sr(cpenable, cpenable);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
			coprocessor_flush(ti, i);
		cpenable >>= 1;
	}
	/* Restore the original CPENABLE value. */
	xtensa_set_sr(old_cpenable, cpenable);

	preempt_enable();
}

#endif

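/*
 * Power-management idle: defer to the platform-specific idle hook.
 */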
void arch_cpu_idle(void)
{
	platform_idle();
}

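/*
 * This is called when the thread exits: release any coprocessors it
 * still owns.
 */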
void exit_thread(struct task_struct *tsk)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_release_all(task_thread_info(tsk));
#endif
}

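/*
 * Flush thread state. This is called when a thread does an execve().
 * Coprocessor state is flushed to memory first, so it is still valid
 * if the execve() fails.
 */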
void flush_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
	struct thread_info *ti = current_thread_info();
	coprocessor_flush_all(ti);
	coprocessor_release_all(ti);
#endif
	flush_ptrace_hw_breakpoint(current);
}

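/*
 * This gets called so that we can store coprocessor state into memory
 * and copy the current task into the new thread.
 */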
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_flush_all(task_thread_info(src));
#endif
	*dst = *src;
	return 0;
}

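/*
 * Copy thread.
 *
 * This is called in two modes:
 * 1) User thread creation: usp_thread_fn is the new user stack pointer
 *    (or 0 to keep the parent's), the parent's registers are copied into
 *    childregs and the child returns through ret_from_fork.
 * 2) Kernel thread creation (PF_KTHREAD/PF_IO_WORKER): usp_thread_fn is
 *    the function to run and thread_fn_arg is its argument; the child
 *    returns through ret_from_kernel_thread.
 *
 * In both cases a minimal dummy frame is set up at p->thread.sp so that
 * _switch_to can "return" into the new thread.
 */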
int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
		unsigned long thread_fn_arg, struct task_struct *p,
		unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	struct thread_info *ti;
#endif

#if defined(__XTENSA_WINDOWED_ABI__)
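	/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */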
	SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
	SPILL_SLOT(childregs, 0) = 0;

	p->thread.sp = (unsigned long)childregs;
#elif defined(__XTENSA_CALL0_ABI__)
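	/* Reserve 16 bytes for the _switch_to stack spill. */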
	p->thread.sp = (unsigned long)childregs - 16;
#else
#error Unsupported Xtensa ABI
#endif

	if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		struct pt_regs *regs = current_pt_regs();
		unsigned long usp = usp_thread_fn ?
			usp_thread_fn : regs->areg[1];

		p->thread.ra = MAKE_RA_FOR_CALL(
				(unsigned long)ret_from_fork, 0x1);

		*childregs = *regs;
		childregs->areg[1] = usp;
		childregs->areg[2] = 0;

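		/*
		 * When sharing memory with the parent thread, the child
		 * usually starts on a fresh stack, so windowbase, windowstart
		 * and wmask are reset below. The exception is vfork: the
		 * child keeps running on the parent's stack until it calls
		 * execve(), so at least the caller's stack pointer is spilled
		 * here for the window overflow handlers to find a valid frame.
		 */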
		if (clone_flags & CLONE_VM) {
			/* Check that the caller's window is live and on the same stack. */
			int len = childregs->wmask & ~0xf;
			if (regs->areg[1] == usp && len != 0) {
				int callinc = (regs->areg[0] >> 30) & 3;
				int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
				put_user(regs->areg[caller_ars+1],
					 (unsigned __user*)(usp - 12));
			}
			childregs->wmask = 1;
			childregs->windowstart = 1;
			childregs->windowbase = 0;
		}

		if (clone_flags & CLONE_SETTLS)
			childregs->threadptr = tls;
	} else {
		p->thread.ra = MAKE_RA_FOR_CALL(
				(unsigned long)ret_from_kernel_thread, 1);

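		/* Pass parameters to ret_from_kernel_thread: */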
#if defined(__XTENSA_WINDOWED_ABI__)
		/*
		 * a2 = thread_fn, a3 = thread_fn arg: the window underflow on
		 * return from _switch_to loads them from these spill slots.
		 */
		SPILL_SLOT(childregs, 2) = usp_thread_fn;
		SPILL_SLOT(childregs, 3) = thread_fn_arg;
#elif defined(__XTENSA_CALL0_ABI__)
		/*
		 * a12 = thread_fn, a13 = thread_fn arg: the _switch_to
		 * epilogue loads them from the stack.
		 */
		((unsigned long *)p->thread.sp)[0] = usp_thread_fn;
		((unsigned long *)p->thread.sp)[1] = thread_fn_arg;
#else
#error Unsupported Xtensa ABI
#endif

		/*
		 * childregs is only used when the child returns to user
		 * space, in which case start_thread() sets it up.
		 */
	}

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	ti = task_thread_info(p);
	ti->cpenable = 0;
#endif

	clear_ptrace_hw_breakpoint(p);

	return 0;
}
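
/*
 * Walk the saved call frames of a sleeping task and return the first
 * return address outside the scheduler (reported as "wchan").
 */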
unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long sp, pc;
	unsigned long stack_page = (unsigned long) task_stack_page(p);
	int count = 0;

	sp = p->thread.sp;
	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);

	do {
		if (sp < stack_page + sizeof(struct task_struct) ||
		    sp >= (stack_page + THREAD_SIZE) ||
		    pc == 0)
			return 0;
		if (!in_sched_functions(pc))
			return pc;

		/*
		 * The spill area below sp holds the caller's a0 (return
		 * address) and a1 (stack pointer); step up one frame.
		 */
		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
		sp = SPILL_SLOT(sp, 1);
	} while (count++ < 16);
	return 0;
}