/*
 * arch/xtensa/kernel/process.c
 *
 * Xtensa Processor version.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/hw_breakpoint.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <linux/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/regs.h>
#include <asm/hw_breakpoint.h>

/* Assembly return trampolines for newly forked user and kernel threads. */
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);

/* Platform power-off hook; NULL when the platform provides none. */
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);


#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
/* Stack canary value used by -fstack-protector instrumented code. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

#if XTENSA_HAVE_COPROCESSORS

/*
 * Release ownership of all coprocessors held by thread @ti.
 *
 * Clears every coprocessor_owner[] slot that points at @ti, drops the
 * corresponding bits from ti->cpenable, and — when @ti is the current
 * thread — writes 0 to the CPENABLE special register so any further
 * coprocessor use traps again.
 */
void coprocessor_release_all(struct thread_info *ti)
{
	unsigned long cpenable;
	int i;

	/* Keep the owner scan and the CPENABLE update on one CPU. */
	preempt_disable();

	cpenable = ti->cpenable;

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (coprocessor_owner[i] == ti) {
			coprocessor_owner[i] = 0;
			cpenable &= ~(1 << i);
		}
	}

	ti->cpenable = cpenable;
	if (ti == current_thread_info())
		xtensa_set_sr(0, cpenable);	/* disable all coprocessors */

	preempt_enable();
}

/*
 * Save the live state of every coprocessor owned by thread @ti into
 * its thread save areas.
 *
 * The thread's coprocessors are temporarily enabled in CPENABLE so
 * their register files can be read; the previous CPENABLE value is
 * restored before returning.
 */
void coprocessor_flush_all(struct thread_info *ti)
{
	unsigned long cpenable, old_cpenable;
	int i;

	preempt_disable();

	/* Remember the caller's CPENABLE, then enable @ti's coprocessors. */
	old_cpenable = xtensa_get_sr(cpenable);
	cpenable = ti->cpenable;
	xtensa_set_sr(cpenable, cpenable);

	/* Flush each enabled coprocessor still owned by @ti. */
	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
			coprocessor_flush(ti, i);
		cpenable >>= 1;
	}
	/* Put CPENABLE back the way we found it. */
	xtensa_set_sr(old_cpenable, cpenable);

	preempt_enable();
}

#endif

/*
 * Idle-loop body: defer to the platform-specific idle hook.
 */
void arch_cpu_idle(void)
{
	platform_idle();
}

/*
 * Per-thread teardown: make sure the dying thread no longer owns any
 * coprocessor before its thread_info goes away.
 */
void exit_thread(struct task_struct *tsk)
{
#if XTENSA_HAVE_COPROCESSORS
	struct thread_info *ti = task_thread_info(tsk);

	coprocessor_release_all(ti);
#endif
}

135
136
137
138
139void flush_thread(void)
140{
141#if XTENSA_HAVE_COPROCESSORS
142 struct thread_info *ti = current_thread_info();
143 coprocessor_flush_all(ti);
144 coprocessor_release_all(ti);
145#endif
146 flush_ptrace_hw_breakpoint(current);
147}
148
/*
 * Duplicate a task_struct for fork.
 *
 * The source thread's live coprocessor registers must be flushed to
 * its save areas *before* the struct copy, so the child inherits a
 * consistent snapshot.  Returns 0 (cannot fail).
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_flush_all(task_thread_info(src));
#endif
	*dst = *src;
	return 0;
}

162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
205 unsigned long thread_fn_arg, struct task_struct *p,
206 unsigned long tls)
207{
208 struct pt_regs *childregs = task_pt_regs(p);
209
210#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
211 struct thread_info *ti;
212#endif
213
214
215 SPILL_SLOT(childregs, 1) = (unsigned long)childregs;
216 SPILL_SLOT(childregs, 0) = 0;
217
218 p->thread.sp = (unsigned long)childregs;
219
220 if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
221 struct pt_regs *regs = current_pt_regs();
222 unsigned long usp = usp_thread_fn ?
223 usp_thread_fn : regs->areg[1];
224
225 p->thread.ra = MAKE_RA_FOR_CALL(
226 (unsigned long)ret_from_fork, 0x1);
227
228
229
230
231
232 *childregs = *regs;
233 childregs->areg[1] = usp;
234 childregs->areg[2] = 0;
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249 if (clone_flags & CLONE_VM) {
250
251 int len = childregs->wmask & ~0xf;
252 if (regs->areg[1] == usp && len != 0) {
253 int callinc = (regs->areg[0] >> 30) & 3;
254 int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
255 put_user(regs->areg[caller_ars+1],
256 (unsigned __user*)(usp - 12));
257 }
258 childregs->wmask = 1;
259 childregs->windowstart = 1;
260 childregs->windowbase = 0;
261 } else {
262 int len = childregs->wmask & ~0xf;
263 memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
264 ®s->areg[XCHAL_NUM_AREGS - len/4], len);
265 }
266
267 childregs->syscall = regs->syscall;
268
269 if (clone_flags & CLONE_SETTLS)
270 childregs->threadptr = tls;
271 } else {
272 p->thread.ra = MAKE_RA_FOR_CALL(
273 (unsigned long)ret_from_kernel_thread, 1);
274
275
276
277
278 SPILL_SLOT(childregs, 3) = thread_fn_arg;
279 SPILL_SLOT(childregs, 2) = usp_thread_fn;
280
281
282
283
284 }
285
286#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
287 ti = task_thread_info(p);
288 ti->cpenable = 0;
289#endif
290
291 clear_ptrace_hw_breakpoint(p);
292
293 return 0;
294}
295
/*
 * get_wchan - report where a sleeping task is blocked.
 *
 * Walks the task's saved call chain via the a0/a1 spill slots below
 * each stack pointer, starting from thread.ra/thread.sp, and returns
 * the first PC outside the scheduler.  Returns 0 for the current or a
 * running task, if the walk leaves the task's stack, hits a zero PC,
 * or exceeds 16 frames.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long sp, pc;
	unsigned long stack_page = (unsigned long) task_stack_page(p);
	int count = 0;

	if (!p || p == current || task_is_running(p))
		return 0;

	sp = p->thread.sp;
	pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);

	do {
		/* Stop if sp is outside the task's stack or pc is bogus. */
		if (sp < stack_page + sizeof(struct task_struct) ||
		    sp >= (stack_page + THREAD_SIZE) ||
		    pc == 0)
			return 0;
		if (!in_sched_functions(pc))
			return pc;

		/* Unwind one frame: spill slot 0 holds the return address,
		 * slot 1 the caller's stack pointer. */
		pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
		sp = SPILL_SLOT(sp, 1);
	} while (count++ < 16);
	return 0;
}
