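/* SPARC64 signal handling: the getcontext/setcontext traps, rt_sigreturn,
 * and rt signal frame setup for 64-bit tasks (32-bit compat tasks are
 * diverted to do_signal32()).
 *
 * A rough userspace view (a sketch, assuming the C library routes
 * getcontext()/setcontext() through these traps on sparc64):
 *
 *	ucontext_t uc;
 *	getcontext(&uc);	// serviced by sparc64_get_context()
 *	...
 *	setcontext(&uc);	// serviced by sparc64_set_context()
 *
 * Signal delivery builds a struct rt_signal_frame on the user stack, and the
 * restorer trampoline invokes rt_sigreturn() to unwind it.
 */
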
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/bitops.h>
#include <linux/context_tracking.h>

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/fpumacro.h>
#include <asm/uctx.h>
#include <asm/siginfo.h>
#include <asm/visasm.h>
#include <asm/switch_to.h>
#include <asm/cacheflush.h>

#include "sigutil.h"
#include "systbls.h"
#include "kernel.h"
#include "entry.h"

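/* setcontext() trap handler: install the register state described by the
 * user-supplied ucontext (first trap argument).  A non-zero second argument
 * requests that the saved signal mask be restored as well.  Only the
 * condition codes and %asi may be modified in %tstate, the caller must not
 * have user register windows still buffered in the kernel, and a malformed,
 * misaligned or unreadable context is answered with SIGSEGV.
 */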
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	enum ctx_state prev_state = exception_enter();
	mc_gregset_t __user *grp;
	unsigned long pc, npc, tstate;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	synchronize_user_stack();
	if (get_thread_wsaved() ||
	    (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
	    (!__access_ok(ucp, sizeof(*ucp))))
		goto do_sigsegv;
	grp = &ucp->uc_mcontext.mc_gregs;
	err = __get_user(pc, &((*grp)[MC_PC]));
	err |= __get_user(npc, &((*grp)[MC_NPC]));
	if (err || ((pc | npc) & 3))
		goto do_sigsegv;
	if (regs->u_regs[UREG_I1]) {
		sigset_t set;

		if (_NSIG_WORDS == 1) {
			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
				goto do_sigsegv;
		} else {
			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
				goto do_sigsegv;
		}
		set_current_blocked(&set);
	}
	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;
	err |= __get_user(regs->y, &((*grp)[MC_Y]));
	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
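
	/* %g7 is the userspace thread/TLS register, so it is deliberately
	 * not restored here; the restore skips from %g6 straight to the
	 * %o0-%o7 images (MC_O0..MC_O7).
	 */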
	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));

	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
	err |= __put_user(fp,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __put_user(i7,
	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));

	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs_write(0);
		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
		if (fprs & FPRS_DL)
			err |= copy_from_user(fpregs,
					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
					      (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_from_user(fpregs+16,
					      ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
					      (sizeof(unsigned int) * 32));
		err |= __get_user(current_thread_info()->xfsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
		err |= __get_user(current_thread_info()->gsr[0],
				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
		regs->tstate &= ~TSTATE_PEF;
	}
	if (err)
		goto do_sigsegv;
out:
	exception_exit(prev_state);
	return;
do_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

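/* getcontext() trap handler: capture the calling thread's register state
 * and signal mask into the user-supplied ucontext (first trap argument).
 * %tpc/%tnpc are first advanced past the trap instruction so that a later
 * setcontext() on this context resumes at the following instruction.
 */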
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	enum ctx_state prev_state = exception_enter();
	mc_gregset_t __user *grp;
	mcontext_t __user *mcp;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	synchronize_user_stack();
	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
		goto do_sigsegv;

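	/* Floating-point state is deliberately not captured: getcontext() is
	 * treated like any other system call with respect to FPU contents,
	 * so mcfpu_enab is always written as zero and the #else branch below
	 * is compiled out.
	 */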
#if 1
	fenab = 0;
#else
	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif

	mcp = &ucp->uc_mcontext;
	grp = &mcp->mc_gregs;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc = (regs->tnpc & 0xffffffff);
		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	}
	err = 0;
	if (_NSIG_WORDS == 1)
		err |= __put_user(current->blocked.sig[0],
				  (unsigned long __user *)&ucp->uc_sigmask);
	else
		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
				      sizeof(sigset_t));

	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
	err |= __put_user(regs->y, &((*grp)[MC_Y]));
	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));

	err |= __get_user(fp,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __get_user(i7,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __put_user(fp, &(mcp->mc_fp));
	err |= __put_user(i7, &(mcp->mc_i7));

	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;

		fprs = current_thread_info()->fpsaved[0];
		if (fprs & FPRS_DL)
			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
					    (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_to_user(
				((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
				(sizeof(unsigned int) * 32));
		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
	}
	if (err)
		goto do_sigsegv;
out:
	exception_exit(prev_state);
	return;
do_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

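/* Signal frames built below are always 16-byte aligned, so the same
 * alignment can be demanded of any frame pointer handed back to the kernel.
 */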
static bool invalid_frame_pointer(void __user *fp)
{
	if (((unsigned long) fp) & 15)
		return true;
	return false;
}

struct rt_signal_frame {
	struct sparc_stackf	ss;
	siginfo_t		info;
	struct pt_regs		regs;
	__siginfo_fpu_t __user	*fpu_save;
	stack_t			stack;
	sigset_t		mask;
	__siginfo_rwin_t	*rwin_save;
};

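/* rt_sigreturn: unwind the rt_signal_frame built by setup_rt_frame(),
 * restoring the general registers, condition codes/%asi, optional FPU and
 * register-window state, the sigaltstack settings and the blocked signal
 * mask.  Any inconsistency is answered with SIGSEGV rather than resuming
 * the task with garbage state.
 */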
void do_rt_sigreturn(struct pt_regs *regs)
{
	unsigned long tpc, tnpc, tstate, ufp;
	struct rt_signal_frame __user *sf;
	__siginfo_fpu_t __user *fpu_save;
	__siginfo_rwin_t __user *rwin_save;
	sigset_t set;
	int err;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();
	sf = (struct rt_signal_frame __user *)
		(regs->u_regs[UREG_FP] + STACK_BIAS);

	/* 1. Make sure we are not getting garbage from the user */
	if (invalid_frame_pointer(sf))
		goto segv;

	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
		goto segv;

	if ((ufp + STACK_BIAS) & 0x7)
		goto segv;

	err = __get_user(tpc, &sf->regs.tpc);
	err |= __get_user(tnpc, &sf->regs.tnpc);
	if (test_thread_flag(TIF_32BIT)) {
		tpc &= 0xffffffff;
		tnpc &= 0xffffffff;
	}
	err |= ((tpc | tnpc) & 3);

	/* 2. Restore the state */
	err |= __get_user(regs->y, &sf->regs.y);
	err |= __get_user(tstate, &sf->regs.tstate);
	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));

	/* User can only change condition codes and %asi in %tstate. */
	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (!err && fpu_save)
		err |= restore_fpu_state(regs, fpu_save);

	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
	err |= restore_altstack(&sf->stack);
	if (err)
		goto segv;

	err |= __get_user(rwin_save, &sf->rwin_save);
	if (!err && rwin_save) {
		if (restore_rwin_state(rwin_save))
			goto segv;
	}

	regs->tpc = tpc;
	regs->tnpc = tnpc;

	/* Prevent syscall restart.  */
	pt_regs_clear_syscall(regs);

	set_current_blocked(&set);
	return;
segv:
	force_sig(SIGSEGV);
}

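/* Choose the address for a new signal frame: just below the interrupted
 * stack pointer, or on the alternate signal stack when one is configured
 * and requested via SA_ONSTACK.
 */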
static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
{
	unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;

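	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */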
	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
		return (void __user *) -1L;

	sp = sigsp(sp, ksig) - framesize;

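	/* Always align the frame to 16 bytes.  This keeps sigaltstack users
	 * honest about alignment and also repairs a stack pointer that was
	 * already misaligned when the signal (e.g. a stack overflow) hit.
	 */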
	sp &= ~15UL;

	return (void __user *) sp;
}

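/* Build an rt signal frame on the user stack and point the task at its
 * handler.  The frame carries the interrupted pt_regs, optional FPU and
 * register-window images, the sigaltstack settings and the blocked mask,
 * all of which do_rt_sigreturn() consumes on the way back.
 */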
static inline int
setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	struct rt_signal_frame __user *sf;
	int wsaved, err, sf_size;
	void __user *tail;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	wsaved = get_thread_wsaved();

	sf_size = sizeof(struct rt_signal_frame);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		sf_size += sizeof(__siginfo_fpu_t);
	if (wsaved)
		sf_size += sizeof(__siginfo_rwin_t);
	sf = (struct rt_signal_frame __user *)
		get_sigframe(ksig, regs, sf_size);

	if (invalid_frame_pointer(sf)) {
		if (show_unhandled_signals)
			pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n",
				current->comm, current->pid, (unsigned long)sf,
				regs->tpc, regs->u_regs[UREG_I7]);
		force_sigsegv(ksig->sig);
		return -EINVAL;
	}

	tail = (sf + 1);

	/* 2. Save the current process state */
	err = copy_to_user(&sf->regs, regs, sizeof(*regs));

	if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
		__siginfo_fpu_t __user *fpu_save = tail;
		tail += sizeof(__siginfo_fpu_t);
		err |= save_fpu_state(regs, fpu_save);
		err |= __put_user((u64)fpu_save, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}
	if (wsaved) {
		__siginfo_rwin_t __user *rwin_save = tail;
		tail += sizeof(__siginfo_rwin_t);
		err |= save_rwin_state(wsaved, rwin_save);
		err |= __put_user((u64)rwin_save, &sf->rwin_save);
		set_thread_wsaved(0);
	} else {
		err |= __put_user(0, &sf->rwin_save);
	}

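	/* Record the sigaltstack settings and the signal mask that
	 * rt_sigreturn will reinstate.
	 */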
	err |= __save_altstack(&sf->stack, regs->u_regs[UREG_FP]);

	err |= copy_to_user(&sf->mask, sigmask_to_save(), sizeof(sigset_t));

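	/* Preserve the caller's register window in the frame: copy it from
	 * the user stack if every window has already been flushed there,
	 * otherwise from the kernel-side copy of the topmost saved window.
	 */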
	if (!wsaved) {
		err |= copy_in_user((u64 __user *)sf,
				    (u64 __user *)(regs->u_regs[UREG_FP] +
						   STACK_BIAS),
				    sizeof(struct reg_window));
	} else {
		struct reg_window *rp;

		rp = &current_thread_info()->reg_window[wsaved - 1];
		err |= copy_to_user(sf, rp, sizeof(struct reg_window));
	}
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		err |= copy_siginfo_to_user(&sf->info, &ksig->info);
	else {
		err |= __put_user(ksig->sig, &sf->info.si_signo);
		err |= __put_user(SI_NOINFO, &sf->info.si_code);
	}
	if (err)
		return err;

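	/* 3. Redirect the task to the handler: switch the frame pointer to
	 * the new frame and load the signal number, a pointer to the frame's
	 * siginfo, and the frame address into the slots the handler will see
	 * as its incoming arguments.
	 */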
	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
	regs->u_regs[UREG_I0] = ksig->sig;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;

	/* A pointer to the frame's siginfo also fills the third argument slot. */
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. Resume execution at the handler itself. */
	regs->tpc = (unsigned long) ksig->ka.sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* 5. The return address is the userspace restorer trampoline
	 * (ka_restorer), which issues rt_sigreturn once the handler returns.
	 */
	regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer;
	return 0;
}

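/* A signal interrupted a system call and a handler is about to run: either
 * fail the call with EINTR or rewind %tpc/%tnpc so the trap instruction is
 * re-executed after the handler returns, depending on the error code and
 * the handler's SA_RESTART setting.
 */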
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
				   struct sigaction *sa)
{
	switch (regs->u_regs[UREG_I0]) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
	no_system_call_restart:
		regs->u_regs[UREG_I0] = EINTR;
		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
		break;
	case ERESTARTSYS:
		if (!(sa->sa_flags & SA_RESTART))
			goto no_system_call_restart;
		/* fall through */
	case ERESTARTNOINTR:
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
}

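/* Deliver one pending signal.  orig_i0 is the system call's original first
 * argument, which is needed if the interrupted call has to be restarted.
 */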
static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
{
	struct ksignal ksig;
	int restart_syscall;
	bool has_handler;

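	/* Stash orig_i0 in %g6 across get_signal(), which may stop the task
	 * for a debugger; keeping the value inside pt_regs lets it survive
	 * such a stop without inventing a new ptrace register.  %g6 is fair
	 * game because the userspace ABI treats it as scratch, unlike
	 * %g2-%g5 (which glibc assumes survive a system call) and %g7 (the
	 * thread pointer).
	 */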
	if (pt_regs_is_syscall(regs) &&
	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
		regs->u_regs[UREG_G6] = orig_i0;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		do_signal32(regs);
		return;
	}
#endif

	has_handler = get_signal(&ksig);

	restart_syscall = 0;
	if (pt_regs_is_syscall(regs) &&
	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
		restart_syscall = 1;
		orig_i0 = regs->u_regs[UREG_G6];
	}

	if (has_handler) {
		if (restart_syscall)
			syscall_restart(orig_i0, regs, &ksig.ka.sa);
		signal_setup_done(setup_rt_frame(&ksig, regs), &ksig, 0);
	} else {
		if (restart_syscall) {
			switch (regs->u_regs[UREG_I0]) {
			case ERESTARTNOHAND:
			case ERESTARTSYS:
			case ERESTARTNOINTR:
				/* replay the system call when we are done */
				regs->u_regs[UREG_I0] = orig_i0;
				regs->tpc -= 4;
				regs->tnpc -= 4;
				pt_regs_clear_syscall(regs);

			case ERESTART_RESTARTBLOCK:
				regs->u_regs[UREG_G1] = __NR_restart_syscall;
				regs->tpc -= 4;
				regs->tnpc -= 4;
				pt_regs_clear_syscall(regs);
			}
		}
		restore_saved_sigmask();
	}
}

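/* Called from the trap-return path with the current thread_info flags:
 * handle uprobes, pending signals and notify-resume work before the task
 * re-enters user mode.
 */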
void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
{
	user_exit();
	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs, orig_i0);
	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}
	user_enter();
}