// SPDX-License-Identifier: GPL-2.0
/*
 *  Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>

#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
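
/*
 * Note: CREATE_TRACE_POINTS may be defined in only one compilation unit
 * per trace event header; defining it here instantiates the syscall
 * enter/exit tracepoints used by do_syscall_trace_enter/exit below.
 */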

void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	__ctl_store(cr0_old.val, 0, 0);
	__ctl_store(cr2_old.val, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;

	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new.tdc = 1;
			else
				cr2_new.tdc = 2;
		}
	}

	/* Set or clear the guarded storage enablement bit. */
	if (MACHINE_HAS_GS) {
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new.gse = 1;
	}

	/* Load control registers 0 and 2 only if they have changed. */
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		__ctl_load(cr0_new.val, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new.val, 2, 2);

	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		new.start = 0;
		new.end = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}

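/*
 * Single stepping has no dedicated hardware bit on s390; it is emulated
 * with the PER (program event recording) facility: an instruction-fetch
 * event (or a branch event, for block stepping) armed over the whole
 * address space stops the task after every instruction. update_cr_regs()
 * merges the TIF flags set below into control registers 9-11.
 */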
void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	task->thread.per_flags = 0;
}

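/*
 * User-area offsets must be naturally aligned: 8 bytes for the 64 bit
 * layout (mask 7 below), relaxed to 4 bytes for the access registers.
 * E.g. peek_user() rejects addr 12 for a 64 bit field since 12 & 7 != 0.
 */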
#define __ADDR_MASK 7

static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pointer type fields with different offsets
 * in the child process compared to the parent process.
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}

static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register need to get validated for sanity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}

static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(child->thread.last_break,
			 (unsigned long __user *) data);
		return 0;
	case PTRACE_ENABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;
	case PTRACE_DISABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
		return 0;
	case PTRACE_TE_ABORT_RAND:
		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
			return -EIO;
		switch (data) {
		case 0UL:
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
			break;
		case 1UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		case 2UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	default:
		return ptrace_request(child, request, addr, data);
	}
}
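
/*
 * Tracer-side usage sketch (userspace, not part of this file): reading
 * the whole 64 bit user area of a stopped tracee in one call with the
 * s390 specific PTRACE_PEEKUSR_AREA request. "pid" is a placeholder for
 * a tracee already stopped under ptrace:
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>		// ptrace_area, PTRACE_PEEKUSR_AREA
 *
 *	struct user buf;
 *	ptrace_area parea = {
 *		.len		= sizeof(buf),
 *		.kernel_addr	= 0,			// offset in USER area
 *		.process_addr	= (unsigned long) &buf,	// destination
 *	};
 *	if (ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL) == -1)
 *		perror("PTRACE_PEEKUSR_AREA");
 */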

#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 byte instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit). The translation of
 * the requests themselves as well as the addresses of the user
 * areas is done in the functions below; the compat structures
 * (compat_user, compat_per_struct_kernel) describe the 31 bit
 * layout of the user area.
 */

/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15 */
			*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if (test_fp_ctl(tmp))
			return -EINVAL;
		child->thread.fpu.fpc = data;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			*(__u32 *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = tmp;
		else
			*(__u32 *)((addr_t)
				child->thread.fpu.fprs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}

static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(child->thread.last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long mask = -1UL;

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_pt_regs_flag(regs, PIF_SYSCALL);
		return -1;
	}

	/* Do the secure computing check after ptrace. */
	if (secure_computing(NULL)) {
		/* seccomp failures shouldn't expose any additional code. */
		return -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	if (is_compat_task())
		mask = 0xffffffff;

	audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
			    regs->gprs[3] & mask, regs->gprs[4] & mask,
			    regs->gprs[5] & mask);

	return regs->gprs[2];
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}

/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	_s390_fp_regs fp_regs;

	if (target == current)
		save_fpu_regs();

	fp_regs.fpc = target->thread.fpu.fpc;
	fpregs_store(&fp_regs, &target->thread.fpu);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &fp_regs, 0, -1);
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	if (target == current)
		save_fpu_regs();

	if (MACHINE_HAS_VX)
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	if (MACHINE_HAS_VX)
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	return rc;
}
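
/*
 * Note on the conversions above: with the vector facility installed, the
 * 16 floating point registers overlay the leftmost 64 bits of vector
 * registers V0-V15, so the FPR view is converted to and from the vxrs
 * array instead of being stored separately.
 */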

static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       void *kbuf, void __user *ubuf)
{
	if (count > 0) {
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = target->thread.last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(target->thread.last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned char *data;

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	data = target->thread.trap_tdb;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}

static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_vxrs_low_get(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     void *kbuf, void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i, rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
	if (rc == 0)
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];

	return rc;
}
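
/*
 * The VXRS_LOW regset covers only the rightmost 64 bits of V0-V15; the
 * leftmost halves are already visible as the legacy floating point
 * registers. Hence the pointer arithmetic above: (__u64 *)(vxrs + i) + 1
 * addresses the low half of the 128 bit vector register i.
 */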

static int s390_vxrs_high_get(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      void *kbuf, void __user *ubuf)
{
	__vector128 vxrs[__NUM_VXRS_HIGH];

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}

static int s390_vxrs_high_set(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      const void *kbuf, const void __user *ubuf)
{
	int rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
	return rc;
}

static int s390_system_call_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	unsigned int *data = &target->thread.system_call;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(unsigned int));
}

static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	unsigned int *data = &target->thread.system_call;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
}

static int s390_gs_cb_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	if (target == current)
		save_gs_cb(data);
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(struct gs_cb));
}

static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!target->thread.gs_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}
	if (!target->thread.gs_cb)
		gs_cb.gsd = 25;
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		__ctl_set_bit(2, 4);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}
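
/*
 * Allocation pattern used above and in s390_runtime_instr_set() below:
 * a control block is allocated up front with GFP_KERNEL, the user copy
 * and validation happen without any locks held, and only the final
 * installation into the thread struct runs with preemption disabled.
 */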

static int s390_gs_bc_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(struct gs_cb));
}

static int s390_gs_bc_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		target->thread.gs_bc_cb = data;
	}
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(struct gs_cb));
}

static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
{
	return (cb->rca & 0x1f) == 0 &&
		(cb->roa & 0xfff) == 0 &&
		(cb->rla & 0xfff) == 0xfff &&
		cb->s == 1 &&
		cb->k == 1 &&
		cb->h == 0 &&
		cb->reserved1 == 0 &&
		cb->ps == 1 &&
		cb->qs == 0 &&
		cb->pc == 1 &&
		cb->qc == 0 &&
		cb->reserved2 == 0 &&
		cb->key == PAGE_DEFAULT_KEY &&
		cb->reserved3 == 0 &&
		cb->reserved4 == 0 &&
		cb->reserved5 == 0 &&
		cb->reserved6 == 0 &&
		cb->reserved7 == 0 &&
		cb->reserved8 == 0 &&
		cb->rla >= cb->roa &&
		cb->rca >= cb->roa &&
		cb->rca <= cb->rla+1 &&
		cb->m < 3;
}
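
/*
 * In words: the runtime instrumentation origin (roa) and limit (rla)
 * must delimit a 4K aligned buffer, the current address (rca) must lie
 * within [roa, rla + 1] at a 32 byte boundary, and all mode, key and
 * reserved fields must carry the values the kernel itself would set.
 */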

static int s390_runtime_instr_get(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  void *kbuf, void __user *ubuf)
{
	struct runtime_instr_cb *data = target->thread.ri_cb;

	if (!test_facility(64))
		return -ENODEV;
	if (!data)
		return -ENODATA;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(struct runtime_instr_cb));
}

static int s390_runtime_instr_set(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
{
	struct runtime_instr_cb ri_cb = { }, *data = NULL;
	int rc;

	if (!test_facility(64))
		return -ENODEV;

	if (!target->thread.ri_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}

	if (target->thread.ri_cb) {
		if (target == current)
			store_runtime_instr_cb(&ri_cb);
		else
			ri_cb = *target->thread.ri_cb;
	}

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&ri_cb, 0, sizeof(struct runtime_instr_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}

	if (!is_ri_cb_valid(&ri_cb)) {
		kfree(data);
		return -EINVAL;
	}

	preempt_disable();
	if (!target->thread.ri_cb)
		target->thread.ri_cb = data;
	*target->thread.ri_cb = ri_cb;
	if (target == current)
		load_runtime_instr_cb(target->thread.ri_cb);
	preempt_enable();

	return 0;
}

static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
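
/*
 * Tracer-side usage sketch (userspace, not part of this file): the
 * regsets above are reachable through the generic PTRACE_GETREGSET
 * request with the ELF note type passed as the "addr" argument, e.g.
 * for the interrupted system call number ("pid" is a placeholder for
 * a stopped tracee):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *
 *	unsigned int syscall_nr;
 *	struct iovec iov = { .iov_base = &syscall_nr,
 *			     .iov_len = sizeof(syscall_nr) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_S390_SYSTEM_CALL, &iov);
 */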

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			/* Advance to the high half of the next 64 bit gpr. */
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      void *kbuf, void __user *ubuf)
{
	compat_ulong_t last_break;

	if (count > 0) {
		last_break = target->thread.last_break;
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static const struct user_regset s390_compat_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}

static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (kstrtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}
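
/*
 * Example: regs_query_register_offset("r2") returns 2, which
 * regs_get_register() then maps to regs->gprs[2]; this is how e.g.
 * kprobe event arguments resolve register names at runtime.
 */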

const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel
 * stack, this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}
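
/*
 * The bounds check above works because kernel stacks are THREAD_SIZE
 * aligned: an address is on the same stack as the stack pointer iff
 * both round down to the same THREAD_SIZE block.
 */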