/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
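/* The CFM is 38 bits wide: sof(7), sol(7), sor(4), rrb.gr(7), rrb.fr(7), rrb.pr(6). */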
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
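	/*
	 * Bit 63 of cr.ifs is the "valid" bit; it is set for
	 * interruption frames but clear when the kernel was entered
	 * via a system call, so a non-negative value identifies a
	 * syscall frame.
	 */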
	return (long) pt->cr_ifs >= 0;
}

/*
 * Collect the NaT bits for scratch registers r1-r31 from SCRATCH_UNAT
 * (whose bit positions reflect the registers' addresses within struct
 * pt_regs) and return them as a bitset indexed by register number.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
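	/*
	 * GET_BITS works by rotating scratch_unat right so that the
	 * UNAT bit belonging to r<first> lands at bit position <first>,
	 * then masking off the (last - first + 1) bits of interest.
	 */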
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}

/*
 * Insert the NaT bits NAT for scratch registers r1-r31 into the UNAT
 * bit layout used by struct pt_regs (the inverse of
 * ia64_get_scratch_nat_bits()) and return the resulting UNAT word.
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
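	/*
	 * PUT_BITS is the inverse of GET_BITS: it rotates the NaT bits
	 * left from their register-number positions back to the UNAT
	 * positions that correspond to the scratch registers' addresses
	 * within struct pt_regs.
	 */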
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * Compute the user-level RNaT collection word that would live at
 * URNAT_ADDR.  The NaT bits for stacked registers that were spilled
 * before the kernel was entered come from pt->ar_rnat; the bits for
 * registers still sitting in the kernel register backing store (KRBS)
 * come from the kernel's RNaT collection slots, or from sw->ar_rnat
 * for slots that have not been written out yet.  Because the user and
 * kernel backing stores are in general not aligned with respect to
 * their RNaT slots, the kernel bits have to be gathered from two
 * adjacent kernel collection words and rotated into place.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}

/*
 * The reverse of get_rnat: scatter the NaT bits in URNAT into the
 * appropriate collection words.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
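		/*
		 * cfm & 0x7f is CFM.sof, the size of the current frame;
		 * stepping back that many slots excludes the syscall
		 * arguments from the writable area.
		 */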
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: bit 63 of an RNaT collection word is ignored by the
	 * architecture, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

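/*
 * Return TRUE if ADDR falls within the portion of the user-level
 * backing store that is currently held inside the kernel: i.e.,
 * between the user's ar.bspstore and the RNaT collection slot that
 * corresponds to URBS_END.
 */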
static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_RBS_END gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (unsigned long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill.  To get consistent
			 * behavior across all possible IA-64
			 * implementations, we return zero in this
			 * case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (unsigned long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
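	/*
	 * pt->loadrs holds the ar.rsc.loadrs field in bits 16..29;
	 * shifting right by 19 (16 to get bytes, plus 3 to get 8-byte
	 * slots) yields the number of dirty stacked-register slots in
	 * the kernel RBS.
	 */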
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);	/* CFM.sof: the current frame, too */
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}

/*
 * Synchronize the register backing store that lives in kernel space
 * with the user-level backing store of CHILD between USER_RBS_START
 * and USER_RBS_END (i.e., copy the kernel's view out to user memory).
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), a debugger may change the
 * thread's user-level register backing store directly in memory.  The
 * kernel's copy of the RBS would then be stale, and restoring it on
 * resume would silently overwrite the debugger's changes.  To avoid
 * this, the kernel RBS is written out to user memory before the task
 * stops (ia64_ptrace_stop), and TIF_RESTORE_RSE is set so that the
 * user RBS is read back into the kernel (ia64_sync_krbs) once the
 * task resumes.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the registers.  So ptrace_attach_sync_user_rbs puts
 * the registers in place now, for an attach that won't go through
 * ptrace_stop.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in
	 * a job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

/*
 * Change the machine-state of CHILD such that it will return via the
 * normal kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |= (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
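	/*
	 * Setting bit 63 (the "valid" bit) makes cr_ifs look like an
	 * interruption frame, so in_syscall() will now return false.
	 */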
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace:unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		__put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace:unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single-step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single-step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();


	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);

	return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	audit_syscall_exit(&regs);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}

/* Utrace implementation starts here */

struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};

static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;

			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit path is
			 * used.  This ensures that the proper state
			 * will be picked up when resuming execution.
			 * However, it *also* means that once we write
			 * PT_AR_BSP/PT_CFM, it won't be possible to
			 * modify the syscall arguments of the pending
			 * system call any longer.  This shouldn't be
			 * an issue because modifying PT_AR_BSP/PT_CFM
			 * generally implies that we're either
			 * abandoning the pending system call or that
			 * we defer its re-execution (e.g., due to GDB
			 * doing an inferior function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;

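				/*
				 * psr.ri selects the instruction slot
				 * (0-2) within a bundle; 3 is a
				 * reserved encoding, so refuse it and
				 * fall back to slot 0.
				 */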
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}

static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}

void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16-r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/*
	 * ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}

void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/*
	 * ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}

#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
				index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
					 &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}

void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			 dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) { /* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					 &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) { /* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}

static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}

static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   unsigned int pos, unsigned int count,
	   void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_gpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.  It is
 * invoked before the thread stops, so that a tracer reading user
 * memory after the thread stops will see the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static int
access_uarea(struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access)
{
	unsigned int pos = -1;
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	/* reject accesses to the reserved/unused holes in the user area: */
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
					"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon.  This test
	 * must be done once we know that we can do the operation,
	 * i.e. the arguments are all valid, but before we start
	 * modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes are
	 * trying to modify the debug registers for system wide
	 * monitoring sessions.
	 *
	 * We also include read access here, because it may cause the
	 * PMU-installed debug register state (dbr[], ibr[]) to be
	 * reset.  The two arrays are also used by perfmon, but we do
	 * not use IA64_THREAD_DBG_VALID.  The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
		       sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
		       sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}

struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
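	/*
	 * As in ia64_get_user_rbs_end(): loadrs >> 19 converts the
	 * ar.rsc.loadrs byte count into a count of 8-byte slots, so
	 * ndirty locates the current frame (and thus the syscall
	 * arguments) within the kernel RBS.
	 */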
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = 0,
		.n = 6,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}