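/*
 * Kernel support for the ptrace() and syscall tracing interfaces
 * on ia64.
 *
 * Derived from the x86 and Alpha versions.
 */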
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"
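/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */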
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG 0

#if PTRACE_DEBUG
# define dprintk(format...) printk(format)
# define inline
#else
# define dprintk(format...)
#endif
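/* Return TRUE if PT was created due to kernel-entry via a system-call.  */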
static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
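/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */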
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
# define GET_BITS(first, last, unat)					\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

# undef GET_BITS
}
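/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */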
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
# define PUT_BITS(first, last, nat)					\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

# undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}
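/*
 * This routine reads the RNaT bits that cover the word at user
 * backing-store address URNAT_ADDR.  Depending on how far the RSE has
 * progressed, the bits may live in pt_regs->ar_rnat, in an RNaT
 * collection saved on the kernel backing store, or in
 * switch_stack->ar_rnat; collect them from wherever they currently
 * are and merge them into a single NaT collection word.
 */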
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
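/*
 * The reverse of get_rnat(): write the RNaT bits covering URNAT_ADDR
 * back to wherever they currently live.
 */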
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}

	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}
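/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_RBS_END gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */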
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (unsigned long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (unsigned long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
		   != sizeof(val))
		return -EIO;
	return 0;
}
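/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */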
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
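/*
 * Synchronize the RSE backing store living in kernel space to the VM
 * of the CHILD task: copy, word for word, the region between
 * USER_RBS_START and USER_RBS_END from the kernel backing store into
 * the child's address space.
 */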
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}
553
554static long
555ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
556 unsigned long user_rbs_start, unsigned long user_rbs_end)
557{
558 unsigned long addr, val;
559 long ret;
560
561
562 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
563 if (access_process_vm(child, addr, &val, sizeof(val), 0)
564 != sizeof(val))
565 return -EIO;
566
567 ret = ia64_poke(child, sw, user_rbs_end, addr, val);
568 if (ret < 0)
569 return ret;
570 }
571 return 0;
572}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}
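/*
 * When a thread is stopped (ptraced), a debugger may change the
 * thread's user stack by writing its memory directly, and the RSE
 * state held in the kernel must not later override those changes.
 * To avoid that, we copy the kernel RBS to the user RBS before the
 * task stops, so the user RBS holds current data, and copy the user
 * RBS back to the kernel RBS after the task resumes from the traced
 * stop.  TIF_RESTORE_RSE flags that the copy back into the kernel is
 * still pending.
 */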
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}
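/*
 * This is called to read back the register backing store.
 */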
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}
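/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */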
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate, to ensure it
	 * can't be woken up in any other way.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in
	 * a job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

static inline int
thread_matches (struct task_struct *thread, unsigned long addr)
{
	unsigned long thread_rbs_end;
	struct pt_regs *thread_regs;

	if (ptrace_check_attach(thread, 0) < 0)
		/*
		 * If the thread is not in an attachable state, we
		 * can't access its registers reliably, so pretend
		 * that its register backing store doesn't cover ADDR.
		 */
		return 0;

	thread_regs = task_pt_regs(thread);
	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
		return 0;

	return 1;	/* looks like we've got a winner */
}
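/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */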
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}
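/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the fph state from thread.fph and
 * that thread.fph is marked valid.
 */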
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}
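/*
 * Change the machine-state of CHILD such that it will return via the
 * normal kernel exit-path, rather than the syscall-exit path.
 */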
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Flip the syscall predicates so that the task resumes via the
	 * normal kernel exit path rather than the syscall-exit path.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0) < 0
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0) < 0)
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}
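/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */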
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (access_process_vm(child, addr, &data, sizeof(data), 0)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA is handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}
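/* "asmlinkage" so the input arguments are preserved... */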
asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();

	if (unlikely(current->audit_context)) {
		long syscall;
		int arch;

		syscall = regs.r15;
		arch = AUDIT_ARCH_IA64;

		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
	}

	return 0;
}
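/* "asmlinkage" so the input arguments are preserved... */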
asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	if (unlikely(current->audit_context)) {
		int success = AUDITSC_RESULT(regs.r10);
		long result = regs.r8;

		if (success != AUDITSC_SUCCESS)
			result = -result;
		audit_syscall_exit(success, result);
	}

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}

/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};

static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;

			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer it's re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}

static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}

void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

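	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */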
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16 - r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}

void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* r16 - r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}

#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
				index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
					 &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}

void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			 dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) { /* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					 &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) { /* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}

static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}

static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   unsigned int pos, unsigned int count,
	   void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_gpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}
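/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */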
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static int
access_uarea(struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access)
{
	unsigned int pos = -1;
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon. This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read access here, because they may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset. The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID. The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
		       sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
		       sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}

struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned int i, unsigned int n,
	unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = i,
		.n = n,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}