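/*
 * x86 ptrace support: user_regset definitions, PEEKUSR/POKEUSR register
 * access, debug registers, branch trace store (BTS) tracing, and the
 * syscall entry/exit tracing hooks.
 */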
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/workqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

#include "tls.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,	/* the 64-bit view has no XFP regset */
	REGSET_TLS,
	REGSET_IOPERM32,
};

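/*
 * Determines which flags the user has access to
 * [1 = access, 0 = no access].
 */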
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode anyway if we try one.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		/* fall through */
	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct, gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct, ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct, es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct, cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE_MAX - 7;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct, gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

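/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */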
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0: return child->thread.debugreg0;
	case 1: return child->thread.debugreg1;
	case 2: return child->thread.debugreg2;
	case 3: return child->thread.debugreg3;
	case 6: return child->thread.debugreg6;
	case 7: return child->thread.debugreg7;
	}
	return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0: child->thread.debugreg0 = data; break;
	case 1: child->thread.debugreg1 = data; break;
	case 2: child->thread.debugreg2 = data; break;
	case 3: child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check the value: each of the four breakpoints
		 * owns a 4-bit control field at bits 16+4*i of DR7,
		 * holding its R/W (type) and LEN (size) settings.
		 * DR7_MASK has a bit set for every invalid field value:
		 * I/O breakpoints (type 0x2) are never allowed, execution
		 * breakpoints must have LEN 0, and 8-byte lengths are
		 * additionally rejected on 32-bit.
		 */
#ifdef CONFIG_X86_32
#define DR7_MASK	0x5f54
#else
#define DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}

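/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */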
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	/* Report the used extent of the bitmap, in regset-size units. */
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

#ifdef CONFIG_X86_PTRACE_BTS

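/*
 * A branch trace store context.
 *
 * Contexts may only be installed by ptrace_bts_config() and only for
 * ptraced tasks.
 *
 * Contexts are destroyed when the tracee is detached from the tracer.
 * The actual destruction work requires interrupts enabled, so the
 * work is deferred and will be scheduled during __ptrace_unlink().
 *
 * Contexts hold an additional task_struct reference on the traced
 * task, as well as a reference on the tracer's mm.
 *
 * Ptrace already holds a task_struct for the duration of ptrace operations,
 * but since destruction is deferred, it may be executed after both
 * tracer and tracee exited.
 */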
struct bts_context {
	/* The branch trace handle. */
	struct bts_tracer	*tracer;

	/* The buffer used to store the branch trace and its size. */
	void			*buffer;
	unsigned int		size;

	/* The mm that paid for the above buffer. */
	struct mm_struct	*mm;

	/* The task this context belongs to. */
	struct task_struct	*task;

	/* The signal to send on a bts buffer overflow. */
	unsigned int		bts_ovfl_signal;

	/* The work struct to destroy a context. */
	struct work_struct	work;
};

static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
{
	void *buffer = NULL;
	int err = -ENOMEM;

	err = account_locked_memory(current->mm, current->signal->rlim, size);
	if (err < 0)
		return err;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out_refund;

	context->buffer = buffer;
	context->size = size;
	context->mm = get_task_mm(current);

	return 0;

 out_refund:
	refund_locked_memory(current->mm, size);
	return err;
}

static inline void free_bts_buffer(struct bts_context *context)
{
	if (!context->buffer)
		return;

	kfree(context->buffer);
	context->buffer = NULL;

	refund_locked_memory(context->mm, context->size);
	context->size = 0;

	mmput(context->mm);
	context->mm = NULL;
}

static void free_bts_context_work(struct work_struct *w)
{
	struct bts_context *context;

	context = container_of(w, struct bts_context, work);

	ds_release_bts(context->tracer);
	put_task_struct(context->task);
	free_bts_buffer(context);
	kfree(context);
}

static inline void free_bts_context(struct bts_context *context)
{
	/* Destruction needs interrupts enabled; defer it to a workqueue. */
	INIT_WORK(&context->work, free_bts_context_work);
	schedule_work(&context->work);
}

static inline struct bts_context *alloc_bts_context(struct task_struct *task)
{
	struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (context) {
		context->task = task;
		task->bts = context;

		get_task_struct(task);
	}

	return context;
}

static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct bts_struct bts;
	const unsigned char *at;
	int error;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	/*
	 * Records are indexed from the most recent one backwards;
	 * the buffer is circular, so wrap around at the beginning.
	 */
	at = trace->ds.top - ((index + 1) * trace->ds.size);
	if ((void *)at < trace->ds.begin)
		at += (trace->ds.n * trace->ds.size);

	if (!trace->read)
		return -EOPNOTSUPP;

	error = trace->read(context->tracer, at, &bts);
	if (error < 0)
		return error;

	if (copy_to_user(out, &bts, sizeof(bts)))
		return -EFAULT;

	return sizeof(bts);
}

static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	const unsigned char *at;
	int error, drained = 0;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	if (!trace->read)
		return -EOPNOTSUPP;

	if (size < (trace->ds.top - trace->ds.begin))
		return -EIO;

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     out++, drained++, at += trace->ds.size) {
		struct bts_struct bts;

		error = trace->read(context->tracer, at, &bts);
		if (error < 0)
			return error;

		if (copy_to_user(out, &bts, sizeof(bts)))
			return -EFAULT;
	}

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	error = ds_reset_bts(context->tracer);
	if (error < 0)
		return error;

	return drained;
}

static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	struct ptrace_bts_config cfg;
	unsigned int flags = 0;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	context = child->bts;
	if (!context)
		context = alloc_bts_context(child);
	if (!context)
		return -ENOMEM;

	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
		if (!cfg.signal)
			return -EINVAL;

		/*
		 * Overflow-signal notification is not supported yet;
		 * the assignment below is intentionally unreachable
		 * until it is.
		 */
		return -EOPNOTSUPP;
		context->bts_ovfl_signal = cfg.signal;
	}

	ds_release_bts(context->tracer);
	context->tracer = NULL;

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
		int err;

		free_bts_buffer(context);
		if (!cfg.size)
			return 0;

		err = alloc_bts_buffer(context, cfg.size);
		if (err < 0)
			return err;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		flags |= BTS_USER;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		flags |= BTS_TIMESTAMPS;

	context->tracer =
		ds_request_bts_task(child, context->buffer, context->size,
				    NULL, (size_t)-1, flags);
	if (unlikely(IS_ERR(context->tracer))) {
		int error = PTR_ERR(context->tracer);

		free_bts_buffer(context);
		context->tracer = NULL;
		return error;
	}

	return sizeof(cfg);
}

static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct ptrace_bts_config cfg;

	context = child->bts;
	if (!context)
		return -ESRCH;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size	= trace->ds.end - trace->ds.begin;
	cfg.signal	= context->bts_ovfl_signal;
	cfg.bts_size	= sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (trace->ds.flags & BTS_USER)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (trace->ds.flags & BTS_TIMESTAMPS)
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	return ds_reset_bts(context->tracer);
}

static int ptrace_bts_size(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
}

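/*
 * Called from __ptrace_unlink() after the child has been moved back
 * to its original parent.
 */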
void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		free_bts_context(child->bts);
		child->bts = NULL;
	}
}
#endif /* CONFIG_X86_PTRACE_BTS */

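/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */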
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* defined below */
#endif

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR:
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * A 32-bit debugger setting orig_eax means to restore
		 * the state of the task restarting a 32-bit syscall.
		 * Make sure we interpret the -ERESTART* codes correctly
		 * in case the task is not actually still sitting at the
		 * exit from a 32-bit syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			task_thread_info(child)->status |= TS_COMPAT;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * read as 0.
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
#endif
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

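/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */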
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}

asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}