// SPDX-License-Identifier: GPL-2.0
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/context_tracking.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/syscall.h>
#include <asm/fsgsbase.h>

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_XSTATE,
	REGSET_TLS,
	REGSET_IOPERM32,
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
	REG_OFFSET_NAME(r15),
	REG_OFFSET_NAME(r14),
	REG_OFFSET_NAME(r13),
	REG_OFFSET_NAME(r12),
	REG_OFFSET_NAME(r11),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r8),
#endif
	REG_OFFSET_NAME(bx),
	REG_OFFSET_NAME(cx),
	REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si),
	REG_OFFSET_NAME(di),
	REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
	REG_OFFSET_NAME(ds),
	REG_OFFSET_NAME(es),
	REG_OFFSET_NAME(fs),
	REG_OFFSET_NAME(gs),
#endif
	REG_OFFSET_NAME(orig_ax),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(cs),
	REG_OFFSET_NAME(flags),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(ss),
	REG_OFFSET_END,
};

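/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */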
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

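/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 */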
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

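/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */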
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

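/*
 * Determines whether a value may be installed in a segment register.
 */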
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

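/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps.  The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved.  Thus the '&regs->sp'.
 *
 * Now, if the stack is empty, '&regs->sp' is out of range.  In this
 * case we try to take the previous stack.  To always return a non-null
 * stack pointer we fall back to regs as stack if no previous stack
 * exists.
 *
 * This is valid only for kernel mode traps.
 */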
unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
	unsigned long sp = (unsigned long)&regs->sp;
	u32 *prev_esp;

	if (context == (sp & ~(THREAD_SIZE - 1)))
		return sp;

	prev_esp = (u32 *)(context);
	if (*prev_esp)
		return (unsigned long)*prev_esp;

	return (unsigned long)regs;
}
EXPORT_SYMBOL_GPL(kernel_stack_pointer);

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode anyway (unless a
	 * good %cs and %ss are loaded by ptrace), as the CPU checks
	 * the segment registers on the way back to user mode.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		/* fall through */
	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

#else

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct,fs):
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct,gs):
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct,ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct,es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct,cs):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->cs = value;
		break;
	case offsetof(struct user_regs_struct,ss):
		if (unlikely(value == 0))
			return -EIO;
		task_pt_regs(task)->ss = value;
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct,fs_base):
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		/*
		 * When changing the FS base, use do_arch_prctl_64()
		 * to set the index to zero and to set the base
		 * as requested.
		 */
		if (child->thread.fsbase != value)
			return do_arch_prctl_64(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct,gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_MAX)
			return -EIO;
		if (child->thread.gsbase != value)
			return do_arch_prctl_64(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base):
		return x86_fsbase_read_task(task);
	case offsetof(struct user_regs_struct, gs_base):
		return x86_gsbase_read_task(task);
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count >= sizeof(*k)) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count >= sizeof(*u)) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	int i;
	struct thread_struct *thread = &(current->thread);

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		if (thread->ptrace_bps[i] == bp)
			break;
	}

	thread->debugreg6 |= (DR_TRAP0 << i);
}

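/*
 * Walk through every ptrace breakpoints for this thread and
 * build the dr7 value on top of their attributes.
 */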
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
	int i;
	int dr7 = 0;
	struct arch_hw_breakpoint *info;

	for (i = 0; i < HBP_NUM; i++) {
		if (bp[i] && !bp[i]->attr.disabled) {
			info = counter_arch_bp(bp[i]);
			dr7 |= encode_dr7(i, info->len, info->type);
		}
	}

	return dr7;
}

static int ptrace_fill_bp_fields(struct perf_event_attr *attr,
				 int len, int type, bool disabled)
{
	int err, bp_len, bp_type;

	err = arch_bp_generic_fields(len, type, &bp_len, &bp_type);
	if (!err) {
		attr->bp_len = bp_len;
		attr->bp_type = bp_type;
		attr->disabled = disabled;
	}

	return err;
}

static struct perf_event *
ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
			   unsigned long addr, bool disabled)
{
	struct perf_event_attr attr;
	int err;

	ptrace_breakpoint_init(&attr);
	attr.bp_addr = addr;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return ERR_PTR(err);

	return register_user_hw_breakpoint(&attr, ptrace_triggered,
					   NULL, tsk);
}

static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
				    int disabled)
{
	struct perf_event_attr attr = bp->attr;
	int err;

	err = ptrace_fill_bp_fields(&attr, len, type, disabled);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

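/*
 * Handle ptrace writes to debug register 7.
 */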
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long old_dr7;
	bool second_pass = false;
	int i, rc, ret = 0;

	data &= ~DR_CONTROL_RESERVED;
	old_dr7 = ptrace_get_dr7(thread->ptrace_bps);

restore:
	rc = 0;
	for (i = 0; i < HBP_NUM; i++) {
		unsigned len, type;
		bool disabled = !decode_dr7(data, i, &len, &type);
		struct perf_event *bp = thread->ptrace_bps[i];

		if (!bp) {
			if (disabled)
				continue;

			bp = ptrace_register_breakpoint(tsk,
					len, type, 0, disabled);
			if (IS_ERR(bp)) {
				rc = PTR_ERR(bp);
				break;
			}

			thread->ptrace_bps[i] = bp;
			continue;
		}

		rc = ptrace_modify_breakpoint(bp, len, type, disabled);
		if (rc)
			break;
	}

	/* Restore if the first pass failed, second_pass shouldn't fail. */
	if (rc && !WARN_ON(second_pass)) {
		ret = rc;
		data = old_dr7;
		second_pass = true;
		goto restore;
	}

	return ret;
}

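/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */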
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &tsk->thread;
	unsigned long val = 0;

	if (n < HBP_NUM) {
		struct perf_event *bp = thread->ptrace_bps[n];

		if (bp)
			val = bp->hw.info.address;
	} else if (n == 6) {
		val = thread->debugreg6;
	} else if (n == 7) {
		val = thread->ptrace_dr7;
	}
	return val;
}

static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
				      unsigned long addr)
{
	struct thread_struct *t = &tsk->thread;
	struct perf_event *bp = t->ptrace_bps[nr];
	int err = 0;

	if (!bp) {
		/*
		 * Put stub len and type to create an inactive but correct bp.
		 *
		 * CHECKME: the previous code returned -EIO if the addr wasn't
		 * a valid task virtual addr. The new one will return -EINVAL in
		 * this case.
		 * -EINVAL may be what we want for in-kernel breakpoints users,
		 * but -EIO looks better for ptrace, since we refuse a register
		 * writing for the user. And anyway this is the previous
		 * behaviour.
		 */
		bp = ptrace_register_breakpoint(tsk,
				X86_BREAKPOINT_LEN_1, X86_BREAKPOINT_WRITE,
				addr, true);
		if (IS_ERR(bp))
			err = PTR_ERR(bp);
		else
			t->ptrace_bps[nr] = bp;
	} else {
		struct perf_event_attr attr = bp->attr;

		attr.bp_addr = addr;
		err = modify_user_hw_breakpoint(bp, &attr);
	}

	return err;
}

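/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */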
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
			       unsigned long val)
{
	struct thread_struct *thread = &tsk->thread;
	/* There are no DR4 or DR5 registers */
	int rc = -EIO;

	if (n < HBP_NUM) {
		rc = ptrace_set_breakpoint_addr(tsk, n, val);
	} else if (n == 6) {
		thread->debugreg6 = val;
		rc = 0;
	} else if (n == 7) {
		rc = ptrace_write_dr7(tsk, val);
		if (!rc)
			thread->ptrace_dr7 = val;
	}
	return rc;
}

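/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */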
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

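/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */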
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* forward declaration */
#endif

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					(struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if ((int) addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					(struct user_desc __user *)data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl_64(child, data, addr);
		break;
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Warning: bizarre corner case fixup here.  A 32-bit
		 * debugger setting orig_eax means that the syscall
		 * restart logic applies.  If the syscall is restarted,
		 * it needs to restart as a 32-bit syscall, so flag the
		 * registers as poked by a 32-bit debugger even if the
		 * task is not currently sitting at the exit from a
		 * 32-bit syscall with TS_COMPAT still set.
		 */
		regs->orig_ax = value;
		if (syscall_get_nr(child, regs) >= 0)
			child->thread_info.status |= TS_I386_REGS_POKED;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k)) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u)) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count >= sizeof(*k) && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count >= sizeof(*u) && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
			     compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_X32_ABI
static long x32_arch_ptrace(struct task_struct *child,
			    compat_long_t request, compat_ulong_t caddr,
			    compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	/* Read 32bits at location addr in the USER area.  Only allow
	   to return the lower 32bits of segment and debug registers.  */
	case PTRACE_PEEKUSR: {
		u32 tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, (__u32 __user *)datap);
		break;
	}

	/* Write the word at location addr in the USER area.  Only allow
	   to update the lower 32bits of segment and debug registers.  */
	case PTRACE_POKEUSR:
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
		    addr < offsetof(struct user_regs_struct, cs))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
#ifdef CONFIG_X86_X32_ABI
	if (!in_ia32_syscall())
		return x32_arch_ptrace(child, request, caddr, cdata);
#endif
#ifdef CONFIG_IA32_EMULATION
	return ia32_arch_ptrace(child, request, caddr, cdata);
#else
	return 0;
#endif
}
#endif	/* CONFIG_COMPAT */

#ifdef CONFIG_X86_64

static struct user_regset x86_64_regsets[] __ro_after_init = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static struct user_regset x86_32_regsets[] __ro_after_init = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = regset_fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_XSTATE] = {
		.core_note_type = NT_X86_XSTATE,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = xstateregs_active, .get = xstateregs_get,
		.set = xstateregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

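/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 */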
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
	x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
	xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (!user_64bit_mode(task_pt_regs(task)))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	tsk->thread.trap_nr = X86_TRAP_DB;
	tsk->thread.error_code = error_code;

	/* Send us the fake SIGTRAP */
	force_sig_fault(SIGTRAP, si_code,
			user_mode(regs) ? (void __user *)regs->ip : NULL, tsk);
}

void user_single_step_report(struct pt_regs *regs)
{
	send_sigtrap(current, regs, 0, TRAP_BRKPT);
}