1
2
3
4
5
6
7
8
9
10#include <linux/capability.h>
11#include <linux/export.h>
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <linux/mm.h>
15#include <linux/highmem.h>
16#include <linux/pagemap.h>
17#include <linux/ptrace.h>
18#include <linux/security.h>
19#include <linux/signal.h>
20#include <linux/uio.h>
21#include <linux/audit.h>
22#include <linux/pid_namespace.h>
23#include <linux/syscalls.h>
24#include <linux/uaccess.h>
25#include <linux/regset.h>
26#include <linux/hw_breakpoint.h>
27#include <linux/cn_proc.h>
28#include <linux/compat.h>
29
30
31
32
33
34
35
36
/**
 * __ptrace_link - link the ptracee onto the tracer's ->ptraced list
 * @child: ptracee to be linked
 * @new_parent: new tracer
 *
 * @child must not already be on any ptrace list.  The tracer becomes
 * ->parent; ->real_parent is left untouched.
 *
 * Callers (ptrace_attach(), ptrace_traceme()) hold tasklist_lock for
 * writing.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
}
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to its real parent,
 * and restore the job-control state needed for it to resume normal
 * operation.  Callers hold tasklist_lock for writing.
 */
void __ptrace_unlink(struct task_struct *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);

	spin_lock(&child->sighand->siglock);

	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING, so do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if a group stop is in effect and
	 * @child isn't exiting, so the tracee participates in the stop.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * If no stop signal is recorded in the low jobctl bits,
		 * default to SIGSTOP for future stop reports.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If a transition to TASK_STOPPED is pending, or @child is in
	 * TASK_TRACED, kick it so it re-evaluates its state.  The @resume
	 * argument is true only here because @child may be TASK_TRACED;
	 * waking unconditionally could disrupt TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}
120
121
/*
 * Freeze a TASK_TRACED tracee in __TASK_TRACED (without TASK_WAKEKILL)
 * so that nothing — not even SIGKILL — can wake it while the tracer
 * operates on it.  Returns true if the tracee was frozen.
 */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless check: only the tracer (us) manipulates LISTENING. */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	/* Don't freeze if a fatal signal is already pending. */
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
139
/*
 * Undo ptrace_freeze_traced(): restore TASK_TRACED (i.e. make the tracee
 * killable again), or wake it immediately if a fatal signal arrived while
 * it was frozen.
 */
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	/*
	 * Recheck the state under siglock: the tracee may have been woken
	 * concurrently between the lockless check above and here.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (task->state == __TASK_TRACED) {
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			task->state = TASK_TRACED;
	}
	spin_unlock_irq(&task->sighand->siglock);
}
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced and
 * not executing (it is frozen via ptrace_freeze_traced(); the caller must
 * later call ptrace_unfreeze_traced()).  If @ignore_state is %true, @child
 * can be in any state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	tasklist_read_lock();
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL here: release_task() does
		 * ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	qread_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if the tracee dropped out of
			 * the traced state after the freeze; give up rather
			 * than operate on a running task.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}
215
216static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
217{
218 if (mode & PTRACE_MODE_NOAUDIT)
219 return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
220 else
221 return has_ns_capability(current, ns, CAP_SYS_PTRACE);
222}
223
224
/*
 * May we inspect the given task?
 *
 * Access is granted when:
 *  1. the caller is in the same thread group as @task, or
 *  2. the caller's uid/gid (real or fs, depending on @mode) match all of
 *     the target's {e,s,}uid and {e,s,}gid, or the caller has
 *     CAP_SYS_PTRACE in the target's user namespace;
 * and additionally the target is dumpable (or the caller has
 * CAP_SYS_PTRACE), and — unless PTRACE_MODE_NOACCESS_CHK — the security
 * module approves.
 *
 * Called with @task locked (see ptrace_may_access()).
 * Returns 0 if access is permitted, -EPERM otherwise.
 */
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	int dumpable = 0;
	kuid_t caller_uid;
	kgid_t caller_gid;

	/* A task may always examine members of its own thread group. */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * requested a real-uid based check.
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid) &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (!(mode & PTRACE_MODE_NOACCESS_CHK) &&
	    ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	/*
	 * NOTE(review): smp_rmb() before reading ->mm dumpability —
	 * presumably pairs with a write barrier on the path that updates
	 * the mm's dumpable state; confirm against the setter.
	 */
	smp_rmb();
	if (task->mm)
		dumpable = get_dumpable(task->mm);
	rcu_read_lock();
	/* Non-dumpable targets require CAP_SYS_PTRACE in their userns. */
	if (dumpable != SUID_DUMP_USER &&
	    ((mode & PTRACE_MODE_NOACCESS_CHK) ||
	     !ptrace_has_cap(__task_cred(task)->user_ns, mode))) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	/* Finally, let the LSM veto the access. */
	if (!(mode & PTRACE_MODE_NOACCESS_CHK))
		return security_ptrace_access_check(task, mode);

	return 0;
}
301
302bool ptrace_may_access(struct task_struct *task, unsigned int mode)
303{
304 int err;
305 task_lock(task);
306 err = __ptrace_may_access(task, mode);
307 task_unlock(task);
308 return !err;
309}
310
/*
 * Attach to @task on behalf of PTRACE_ATTACH or PTRACE_SEIZE.
 *
 * @request: PTRACE_ATTACH or PTRACE_SEIZE
 * @addr:    must be 0 for SEIZE
 * @flags:   for SEIZE, PTRACE_O_* options to set atomically with attach
 *
 * Returns 0 on success; -EIO/-EPERM/-ERESTARTNOINTR on failure.
 */
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	/* Kernel threads and our own thread group can't be traced. */
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently under
	 * ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	tasklist_write_lock_irq();
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	/* NOTE(review): redundant — PT_SEIZED is already set above for seize */
	if (seize)
		flags |= PT_SEIZED;
	rcu_read_lock();
	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
		flags |= PT_PTRACE_CAP;
	rcu_read_unlock();
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap the tracee on attach. */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides the STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread, but a different thread in the same group can
	 * still observe the transient RUNNING state.
	 *
	 * The task_is_stopped() test is safe as both transitions in and
	 * out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	qwrite_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		int trapping_bit = JOBCTL_TRAPPING_BIT;
#ifdef __BIG_ENDIAN
		/*
		 * ->jobctl is narrower than a long (per the sizeof math
		 * below), so on big-endian the bit index within the
		 * containing word must be adjusted for wait_on_bit().
		 */
		trapping_bit += (sizeof(long) - sizeof(task->jobctl))
				* BITS_PER_BYTE;
#endif
		/*
		 * Wait for the TRAPPING bit to clear, i.e. for the tracee
		 * to complete the STOPPED -> TRACED transition.  If the
		 * wait is interrupted by SIGKILL we don't bother changing
		 * retval: the tracer is exiting anyway and __ptrace_unlink()
		 * will clean up.
		 */
		wait_on_bit(&task->jobctl, trapping_bit, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
424
425
426
427
428
429
430
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED, making the real parent the tracer.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	tasklist_write_lock_irq();
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace().  Otherwise we don't report the error but
		 * pretend ->real_parent untraced us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	qwrite_unlock_irq(&tasklist_lock);

	return ret;
}
453
454
455
456
457static int ignoring_children(struct sighand_struct *sigh)
458{
459 int ret;
460 spin_lock(&sigh->siglock);
461 ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
462 (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
463 spin_unlock(&sigh->siglock);
464 return ret;
465}
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if the caller needs to release_task() it.
 *
 * If the tracee is a zombie, our attachedness prevented normal parent
 * notification or self-reaping.  Do the notification now if it would
 * have happened earlier; if it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do.  But if our
 * normal children self-reap, then this child was prevented by ptrace
 * and we must reap it now — and wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	/* Non-leader zombie threads always self-reap. */
	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}

	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}
506
/*
 * Implement PTRACE_DETACH: undo the effects of attach and resume the
 * tracee with signal @data (0 = none).
 */
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	tasklist_write_lock_irq();
	/*
	 * The child can already have been killed.  Make sure de_thread()
	 * or our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
	}
	qwrite_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);
	if (unlikely(dead))
		release_task(child);

	return 0;
}
535
536
537
538
539
540
/*
 * Detach all tasks we were using ptrace on.  Called with tasklist held
 * for writing, and returns with it held too.  But note it can release
 * and reacquire the lock.
 */
void exit_ptrace(struct task_struct *tracer)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct task_struct *p, *n;
	LIST_HEAD(ptrace_dead);

	if (likely(list_empty(&tracer->ptraced)))
		return;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		/* PT_EXITKILL: kill the tracee when the tracer exits. */
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		/* Collect tracees that must be released outside the lock. */
		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, &ptrace_dead);
	}

	qwrite_unlock_irq(&tasklist_lock);
	BUG_ON(!list_empty(&tracer->ptraced));

	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	tasklist_write_lock_irq();
}
569
570int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
571{
572 int copied = 0;
573
574 while (len > 0) {
575 char buf[128];
576 int this_len, retval;
577
578 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
579 retval = access_process_vm(tsk, src, buf, this_len, 0);
580 if (!retval) {
581 if (copied)
582 break;
583 return -EIO;
584 }
585 if (copy_to_user(dst, buf, retval))
586 return -EFAULT;
587 copied += retval;
588 src += retval;
589 dst += retval;
590 len -= retval;
591 }
592 return copied;
593}
594
595int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
596{
597 int copied = 0;
598
599 while (len > 0) {
600 char buf[128];
601 int this_len, retval;
602
603 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
604 if (copy_from_user(buf, src, this_len))
605 return -EFAULT;
606 retval = access_process_vm(tsk, dst, buf, this_len, FOLL_WRITE);
607 if (!retval) {
608 if (copied)
609 break;
610 return -EIO;
611 }
612 copied += retval;
613 src += retval;
614 dst += retval;
615 len -= retval;
616 }
617 return copied;
618}
619
620static int ptrace_setoptions(struct task_struct *child, unsigned long data)
621{
622 unsigned flags;
623
624 if (data & ~(unsigned long)PTRACE_O_MASK)
625 return -EINVAL;
626
627
628 flags = child->ptrace;
629 flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
630 flags |= (data << PT_OPT_FLAG_SHIFT);
631 child->ptrace = flags;
632
633 return 0;
634}
635
636static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
637{
638 unsigned long flags;
639 int error = -ESRCH;
640
641 if (lock_task_sighand(child, &flags)) {
642 error = -EINVAL;
643 if (likely(child->last_siginfo != NULL)) {
644 *info = *child->last_siginfo;
645 error = 0;
646 }
647 unlock_task_sighand(child, &flags);
648 }
649 return error;
650}
651
652static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
653{
654 unsigned long flags;
655 int error = -ESRCH;
656
657 if (lock_task_sighand(child, &flags)) {
658 error = -EINVAL;
659 if (likely(child->last_siginfo != NULL)) {
660 *child->last_siginfo = *info;
661 error = 0;
662 }
663 unlock_task_sighand(child, &flags);
664 }
665 return error;
666}
667
/*
 * Implement PTRACE_PEEKSIGINFO: copy up to arg.nr siginfo entries,
 * starting at queue position arg.off, from the tracee's private (or,
 * with PTRACE_PEEKSIGINFO_SHARED, shared) pending queue into the user
 * buffer at @data.  Returns the number of entries copied, or an error
 * if nothing was copied.
 */
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		/*
		 * NOTE(review): arg.off is truncated into a signed 32-bit
		 * offset here — verify large-offset behavior against the
		 * upstream PTRACE_PEEKSIGINFO overflow fix.
		 */
		s32 off = arg.off + i;

		/* Walk to the off'th queued signal under siglock. */
		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the queue */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(is_compat_task())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		/* Bail out early if the tracer itself has a signal pending. */
		if (signal_pending(current))
			break;

		cond_resched();
	}

	/* Partial success wins over a late error. */
	if (i > 0)
		return i;

	return ret;
}
745
/*
 * Per-architecture resume-request predicates.  Each evaluates to 0 when
 * the corresponding PTRACE_* request isn't defined for this architecture,
 * so the checks in ptrace_resume() compile away cleanly.
 */
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request) ((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request) 0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request) ((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request) 0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request) ((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request) 0
#endif
763
/*
 * Resume execution of a stopped tracee for PTRACE_CONT, PTRACE_SYSCALL,
 * PTRACE_KILL and the arch-dependent single-step/block-step/sysemu
 * requests.  @data is the signal number to deliver on resume (0 = none).
 */
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Trap at syscall entry/exit only for PTRACE_SYSCALL. */
	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Wake the tracee out of __TASK_TRACED; ->exit_code carries the
	 * signal to deliver on the way out of the trap.
	 */
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);

	return 0;
}
799
800#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
801
802static const struct user_regset *
803find_regset(const struct user_regset_view *view, unsigned int type)
804{
805 const struct user_regset *regset;
806 int n;
807
808 for (n = 0; n < view->n; ++n) {
809 regset = view->regsets + n;
810 if (regset->core_note_type == type)
811 return regset;
812 }
813
814 return NULL;
815}
816
817static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
818 struct iovec *kiov)
819{
820 const struct user_regset_view *view = task_user_regset_view(task);
821 const struct user_regset *regset = find_regset(view, type);
822 int regset_no;
823
824 if (!regset || (kiov->iov_len % regset->size) != 0)
825 return -EINVAL;
826
827 regset_no = regset - view->regsets;
828 kiov->iov_len = min(kiov->iov_len,
829 (__kernel_size_t) (regset->n * regset->size));
830
831 if (req == PTRACE_GETREGSET)
832 return copy_regset_to_user(task, view, regset_no, 0,
833 kiov->iov_len, kiov->iov_base);
834 else
835 return copy_regset_from_user(task, view, regset_no, 0,
836 kiov->iov_len, kiov->iov_base);
837}
838
839
840
841
842
843
844EXPORT_SYMBOL_GPL(task_user_regset_view);
845#endif
846
/*
 * Generic handler for ptrace requests not consumed by arch_ptrace().
 * @addr and @data are interpreted per request; see ptrace(2) for the
 * user-visible contract of each request.
 */
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		/* @addr carries the sigset size for ABI sanity. */
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		/* SIGKILL and SIGSTOP may never be blocked. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb an existing trap, with one
		 * exception: if the ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * the tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop deferred) happens, the tracee will enter the STOP
		 * trap again.  Alternatively, the ptracer can issue
		 * INTERRUPT to finish listening and re-trap the tracee
		 * into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, an event happened between the
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH: /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		/* Already dead: nothing to do. */
		if (child->exit_state)
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif
	default:
		break;
	}

	return ret;
}
1060
1061static struct task_struct *ptrace_get_task_struct(pid_t pid)
1062{
1063 struct task_struct *child;
1064
1065 rcu_read_lock();
1066 child = find_task_by_vpid(pid);
1067 if (child)
1068 get_task_struct(child);
1069 rcu_read_unlock();
1070
1071 if (!child)
1072 return ERR_PTR(-ESRCH);
1073 return child;
1074}
1075
1076#ifndef arch_ptrace_attach
1077#define arch_ptrace_attach(child) do { } while (0)
1078#endif
1079
/*
 * ptrace(2) entry point: handle TRACEME/ATTACH/SEIZE here, verify the
 * attach state for everything else, and hand the request to the
 * architecture's arch_ptrace().
 */
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	/* Undo the __TASK_TRACED freeze set up by ptrace_check_attach(). */
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
1124
1125int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
1126 unsigned long data)
1127{
1128 unsigned long tmp;
1129 int copied;
1130
1131 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
1132 if (copied != sizeof(tmp))
1133 return -EIO;
1134 return put_user(tmp, (unsigned long __user *)data);
1135}
1136
1137int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
1138 unsigned long data)
1139{
1140 int copied;
1141
1142 copied = access_process_vm(tsk, addr, &data, sizeof(data), FOLL_WRITE);
1143 return (copied == sizeof(data)) ? 0 : -EIO;
1144}
1145
1146#if defined CONFIG_COMPAT
1147#include <linux/compat.h>
1148
/*
 * Compat counterpart of ptrace_request(): handles the requests whose
 * argument layout differs for 32-bit tracers and forwards everything
 * else to ptrace_request().
 */
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* Transfer a single 32-bit word. */
		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		/* Widen the compat iovec into a native one. */
		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}
1226
/*
 * 32-bit entry point for ptrace(2): mirrors the native syscall path but
 * dispatches to compat_arch_ptrace() for the request itself.
 */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
				  compat_long_t addr, compat_long_t data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		/* Undo the freeze set up by ptrace_check_attach(). */
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
1268#endif
1269
1270#ifdef CONFIG_HAVE_HW_BREAKPOINT
1271int ptrace_get_breakpoints(struct task_struct *tsk)
1272{
1273 if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
1274 return 0;
1275
1276 return -1;
1277}
1278
/*
 * Drop a reference taken by ptrace_get_breakpoints(); the last dropper
 * flushes the task's hardware breakpoints.
 */
void ptrace_put_breakpoints(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
		flush_ptrace_hw_breakpoint(tsk);
}
1284#endif
1285