/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>

/*
 * Access another process' address space via ptrace.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}


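/*
 * Illustrative only, not part of this file: a PTRACE_PEEKDATA request
 * from an attached tracer ends up here via generic_ptrace_peekdata(),
 * one word at a time:
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)addr, NULL);
 *	if (word == -1 && errno != 0)
 *		perror("ptrace");
 */
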
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	rcu_read_lock();
	child->ptracer_cred = get_cred(__task_cred(new_parent));
	rcu_read_unlock();
}

/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group; set the signal
		 * for future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}

/* Ensure that nothing can wake it up, even SIGKILL */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

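/*
 * Undo ptrace_freeze_traced(): if a fatal signal arrived while the tracee
 * was frozen, let it wake up and die; otherwise drop it back to the
 * ordinary, killable TASK_TRACED state.
 */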
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	spin_lock_irq(&task->sighand->siglock);
	if (__fatal_signal_pending(task))
		wake_up_state(task, __TASK_TRACED);
	else
		task->state = TASK_TRACED;
	spin_unlock_irq(&task->sighand->siglock);
}

/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(child->state == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state) {
		if (!wait_task_inactive(child, __TASK_TRACED)) {
			/*
			 * This can only happen if may_ptrace_stop() fails and
			 * ptrace_stop() changes ->state back to TASK_RUNNING,
			 * so we should not worry about leaking __TASK_TRACED.
			 */
			WARN_ON(child->state == __TASK_TRACED);
			ret = -ESRCH;
		}
	}

	return ret;
}

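/* Check for CAP_SYS_PTRACE in @ns, optionally skipping the audit record. */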
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
	else
		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
}

/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;
	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);
	return !err;
}

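/*
 * Illustrative only, not part of this file: the two userspace requests
 * that reach ptrace_attach().  ATTACH stops the tracee with SIGSTOP;
 * SEIZE leaves it running and takes ptrace options in the data argument:
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	ptrace(PTRACE_SEIZE, pid, NULL, (void *)PTRACE_O_TRACEEXEC);
 */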
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;
		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * under ptrace.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	if (seize)
		flags |= PT_SEIZED;
	task->ptrace = flags;

	__ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
		signal_wake_up_state(task, __TASK_STOPPED);

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			__ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}

/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;
	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);
	return ret;
}

/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now; in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}

static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}

/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}

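/*
 * Copy @len bytes of tracee memory at @src into the tracer's user buffer
 * @dst, in sizeof(buf)-sized chunks.  Returns the number of bytes copied,
 * -EIO if nothing could be read, or -EFAULT if the copy to user space
 * faults.
 */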
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
				FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

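/*
 * Ptrace options are stored shifted into the high bits of task->ptrace;
 * e.g. PTRACE_O_TRACEEXIT requested via PTRACE_SETOPTIONS is kept as
 * (PTRACE_O_TRACEEXIT << PT_OPT_FLAG_SHIFT).
 */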
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned flags;

	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*info = *child->last_siginfo;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

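/*
 * Illustrative only, not part of this file -- the userspace side of
 * PTRACE_PEEKSIGINFO, peeking at up to four queued signals without
 * dequeueing them:
 *
 *	struct ptrace_peeksiginfo_args args = {
 *		.off = 0, .flags = 0, .nr = 4,
 *	};
 *	siginfo_t sigs[4];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, sigs);
 */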
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		siginfo_t info;
		s32 off = arg.off + i;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (off >= 0) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info) ||
			    __put_user(info.si_code, &uinfo->si_code)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}

#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	bool need_siglock;

	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 *
	 * If data == 0 we do not care if wait_task_stopped() reports the old
	 * status and clears the code too; this can't race with the tracee, it
	 * takes siglock after resume.
	 */
	need_siglock = data && !thread_group_empty(current);
	if (need_siglock)
		spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);
	if (need_siglock)
		spin_unlock_irq(&child->sighand->siglock);

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

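/* Find the regset in @view whose core note type matches @type, if any. */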
static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

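/*
 * Illustrative only, not part of this file -- the userspace side of
 * PTRACE_GETREGSET, reading the general-purpose registers (x86 shown):
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	// on return, iov.iov_len holds the amount actually filled in
 */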
static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}

/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
#endif

int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		if (copy_from_user(&siginfo, datavp, sizeof siginfo))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK:
		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

#ifdef PTRACE_SINGLESTEP
	case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		if (child->exit_state)	/* already dead */
			return 0;
		return ptrace_resume(child, request, SIGKILL);

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	default:
		break;
	}

	return ret;
}

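/* Look up the target by pid in the caller's pid namespace and pin it. */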
static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		if (!ret)
			arch_ptrace_attach(current);
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}

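/*
 * Default PTRACE_PEEKTEXT/PTRACE_PEEKDATA implementation: read one word
 * of tracee memory at @addr and store it at the tracer address @data.
 */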
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}

#if defined CONFIG_COMPAT

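/*
 * Compat counterpart of ptrace_request() for 32-bit tracers running on a
 * 64-bit kernel; word-sized transfers use compat_ulong_t.
 */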
int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		memset(&siginfo, 0, sizeof siginfo);
		if (copy_siginfo_from_user32(
			    &siginfo, (struct compat_siginfo __user *) datap))
			ret = -EFAULT;
		else
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(VERIFY_WRITE, uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}

COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		/*
		 * Some architectures need to do book-keeping after
		 * a ptrace attach.
		 */
		if (!ret)
			arch_ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */