// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>

struct pid init_struct_pid = {
        .count          = REFCOUNT_INIT(1),
        .tasks          = {
                { .first = NULL },
                { .first = NULL },
                { .first = NULL },
        },
        .level          = 0,
        .numbers        = { {
                .nr             = 0,
                .ns             = &init_pid_ns,
        }, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS           300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;
/*
 * PID-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
        .ns.count = REFCOUNT_INIT(2),
        .idr = IDR_INIT(init_pid_ns.idr),
        .pid_allocated = PIDNS_ADDING,
        .level = 0,
        .child_reaper = &init_task,
        .user_ns = &init_user_ns,
        .ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
        .ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held, as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it, we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

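/**
 * put_pid - drop a reference on a struct pid
 * @pid: pid to drop a reference on, may be NULL
 *
 * When the last reference is dropped, the struct pid is returned to the
 * owning namespace's slab cache and the pid namespace reference it held
 * is released.
 */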
void put_pid(struct pid *pid)
{
        struct pid_namespace *ns;

        if (!pid)
                return;

        ns = pid->numbers[pid->level].ns;
        if (refcount_dec_and_test(&pid->count)) {
                kmem_cache_free(ns->pid_cachep, pid);
                put_pid_ns(ns);
        }
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
        struct pid *pid = container_of(rhp, struct pid, rcu);
        put_pid(pid);
}

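/**
 * free_pid - remove a pid from the IDR of every namespace it is visible in
 * @pid: pid to free
 *
 * The struct pid itself is released through call_rcu() so that lockless
 * find_pid_ns() lookups still in flight observe either the old entry or
 * nothing, never freed memory.
 */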
void free_pid(struct pid *pid)
{
        /* We can be called with write_lock_irq(&tasklist_lock) held */
        int i;
        unsigned long flags;

        spin_lock_irqsave(&pidmap_lock, flags);
        for (i = 0; i <= pid->level; i++) {
                struct upid *upid = pid->numbers + i;
                struct pid_namespace *ns = upid->ns;
                switch (--ns->pid_allocated) {
                case 2:
                case 1:
                        /* When all that is left in the pid namespace
                         * is the reaper, wake up the reaper.  The reaper
                         * may be sleeping in zap_pid_ns_processes().
                         */
                        wake_up_process(ns->child_reaper);
                        break;
                case PIDNS_ADDING:
                        /* Handle a fork failure of the first process */
                        WARN_ON(ns->child_reaper);
                        ns->pid_allocated = 0;
                        break;
                }

                idr_remove(&ns->idr, upid->nr);
        }
        spin_unlock_irqrestore(&pidmap_lock, flags);

        call_rcu(&pid->rcu, delayed_put_pid);
}

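/**
 * alloc_pid - allocate a struct pid with a number in @ns and each ancestor
 * @ns: the (possibly nested) pid namespace the task will be visible in
 * @set_tid: optional array of specific PIDs to claim, most nested first
 * @set_tid_size: number of valid entries in @set_tid, 0 if unused
 *
 * Returns the new struct pid on success, an ERR_PTR() on failure.
 */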
struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
                      size_t set_tid_size)
{
        struct pid *pid;
        enum pid_type type;
        int i, nr;
        struct pid_namespace *tmp;
        struct upid *upid;
        int retval = -ENOMEM;

        /*
         * set_tid_size contains the size of the set_tid array. Starting at
         * the most nested currently active PID namespace it tells alloc_pid()
         * which PID to set for a process in that most nested PID namespace
         * up to set_tid_size PID namespaces. It does not have to set the PID
         * for a process in all nested PID namespaces but set_tid_size must
         * never be greater than the current ns->level + 1.
         */
        if (set_tid_size > ns->level + 1)
                return ERR_PTR(-EINVAL);

        pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
        if (!pid)
                return ERR_PTR(retval);

        tmp = ns;
        pid->level = ns->level;

        for (i = ns->level; i >= 0; i--) {
                int tid = 0;

                if (set_tid_size) {
                        tid = set_tid[ns->level - i];

                        retval = -EINVAL;
                        if (tid < 1 || tid >= pid_max)
                                goto out_free;
                        /*
                         * Also fail if a PID != 1 is requested and
                         * no PID 1 exists.
                         */
                        if (tid != 1 && !tmp->child_reaper)
                                goto out_free;
                        retval = -EPERM;
                        if (!checkpoint_restore_ns_capable(tmp->user_ns))
                                goto out_free;
                        set_tid_size--;
                }

                idr_preload(GFP_KERNEL);
                spin_lock_irq(&pidmap_lock);

                if (tid) {
                        nr = idr_alloc(&tmp->idr, NULL, tid,
                                       tid + 1, GFP_ATOMIC);
                        /*
                         * If ENOSPC is returned it means that the PID is
                         * already in use. Return EEXIST in that case.
                         */
                        if (nr == -ENOSPC)
                                nr = -EEXIST;
                } else {
                        int pid_min = 1;
                        /*
                         * init really needs pid 1, but after reaching the
                         * maximum, wrap back to RESERVED_PIDS
                         */
                        if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
                                pid_min = RESERVED_PIDS;

                        /*
                         * Store a null pointer so find_pid_ns does not find
                         * a partially initialized PID (see below).
                         */
                        nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
                                              pid_max, GFP_ATOMIC);
                }
                spin_unlock_irq(&pidmap_lock);
                idr_preload_end();

                if (nr < 0) {
                        retval = (nr == -ENOSPC) ? -EAGAIN : nr;
                        goto out_free;
                }

                pid->numbers[i].nr = nr;
                pid->numbers[i].ns = tmp;
                tmp = tmp->parent;
        }

        /*
         * ENOMEM is not the most obvious choice especially for the case
         * where the child subreaper has already exited and the pid
         * namespace denies the creation of any new processes. But ENOMEM
         * is what we have exposed to userspace for a long time and it is
         * documented behavior for pid namespaces. So we can't easily
         * change it even if there were an error code better suited.
         */
        retval = -ENOMEM;

        get_pid_ns(ns);
        refcount_set(&pid->count, 1);
        spin_lock_init(&pid->lock);
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        init_waitqueue_head(&pid->wait_pidfd);
        INIT_HLIST_HEAD(&pid->inodes);

        upid = pid->numbers + ns->level;
        spin_lock_irq(&pidmap_lock);
        if (!(ns->pid_allocated & PIDNS_ADDING))
                goto out_unlock;
        for ( ; upid >= pid->numbers; --upid) {
                /* Make the PID visible to find_pid_ns. */
                idr_replace(&upid->ns->idr, pid, upid->nr);
                upid->ns->pid_allocated++;
        }
        spin_unlock_irq(&pidmap_lock);

        return pid;

out_unlock:
        spin_unlock_irq(&pidmap_lock);
        put_pid_ns(ns);

out_free:
        spin_lock_irq(&pidmap_lock);
        while (++i <= ns->level) {
                upid = pid->numbers + i;
                idr_remove(&upid->ns->idr, upid->nr);
        }

        /* On failure to allocate the first pid, reset the state */
        if (ns->pid_allocated == PIDNS_ADDING)
                idr_set_cursor(&ns->idr, 0);

        spin_unlock_irq(&pidmap_lock);

        kmem_cache_free(ns->pid_cachep, pid);
        return ERR_PTR(retval);
}
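
/*
 * Note (illustrative): alloc_pid() is normally reached from
 * copy_process() during fork()/clone(); the set_tid path corresponds to
 * clone3() called with a populated set_tid array, as used by
 * checkpoint/restore tools such as CRIU.
 */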

void disable_pid_allocation(struct pid_namespace *ns)
{
        spin_lock_irq(&pidmap_lock);
        ns->pid_allocated &= ~PIDNS_ADDING;
        spin_unlock_irq(&pidmap_lock);
}

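/*
 * Look up a struct pid by number in @ns. The IDR lookup itself is
 * lockless; callers must hold rcu_read_lock() (or otherwise pin the
 * pid) for the returned pointer to remain valid.
 */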
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
        return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
        return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
        return (type == PIDTYPE_PID) ?
                &task->thread_pid :
                &task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid = *task_pid_ptr(task, type);
        hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
                        struct pid *new)
{
        struct pid **pid_ptr = task_pid_ptr(task, type);
        struct pid *pid;
        int tmp;

        pid = *pid_ptr;

        hlist_del_rcu(&task->pid_links[type]);
        *pid_ptr = new;

        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (pid_has_task(pid, tmp))
                        return;

        free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
        __change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
                struct pid *pid)
{
        __change_pid(task, type, pid);
        attach_pid(task, type);
}

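/*
 * exchange_tids - swap the PIDTYPE_PID pids of two tasks. This is used,
 * for example, by de_thread() when a non-leader thread takes over the
 * thread group during exec; callers serialize via tasklist_lock.
 */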
void exchange_tids(struct task_struct *left, struct task_struct *right)
{
        struct pid *pid1 = left->thread_pid;
        struct pid *pid2 = right->thread_pid;
        struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
        struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

        /* Swap the single-entry tid lists */
        hlists_swap_heads_rcu(head1, head2);

        /* Swap the per-task_struct pid */
        rcu_assign_pointer(left->thread_pid, pid2);
        rcu_assign_pointer(right->thread_pid, pid1);

        /* Swap the cached value */
        WRITE_ONCE(left->pid, pid_nr(pid2));
        WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
                           enum pid_type type)
{
        if (type == PIDTYPE_PID)
                new->thread_pid = old->thread_pid;
        hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

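/*
 * pid_task - return the first task hashed on @pid for @type, or NULL.
 * Safe under rcu_read_lock() or with tasklist_lock held, as the lockdep
 * annotation below documents; the result must not be used after the
 * RCU read-side critical section ends.
 */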
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
                first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
                                              lockdep_tasklist_lock_is_held());
                if (first)
                        result = hlist_entry(first, struct task_struct, pid_links[(type)]);
        }
        return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "find_task_by_pid_ns() needs rcu_read_lock() protection");
        return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
        return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
        struct task_struct *task;

        rcu_read_lock();
        task = find_task_by_vpid(nr);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();

        return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid;
        rcu_read_lock();
        pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
        rcu_read_unlock();
        return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result;
        rcu_read_lock();
        result = pid_task(pid, type);
        if (result)
                get_task_struct(result);
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
        struct pid *pid;

        rcu_read_lock();
        pid = get_pid(find_vpid(nr));
        rcu_read_unlock();

        return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

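/*
 * pid_nr_ns - translate @pid to the number it carries in @ns; returns 0
 * when @pid is not visible there (i.e. @ns is neither the namespace the
 * pid was allocated in nor one of its ancestors).
 */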
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
        struct upid *upid;
        pid_t nr = 0;

        if (pid && ns->level <= pid->level) {
                upid = &pid->numbers[ns->level];
                if (upid->ns == ns)
                        nr = upid->nr;
        }
        return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

pid_t pid_vnr(struct pid *pid)
{
        return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
                        struct pid_namespace *ns)
{
        pid_t nr = 0;

        rcu_read_lock();
        if (!ns)
                ns = task_active_pid_ns(current);
        nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
        rcu_read_unlock();

        return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
        return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr, this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
        return idr_get_next(&ns->idr, &nr);
}

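/*
 * Resolve a user-supplied pidfd to a referenced struct pid. @flags is
 * filled with the pidfd's f_flags so that callers can honour
 * PIDFD_NONBLOCK.
 */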
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags)
{
        struct fd f;
        struct pid *pid;

        f = fdget(fd);
        if (!f.file)
                return ERR_PTR(-EBADF);

        pid = pidfd_pid(f.file);
        if (!IS_ERR(pid)) {
                get_pid(pid);
                *flags = f.file->f_flags;
        }

        fdput(f);
        return pid;
}

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:   struct pid that the pidfd will reference
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * This symbol should not be explicitly exported to loadable modules.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
int pidfd_create(struct pid *pid, unsigned int flags)
{
        int fd;

        if (!pid || !pid_has_task(pid, PIDTYPE_TGID))
                return -EINVAL;

        if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC))
                return -EINVAL;

        fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
                              flags | O_RDWR | O_CLOEXEC);
        if (fd < 0)
                put_pid(pid);

        return fd;
}

/**
 * pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the process identified by @pid. Currently, the process identified by
 * @pid must be a thread-group leader. This restriction currently exists
 * for all aspects of pidfds, including pidfd creation (CLONE_PIDFD cannot
 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
 * leaders).
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
        int fd;
        struct pid *p;

        if (flags & ~PIDFD_NONBLOCK)
                return -EINVAL;

        if (pid <= 0)
                return -EINVAL;

        p = find_get_pid(pid);
        if (!p)
                return -ESRCH;

        fd = pidfd_create(p, flags);

        put_pid(p);
        return fd;
}
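
/*
 * Illustrative userspace usage (not part of this file; shown with a raw
 * syscall since a libc wrapper may be absent):
 *
 *   int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *   struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *   poll(&pfd, 1, -1);   // pidfd becomes readable once the process exits
 */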

void __init pid_idr_init(void)
{
        /* Verify no one has done anything silly: */
        BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

        /* bump default and minimum pid_max based on number of cpus */
        pid_max = min(pid_max_max, max_t(int, pid_max,
                                PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
        pid_max_min = max_t(int, pid_max_min,
                                PIDS_PER_CPU_MIN * num_possible_cpus());
        pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

        idr_init(&init_pid_ns.idr);

        init_pid_ns.pid_cachep = KMEM_CACHE(pid,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}
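
/*
 * __pidfd_fget - fetch a struct file from another task's fd table,
 * provided the caller passes a ptrace access check. Holding
 * exec_update_lock guards against the target exec'ing (and so changing
 * credentials) concurrently.
 */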
static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
        struct file *file;
        int ret;

        ret = down_read_killable(&task->signal->exec_update_lock);
        if (ret)
                return ERR_PTR(ret);

        if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
                file = fget_task(task, fd);
        else
                file = ERR_PTR(-EPERM);

        up_read(&task->signal->exec_update_lock);

        return file ?: ERR_PTR(-EBADF);
}

static int pidfd_getfd(struct pid *pid, int fd)
{
        struct task_struct *task;
        struct file *file;
        int ret;

        task = get_pid_task(pid, PIDTYPE_PID);
        if (!task)
                return -ESRCH;

        file = __pidfd_fget(task, fd);
        put_task_struct(task);
        if (IS_ERR(file))
                return PTR_ERR(file);

        ret = receive_fd(file, O_CLOEXEC);
        fput(file);

        return ret;
}

/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd:      the pidfd file descriptor of the process
 * @fd:         the file descriptor number to get
 * @flags:      flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd and the file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
                unsigned int, flags)
{
        struct pid *pid;
        struct fd f;
        int ret;

        /* flags is currently unused - make sure it's unset */
        if (flags)
                return -EINVAL;

        f = fdget(pidfd);
        if (!f.file)
                return -EBADF;

        pid = pidfd_pid(f.file);
        if (IS_ERR(pid))
                ret = PTR_ERR(pid);
        else
                ret = pidfd_getfd(pid, fd);

        fdput(f);
        return ret;
}
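
/*
 * Illustrative userspace usage (raw syscall shown; pidfd and remotefd
 * are hypothetical descriptors):
 *
 *   int localfd = syscall(SYS_pidfd_getfd, pidfd, remotefd, 0);
 *
 * On success, localfd is a cloexec duplicate of the target process's
 * remotefd, available only if the caller may ptrace the target.
 */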