linux/kernel/pid.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of the 1 million possible PIDs
 * are already allocated, costs a scan of 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>

struct pid init_struct_pid = {
	.count		= REFCOUNT_INIT(1),
	.tasks		= {
		{ .first = NULL },
		{ .first = NULL },
		{ .first = NULL },
	},
	.level		= 0,
	.numbers	= { {
		.nr		= 0,
		.ns		= &init_pid_ns,
	}, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * PID-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = KREF_INIT(2),
	.idr = IDR_INIT(init_pid_ns.idr),
	.pid_allocated = PIDNS_ADDING,
	.level = 0,
	.child_reaper = &init_task,
	.user_ns = &init_user_ns,
	.ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
	.ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held, as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock).
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it, we can leave interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
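
/*
 * Illustrative interleaving of the hazard described above (a sketch, not
 * taken from an actual trace; assumes pidmap_lock were taken without
 * disabling interrupts):
 *
 *	CPU A					CPU B
 *	-----					-----
 *	spin_lock(&pidmap_lock)
 *						write_lock_irq(&tasklist_lock)
 *						detach_pid() -> free_pid()
 *						  -> spin_lock(&pidmap_lock)
 *						     (spins: held by CPU A)
 *	<interrupt>
 *	  read_lock(&tasklist_lock)
 *	    (spins: write-held by CPU B)
 *
 * Neither CPU can make progress, hence the _irq/_irqsave variants below.
 */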

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if (refcount_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		struct pid_namespace *ns = upid->ns;
		switch (--ns->pid_allocated) {
		case 2:
		case 1:
			/*
			 * When all that is left in the pid namespace
			 * is the reaper, wake up the reaper. The reaper
			 * may be sleeping in zap_pid_ns_processes().
			 */
			wake_up_process(ns->child_reaper);
			break;
		case PIDNS_ADDING:
			/* Handle a fork failure of the first process */
			WARN_ON(ns->child_reaper);
			ns->pid_allocated = 0;
			break;
		}

		idr_remove(&ns->idr, upid->nr);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
		      size_t set_tid_size)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;
	int retval = -ENOMEM;

	/*
	 * set_tid_size contains the size of the set_tid array. Starting at
	 * the most nested, currently active PID namespace, it tells
	 * alloc_pid() which PID to set for the process in that namespace
	 * and in up to set_tid_size of its ancestor PID namespaces. The PID
	 * does not have to be set in every nested PID namespace, but
	 * set_tid_size must never be greater than the current ns->level + 1.
	 */
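	/*
	 * A hypothetical example of the layout assumed above: with
	 * ns->level == 2 and set_tid_size == 2, set_tid[0] picks the PID
	 * in the most nested namespace (level 2) and set_tid[1] the PID
	 * in its parent (level 1); the level-0 PID is then chosen
	 * automatically by the cyclic allocator below.
	 */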
	if (set_tid_size > ns->level + 1)
		return ERR_PTR(-EINVAL);

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		return ERR_PTR(retval);

	tmp = ns;
	pid->level = ns->level;

	for (i = ns->level; i >= 0; i--) {
		int tid = 0;

		if (set_tid_size) {
			tid = set_tid[ns->level - i];

			retval = -EINVAL;
			if (tid < 1 || tid >= pid_max)
				goto out_free;
			/*
			 * Also fail if a PID != 1 is requested and
			 * no PID 1 exists.
			 */
			if (tid != 1 && !tmp->child_reaper)
				goto out_free;
			retval = -EPERM;
			if (!ns_capable(tmp->user_ns, CAP_SYS_ADMIN))
				goto out_free;
			set_tid_size--;
		}

		idr_preload(GFP_KERNEL);
		spin_lock_irq(&pidmap_lock);

		if (tid) {
			nr = idr_alloc(&tmp->idr, NULL, tid,
				       tid + 1, GFP_ATOMIC);
			/*
			 * If ENOSPC is returned it means that the PID is
			 * already in use. Return EEXIST in that case.
			 */
			if (nr == -ENOSPC)
				nr = -EEXIST;
		} else {
			int pid_min = 1;
			/*
			 * init really needs pid 1, but after reaching the
			 * maximum wrap back to RESERVED_PIDS
			 */
			if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
				pid_min = RESERVED_PIDS;

			/*
			 * Store a null pointer so find_pid_ns does not find
			 * a partially initialized PID (see below).
			 */
			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
					      pid_max, GFP_ATOMIC);
		}
		spin_unlock_irq(&pidmap_lock);
		idr_preload_end();

		if (nr < 0) {
			retval = (nr == -ENOSPC) ? -EAGAIN : nr;
			goto out_free;
		}

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	/*
	 * ENOMEM is not the most obvious choice especially for the case
	 * where the child subreaper has already exited and the pid
	 * namespace denies the creation of any new processes. But ENOMEM
	 * is what we have exposed to userspace for a long time and it is
	 * documented behavior for pid namespaces. So we can't easily
	 * change it even if there were an error code better suited.
	 */
	retval = -ENOMEM;

	get_pid_ns(ns);
	refcount_set(&pid->count, 1);
	spin_lock_init(&pid->lock);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	init_waitqueue_head(&pid->wait_pidfd);
	INIT_HLIST_HEAD(&pid->inodes);

	upid = pid->numbers + ns->level;
	spin_lock_irq(&pidmap_lock);
	if (!(ns->pid_allocated & PIDNS_ADDING))
		goto out_unlock;
	for ( ; upid >= pid->numbers; --upid) {
		/* Make the PID visible to find_pid_ns. */
		idr_replace(&upid->ns->idr, pid, upid->nr);
		upid->ns->pid_allocated++;
	}
	spin_unlock_irq(&pidmap_lock);

	return pid;

out_unlock:
	spin_unlock_irq(&pidmap_lock);
	put_pid_ns(ns);

out_free:
	spin_lock_irq(&pidmap_lock);
	while (++i <= ns->level) {
		upid = pid->numbers + i;
		idr_remove(&upid->ns->idr, upid->nr);
	}

	/* On failure to allocate the first pid, reset the state */
	if (ns->pid_allocated == PIDNS_ADDING)
		idr_set_cursor(&ns->idr, 0);

	spin_unlock_irq(&pidmap_lock);

	kmem_cache_free(ns->pid_cachep, pid);
	return ERR_PTR(retval);
}
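
/*
 * A sketch of the expected call site (copy_process() in kernel/fork.c is
 * the real caller; the exact surrounding code and label name are assumed
 * here for illustration, not quoted):
 *
 *	pid = alloc_pid(p->nsproxy->pid_ns_for_children,
 *			args->set_tid, args->set_tid_size);
 *	if (IS_ERR(pid)) {
 *		retval = PTR_ERR(pid);
 *		goto bad_fork_cleanup;	// hypothetical label
 *	}
 *
 * with set_tid/set_tid_size coming straight from clone3()'s
 * struct clone_args.
 */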

void disable_pid_allocation(struct pid_namespace *ns)
{
	spin_lock_irq(&pidmap_lock);
	ns->pid_allocated &= ~PIDNS_ADDING;
	spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
	return (type == PIDTYPE_PID) ?
		&task->thread_pid :
		&task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid = *task_pid_ptr(task, type);
	hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid **pid_ptr = task_pid_ptr(task, type);
	struct pid *pid;
	int tmp;

	pid = *pid_ptr;

	hlist_del_rcu(&task->pid_links[type]);
	*pid_ptr = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (pid_has_task(pid, tmp))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type);
}

void exchange_tids(struct task_struct *left, struct task_struct *right)
{
	struct pid *pid1 = left->thread_pid;
	struct pid *pid2 = right->thread_pid;
	struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
	struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

	/* Swap the single entry tid lists */
	hlists_swap_heads_rcu(head1, head2);

	/* Swap the per task_struct pid */
	rcu_assign_pointer(left->thread_pid, pid2);
	rcu_assign_pointer(right->thread_pid, pid1);

	/* Swap the cached value */
	WRITE_ONCE(left->pid, pid_nr(pid2));
	WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	if (type == PIDTYPE_PID)
		new->thread_pid = old->thread_pid;
	hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
					      lockdep_tasklist_lock_is_held());
		if (first)
			result = hlist_entry(first, struct task_struct, pid_links[(type)]);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
	rcu_read_unlock();
	return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);
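
/*
 * A minimal usage sketch for the reference-counted pair get_task_pid() /
 * put_pid() (caller code assumed for illustration, not taken from the tree):
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	if (pid) {
 *		... use pid without holding RCU ...
 *		put_pid(pid);
 *	}
 *
 * get_task_pid() takes its own reference under rcu_read_lock(), so the
 * caller may sleep while holding the struct pid.
 */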

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
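
/*
 * Worked example for pid_nr_ns() (made-up numbers): a task in a level-2
 * namespace might carry
 *
 *	numbers[] = { { .nr = 4321, level-0 ns },
 *		      { .nr =   57, level-1 ns },
 *		      { .nr =    2, level-2 ns } };
 *
 * pid_nr_ns() with the level-1 namespace returns 57; with an unrelated
 * namespace that also happens to sit at level 1 it returns 0, because the
 * upid's ns pointer is compared, not just the level.
 */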

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = task_active_pid_ns(current);
	nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	return idr_get_next(&ns->idr, &nr);
}

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:  struct pid that the pidfd will reference
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid)
{
	int fd;

	fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
			      O_RDWR | O_CLOEXEC);
	if (fd < 0)
		put_pid(pid);

	return fd;
}

/**
 * pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the process identified by @pid. Currently, the process identified by
 * @pid must be a thread-group leader. This restriction currently exists
 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
 * leaders).
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
	int fd;
	struct pid *p;

	if (flags)
		return -EINVAL;

	if (pid <= 0)
		return -EINVAL;

	p = find_get_pid(pid);
	if (!p)
		return -ESRCH;

	if (pid_has_task(p, PIDTYPE_TGID))
		fd = pidfd_create(p);
	else
		fd = -EINVAL;

	put_pid(p);
	return fd;
}
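
/*
 * Userspace usage sketch for pidfd_open() (illustrative only; assumes a
 * libc that exposes SYS_pidfd_open but has no dedicated wrapper):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <poll.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	if (pidfd >= 0) {
 *		struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *		poll(&pfd, 1, -1);	// readable once the process exits
 *		close(pidfd);
 *	}
 */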

void __init pid_idr_init(void)
{
	/* Verify no one has done anything silly: */
	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

	/* bump default and minimum pid_max based on number of cpus */
	pid_max = min(pid_max_max, max_t(int, pid_max,
				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
	pid_max_min = max_t(int, pid_max_min,
				PIDS_PER_CPU_MIN * num_possible_cpus());
	pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

	idr_init(&init_pid_ns.idr);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}
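
/*
 * Worked example for the sizing above, assuming the constants from
 * include/linux/threads.h (PIDS_PER_CPU_DEFAULT == 1024,
 * PIDS_PER_CPU_MIN == 8) and PID_MAX_DEFAULT == 0x8000:
 *
 *	64 CPUs: pid_max = min(pid_max_max, max(32768, 64 * 1024)) = 65536
 *	 4 CPUs: pid_max = min(pid_max_max, max(32768,  4 * 1024)) = 32768
 *
 * so small machines keep the historic 32768 default and large ones grow
 * proportionally, capped at PID_MAX_LIMIT.
 */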

static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
	struct file *file;
	int ret;

	ret = mutex_lock_killable(&task->signal->exec_update_mutex);
	if (ret)
		return ERR_PTR(ret);

	if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
		file = fget_task(task, fd);
	else
		file = ERR_PTR(-EPERM);

	mutex_unlock(&task->signal->exec_update_mutex);

	return file ?: ERR_PTR(-EBADF);
}

static int pidfd_getfd(struct pid *pid, int fd)
{
	struct task_struct *task;
	struct file *file;
	int ret;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return -ESRCH;

	file = __pidfd_fget(task, fd);
	put_task_struct(task);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = security_file_receive(file);
	if (ret) {
		fput(file);
		return ret;
	}

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		fput(file);
	else
		fd_install(ret, file);

	return ret;
}

/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd:      the pidfd file descriptor of the process
 * @fd:         the file descriptor number to get
 * @flags:      flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd and the file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
		unsigned int, flags)
{
	struct pid *pid;
	struct fd f;
	int ret;

	/* flags is currently unused - make sure it's unset */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	pid = pidfd_pid(f.file);
	if (IS_ERR(pid))
		ret = PTR_ERR(pid);
	else
		ret = pidfd_getfd(pid, fd);

	fdput(f);
	return ret;
}
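
/*
 * Userspace usage sketch for pidfd_getfd() (illustrative; the raw
 * syscall(2) invocations and the fd number 3 are assumptions for the
 * example):
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	// obtain a duplicate of the target's fd 3 in our own fd table
 *	int localfd = syscall(SYS_pidfd_getfd, pidfd, 3, 0);
 *	if (localfd >= 0) {
 *		... localfd refers to the same open file description ...
 *	}
 *
 * This requires PTRACE_MODE_ATTACH_REALCREDS-level access to the target,
 * as enforced by __pidfd_fget() above.
 */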