linux/kernel/pid.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of a million possible PIDs are
 * already allocated, requires scanning 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/memblock.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/anon_inodes.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <net/sock.h>

struct pid init_struct_pid = {
        .count          = REFCOUNT_INIT(1),
        .tasks          = {
                { .first = NULL },
                { .first = NULL },
                { .first = NULL },
        },
        .level          = 0,
        .numbers        = { {
                .nr             = 0,
                .ns             = &init_pid_ns,
        }, }
};

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS           300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

/*
 * PID-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, yet
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
        .kref = KREF_INIT(2),
        .idr = IDR_INIT(init_pid_ns.idr),
        .pid_allocated = PIDNS_ADDING,
        .level = 0,
        .child_reaper = &init_task,
        .user_ns = &init_user_ns,
        .ns.inum = PROC_PID_INIT_INO,
#ifdef CONFIG_PID_NS
        .ns.ops = &pidns_operations,
#endif
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
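
/*
 * Illustrative interleaving of that deadlock (editorial sketch, not code
 * from this file), assuming free_pid() took pidmap_lock without disabling
 * interrupts:
 *
 *      CPU0                                    CPU1
 *      ----                                    ----
 *      spin_lock(&pidmap_lock);
 *                                              write_lock_irq(&tasklist_lock);
 *                                              detach_pid() -> free_pid()
 *                                                spin_lock(&pidmap_lock);  // spins: CPU0 holds it
 *      <interrupt>
 *        read_lock(&tasklist_lock);            // spins: CPU1 holds the write lock
 *
 * CPU0 cannot release pidmap_lock until its interrupt handler returns,
 * the handler cannot return until CPU1 drops tasklist_lock, and CPU1 will
 * not drop it until it gets pidmap_lock. Hence the _irq variants below.
 */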

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

void put_pid(struct pid *pid)
{
        struct pid_namespace *ns;

        if (!pid)
                return;

        ns = pid->numbers[pid->level].ns;
        if (refcount_dec_and_test(&pid->count)) {
                kmem_cache_free(ns->pid_cachep, pid);
                put_pid_ns(ns);
        }
}
EXPORT_SYMBOL_GPL(put_pid);
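
/*
 * Example usage (editorial sketch): the usual reference dance around a
 * struct pid. A lookup is only stable inside the RCU section, so pin the
 * result with get_pid() before leaving it:
 *
 *      struct pid *pid;
 *
 *      rcu_read_lock();
 *      pid = get_pid(find_vpid(nr));   // take a reference under RCU
 *      rcu_read_unlock();
 *      if (pid) {
 *              ...                     // use the pid
 *              put_pid(pid);           // drops the ref; may free the pid
 *      }
 *
 * This is exactly the pattern find_get_pid() below wraps up.
 */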

static void delayed_put_pid(struct rcu_head *rhp)
{
        struct pid *pid = container_of(rhp, struct pid, rcu);
        put_pid(pid);
}

void free_pid(struct pid *pid)
{
        /* We can be called with write_lock_irq(&tasklist_lock) held */
        int i;
        unsigned long flags;

        spin_lock_irqsave(&pidmap_lock, flags);
        for (i = 0; i <= pid->level; i++) {
                struct upid *upid = pid->numbers + i;
                struct pid_namespace *ns = upid->ns;
                switch (--ns->pid_allocated) {
                case 2:
                case 1:
                        /* When all that is left in the pid namespace
                         * is the reaper, wake up the reaper. The reaper
                         * may be sleeping in zap_pid_ns_processes().
                         */
                        wake_up_process(ns->child_reaper);
                        break;
                case PIDNS_ADDING:
                        /* Handle a fork failure of the first process */
                        WARN_ON(ns->child_reaper);
                        ns->pid_allocated = 0;
                        break;
                }

                idr_remove(&ns->idr, upid->nr);
        }
        spin_unlock_irqrestore(&pidmap_lock, flags);

        call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
                      size_t set_tid_size)
{
        struct pid *pid;
        enum pid_type type;
        int i, nr;
        struct pid_namespace *tmp;
        struct upid *upid;
        int retval = -ENOMEM;

        /*
         * set_tid_size contains the size of the set_tid array. Starting at
         * the most deeply nested, currently active PID namespace, it tells
         * alloc_pid() which PID to pick for the new process in that
         * namespace and in up to set_tid_size of its ancestor namespaces.
         * PIDs need not be specified for every nested PID namespace, but
         * set_tid_size must never be greater than the current ns->level + 1.
         */
        if (set_tid_size > ns->level + 1)
                return ERR_PTR(-EINVAL);

        pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
        if (!pid)
                return ERR_PTR(retval);

        tmp = ns;
        pid->level = ns->level;

        for (i = ns->level; i >= 0; i--) {
                int tid = 0;

                if (set_tid_size) {
                        tid = set_tid[ns->level - i];

                        retval = -EINVAL;
                        if (tid < 1 || tid >= pid_max)
                                goto out_free;
                        /*
                         * Also fail if a PID != 1 is requested and
                         * no PID 1 exists.
                         */
                        if (tid != 1 && !tmp->child_reaper)
                                goto out_free;
                        retval = -EPERM;
                        if (!checkpoint_restore_ns_capable(tmp->user_ns))
                                goto out_free;
                        set_tid_size--;
                }

                idr_preload(GFP_KERNEL);
                spin_lock_irq(&pidmap_lock);

                if (tid) {
                        nr = idr_alloc(&tmp->idr, NULL, tid,
                                       tid + 1, GFP_ATOMIC);
                        /*
                         * If ENOSPC is returned it means that the PID is
                         * already in use. Return EEXIST in that case.
                         */
                        if (nr == -ENOSPC)
                                nr = -EEXIST;
                } else {
                        int pid_min = 1;
                        /*
                         * init really needs pid 1, but after reaching the
                         * maximum, wrap back to RESERVED_PIDS
                         */
                        if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS)
                                pid_min = RESERVED_PIDS;

                        /*
                         * Store a null pointer so find_pid_ns does not find
                         * a partially initialized PID (see below).
                         */
                        nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
                                              pid_max, GFP_ATOMIC);
                }
                spin_unlock_irq(&pidmap_lock);
                idr_preload_end();

                if (nr < 0) {
                        retval = (nr == -ENOSPC) ? -EAGAIN : nr;
                        goto out_free;
                }

                pid->numbers[i].nr = nr;
                pid->numbers[i].ns = tmp;
                tmp = tmp->parent;
        }

        /*
         * ENOMEM is not the most obvious choice, especially for the case
         * where the child subreaper has already exited and the pid
         * namespace denies the creation of any new processes. But ENOMEM
         * is what we have exposed to userspace for a long time and it is
         * documented behavior for pid namespaces. So we can't easily
         * change it even if a better-suited error code existed.
         */
        retval = -ENOMEM;

        get_pid_ns(ns);
        refcount_set(&pid->count, 1);
        spin_lock_init(&pid->lock);
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        init_waitqueue_head(&pid->wait_pidfd);
        INIT_HLIST_HEAD(&pid->inodes);

        upid = pid->numbers + ns->level;
        spin_lock_irq(&pidmap_lock);
        if (!(ns->pid_allocated & PIDNS_ADDING))
                goto out_unlock;
        for ( ; upid >= pid->numbers; --upid) {
                /* Make the PID visible to find_pid_ns. */
                idr_replace(&upid->ns->idr, pid, upid->nr);
                upid->ns->pid_allocated++;
        }
        spin_unlock_irq(&pidmap_lock);

        return pid;

out_unlock:
        spin_unlock_irq(&pidmap_lock);
        put_pid_ns(ns);

out_free:
        spin_lock_irq(&pidmap_lock);
        while (++i <= ns->level) {
                upid = pid->numbers + i;
                idr_remove(&upid->ns->idr, upid->nr);
        }

        /* On failure to allocate the first pid, reset the state */
        if (ns->pid_allocated == PIDNS_ADDING)
                idr_set_cursor(&ns->idr, 0);

        spin_unlock_irq(&pidmap_lock);

        kmem_cache_free(ns->pid_cachep, pid);
        return ERR_PTR(retval);
}
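
/*
 * Example usage (editorial sketch): how the fork path calls alloc_pid().
 * In the real kernel this lives in copy_process() in kernel/fork.c; the
 * fragment below is condensed.
 *
 *      struct pid *pid;
 *
 *      // set_tid/set_tid_size come from clone3()'s struct clone_args;
 *      // plain fork()/clone() pass NULL and 0, letting the IDR pick
 *      // the numbers cyclically.
 *      pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
 *                      args->set_tid_size);
 *      if (IS_ERR(pid))
 *              retval = PTR_ERR(pid);  // -EAGAIN, -EEXIST, -EPERM, ...
 */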

void disable_pid_allocation(struct pid_namespace *ns)
{
        spin_lock_irq(&pidmap_lock);
        ns->pid_allocated &= ~PIDNS_ADDING;
        spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
        return idr_find(&ns->idr, nr);
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
        return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);
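
/*
 * Example usage (editorial sketch): the pointer returned by find_vpid()
 * or find_pid_ns() is only guaranteed to stay valid inside the RCU
 * read-side section that looked it up, since free_pid() defers the final
 * put via call_rcu(). A transient user can stay under RCU throughout:
 *
 *      struct task_struct *task = NULL;
 *
 *      rcu_read_lock();
 *      pid = find_vpid(nr);            // no reference taken
 *      if (pid)
 *              task = pid_task(pid, PIDTYPE_PID);
 *      ...                             // only usable before the unlock
 *      rcu_read_unlock();
 *
 * Anything that must outlive the section takes a reference first, as
 * find_get_pid() and find_get_task_by_vpid() below do.
 */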

static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type)
{
        return (type == PIDTYPE_PID) ?
                &task->thread_pid :
                &task->signal->pids[type];
}

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid = *task_pid_ptr(task, type);
        hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]);
}
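
/*
 * Example usage (editorial sketch): the lock discipline a hypothetical
 * caller would follow; in the real kernel the fork path does this when
 * publishing a new task.
 *
 *      write_lock_irq(&tasklist_lock);
 *      attach_pid(p, PIDTYPE_PID);     // link p into pid->tasks[PIDTYPE_PID]
 *      write_unlock_irq(&tasklist_lock);
 *
 * The write lock serializes against pid_task() callers, which may walk
 * the list under RCU or with tasklist_lock read-held.
 */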

static void __change_pid(struct task_struct *task, enum pid_type type,
                        struct pid *new)
{
        struct pid **pid_ptr = task_pid_ptr(task, type);
        struct pid *pid;
        int tmp;

        pid = *pid_ptr;

        hlist_del_rcu(&task->pid_links[type]);
        *pid_ptr = new;

        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (pid_has_task(pid, tmp))
                        return;

        free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
        __change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
                struct pid *pid)
{
        __change_pid(task, type, pid);
        attach_pid(task, type);
}

void exchange_tids(struct task_struct *left, struct task_struct *right)
{
        struct pid *pid1 = left->thread_pid;
        struct pid *pid2 = right->thread_pid;
        struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
        struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];

        /* Swap the single entry tid lists */
        hlists_swap_heads_rcu(head1, head2);

        /* Swap the per task_struct pid */
        rcu_assign_pointer(left->thread_pid, pid2);
        rcu_assign_pointer(right->thread_pid, pid1);

        /* Swap the cached value */
        WRITE_ONCE(left->pid, pid_nr(pid2));
        WRITE_ONCE(right->pid, pid_nr(pid1));
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
                           enum pid_type type)
{
        if (type == PIDTYPE_PID)
                new->thread_pid = old->thread_pid;
        hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
                first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
                                              lockdep_tasklist_lock_is_held());
                if (first)
                        result = hlist_entry(first, struct task_struct, pid_links[(type)]);
        }
        return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "find_task_by_pid_ns() needs rcu_read_lock() protection");
        return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
        return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
        struct task_struct *task;

        rcu_read_lock();
        task = find_task_by_vpid(nr);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();

        return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid;
        rcu_read_lock();
        pid = get_pid(rcu_dereference(*task_pid_ptr(task, type)));
        rcu_read_unlock();
        return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result;
        rcu_read_lock();
        result = pid_task(pid, type);
        if (result)
                get_task_struct(result);
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
        struct pid *pid;

        rcu_read_lock();
        pid = get_pid(find_vpid(nr));
        rcu_read_unlock();

        return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
        struct upid *upid;
        pid_t nr = 0;

        if (pid && ns->level <= pid->level) {
                upid = &pid->numbers[ns->level];
                if (upid->ns == ns)
                        nr = upid->nr;
        }
        return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);
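
/*
 * Worked example (editorial sketch, invented numbers): a task living in a
 * child PID namespace carries one struct upid per level, say nr 7042 at
 * level 0 (init_pid_ns) and nr 23 at level 1 (the child). pid_nr_ns()
 * returns the number for whichever namespace the caller asks about:
 *
 *      pid_nr_ns(pid, &init_pid_ns);   // -> 7042
 *      pid_nr_ns(pid, child_ns);       // -> 23
 *      pid_nr_ns(pid, sibling_ns);     // -> 0, invisible from a sibling
 *
 * The ns->level <= pid->level check encodes that a namespace can only see
 * tasks at its own level or deeper.
 */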

pid_t pid_vnr(struct pid *pid)
{
        return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
                        struct pid_namespace *ns)
{
        pid_t nr = 0;

        rcu_read_lock();
        if (!ns)
                ns = task_active_pid_ns(current);
        nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
        rcu_read_unlock();

        return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
        return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr, this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
        return idr_get_next(&ns->idr, &nr);
}
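
/*
 * Example usage (editorial sketch): iterating all pids in a namespace the
 * way the /proc readdir code does, by advancing a cursor past each hit.
 * The loop below is a simplified, hypothetical caller.
 *
 *      struct pid *pid;
 *      int nr = 0;
 *
 *      rcu_read_lock();
 *      while ((pid = find_ge_pid(nr, ns)) != NULL) {
 *              nr = pid_nr_ns(pid, ns);        // first pid at or above nr
 *              ...                             // emit one entry
 *              nr++;                           // continue past it
 *      }
 *      rcu_read_unlock();
 */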

/**
 * pidfd_create() - Create a new pid file descriptor.
 *
 * @pid:  struct pid that the pidfd will reference
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set.
 *
 * Note that this function can only be called after the fd table has
 * been unshared to avoid leaking the pidfd to the new process.
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
static int pidfd_create(struct pid *pid)
{
        int fd;

        fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid),
                              O_RDWR | O_CLOEXEC);
        if (fd < 0)
                put_pid(pid);

        return fd;
}

/**
 * pidfd_open() - Open new pid file descriptor.
 *
 * @pid:   pid for which to retrieve a pidfd
 * @flags: flags to pass
 *
 * This creates a new pid file descriptor with the O_CLOEXEC flag set for
 * the process identified by @pid. Currently, the process identified by
 * @pid must be a thread-group leader. This restriction currently exists
 * for all aspects of pidfds including pidfd creation (CLONE_PIDFD cannot
 * be used with CLONE_THREAD) and pidfd polling (only supports thread group
 * leaders).
 *
 * Return: On success, a cloexec pidfd is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
{
        int fd;
        struct pid *p;

        if (flags)
                return -EINVAL;

        if (pid <= 0)
                return -EINVAL;

        p = find_get_pid(pid);
        if (!p)
                return -ESRCH;

        if (pid_has_task(p, PIDTYPE_TGID))
                fd = pidfd_create(p);
        else
                fd = -EINVAL;

        put_pid(p);
        return fd;
}
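
/*
 * Example usage (editorial sketch): calling pidfd_open() from userspace
 * via raw syscall(2), since older libcs lack a wrapper. Error handling is
 * condensed; target_pid is whatever process the caller wants to watch.
 *
 *      #include <poll.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      int pidfd = syscall(SYS_pidfd_open, target_pid, 0); // flags must be 0
 *      if (pidfd < 0)
 *              return -1;                      // ESRCH, EINVAL, ...
 *
 *      struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *      poll(&pfd, 1, -1);      // becomes readable once the process exits
 */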

void __init pid_idr_init(void)
{
        /* Verify no one has done anything silly: */
        BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);

        /* bump default and minimum pid_max based on number of cpus */
        pid_max = min(pid_max_max, max_t(int, pid_max,
                                PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
        pid_max_min = max_t(int, pid_max_min,
                                PIDS_PER_CPU_MIN * num_possible_cpus());
        pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

        idr_init(&init_pid_ns.idr);

        init_pid_ns.pid_cachep = KMEM_CACHE(pid,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
}
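
/*
 * Worked example (editorial sketch): with the usual values from
 * include/linux/threads.h (PID_MAX_DEFAULT = 32768, PIDS_PER_CPU_DEFAULT =
 * 1024, PIDS_PER_CPU_MIN = 8), a 64-CPU machine boots with:
 *
 *      pid_max     = min(pid_max_max, max(32768, 1024 * 64)) = 65536
 *      pid_max_min = max(301, 8 * 64)                        = 512
 *
 * On a 4-CPU machine both max() terms lose, so pid_max stays at
 * PID_MAX_DEFAULT (32768) and pid_max_min at RESERVED_PIDS + 1 (301).
 */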

static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
        struct file *file;
        int ret;

        ret = mutex_lock_killable(&task->signal->exec_update_mutex);
        if (ret)
                return ERR_PTR(ret);

        if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS))
                file = fget_task(task, fd);
        else
                file = ERR_PTR(-EPERM);

        mutex_unlock(&task->signal->exec_update_mutex);

        return file ?: ERR_PTR(-EBADF);
}

static int pidfd_getfd(struct pid *pid, int fd)
{
        struct task_struct *task;
        struct file *file;
        int ret;

        task = get_pid_task(pid, PIDTYPE_PID);
        if (!task)
                return -ESRCH;

        file = __pidfd_fget(task, fd);
        put_task_struct(task);
        if (IS_ERR(file))
                return PTR_ERR(file);

        ret = receive_fd(file, O_CLOEXEC);
        fput(file);

        return ret;
}
/**
 * sys_pidfd_getfd() - Get a file descriptor from another process
 *
 * @pidfd:      the pidfd file descriptor of the process
 * @fd:         the file descriptor number to get
 * @flags:      flags on how to get the fd (reserved)
 *
 * This syscall gets a copy of a file descriptor from another process
 * based on the pidfd and the file descriptor number. It requires that
 * the calling process has the ability to ptrace the process represented
 * by the pidfd. The process which is having its file descriptor copied
 * is otherwise unaffected.
 *
 * Return: On success, a cloexec file descriptor is returned.
 *         On error, a negative errno number will be returned.
 */
SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd,
                unsigned int, flags)
{
        struct pid *pid;
        struct fd f;
        int ret;

        /* flags is currently unused - make sure it's unset */
        if (flags)
                return -EINVAL;

        f = fdget(pidfd);
        if (!f.file)
                return -EBADF;

        pid = pidfd_pid(f.file);
        if (IS_ERR(pid))
                ret = PTR_ERR(pid);
        else
                ret = pidfd_getfd(pid, fd);

        fdput(f);
        return ret;
}
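
/*
 * Example usage (editorial sketch): grabbing fd 3 from another process via
 * raw syscall(2); the fd number and target_pid are invented, and the call
 * fails with EPERM unless the caller may ptrace the target.
 *
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      int pidfd   = syscall(SYS_pidfd_open, target_pid, 0);
 *      int localfd = syscall(SYS_pidfd_getfd, pidfd, 3, 0);    // flags = 0
 *      if (localfd < 0)
 *              perror("pidfd_getfd");
 */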