linux/kernel/pid.c
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario is when all but one out of 1 million possible PIDs
 * are already allocated: scanning 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful set_bit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>

#define pid_hashfn(nr, ns)      \
        hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
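/*
 * Illustrative sketch (not part of the original file): the hash key
 * mixes the pid number with the namespace pointer, so the same nr in
 * two different namespaces usually lands in different buckets. A
 * hypothetical helper to fetch the bucket for a (nr, ns) pair:
 *
 *      static struct hlist_head *pid_bucket(int nr, struct pid_namespace *ns)
 *      {
 *              return &pid_hash[pid_hashfn(nr, ns)];
 *      }
 */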
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS           300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE           (PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
                struct pidmap *map, int off)
{
        return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
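/*
 * Worked example (illustrative; assumes 4K pages, so BITS_PER_PAGE is
 * 32768): pid 40000 lives in the second bitmap page at bit 7232, and
 * mk_pid() is the inverse of that split:
 *
 *      struct pidmap *map = &pid_ns->pidmap[40000 / BITS_PER_PAGE]; // pidmap[1]
 *      int off = 40000 & BITS_PER_PAGE_MASK;   // 40000 - 32768 == 7232
 *      int nr  = mk_pid(pid_ns, map, off);     // 1*32768 + 7232 == 40000
 */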

#define find_next_offset(map, off)                                      \
                find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
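/*
 * Rough memory math behind the lazy allocation (illustrative, assuming
 * 4K pages): the default pid_max of 32768 fits in one bitmap page
 * (32768 bits == 4096 bytes), while the 4-million-PID maximum needs
 * 4194304 / 32768 == 128 pages, i.e. 512K of bitmaps -- paid only if
 * pid_max is actually raised that far.
 */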
struct pid_namespace init_pid_ns = {
        .kref = {
                .refcount       = ATOMIC_INIT(2),
        },
        .pidmap = {
                [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
        },
        .last_pid = 0,
        .level = 0,
        .child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

int is_container_init(struct task_struct *tsk)
{
        int ret = 0;
        struct pid *pid;

        rcu_read_lock();
        pid = task_pid(tsk);
        if (pid != NULL && pid->numbers[pid->level].nr == 1)
                ret = 1;
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock).
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
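/*
 * The interleaving described above, spelled out (illustrative):
 *
 *      CPU 0                                   CPU 1
 *      -----                                   -----
 *      spin_lock(&pidmap_lock);
 *                                              write_lock_irq(&tasklist_lock);
 *      <interrupt>                             detach_pid() -> free_pid():
 *        read_lock(&tasklist_lock);              spin_lock(&pidmap_lock);
 *
 * CPU 0's irq handler spins on tasklist_lock (held for writing by
 * CPU 1), while CPU 1 spins on pidmap_lock (held by CPU 0): an ABBA
 * deadlock, avoided by disabling interrupts around pidmap_lock.
 */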

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
        int nr = upid->nr;
        struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
        int offset = nr & BITS_PER_PAGE_MASK;

        clear_bit(offset, map->page);
        atomic_inc(&map->nr_free);
}

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
        int i, offset, max_scan, pid, last = pid_ns->last_pid;
        struct pidmap *map;

        pid = last + 1;
        if (pid >= pid_max)
                pid = RESERVED_PIDS;
        offset = pid & BITS_PER_PAGE_MASK;
        map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
        max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
        for (i = 0; i <= max_scan; ++i) {
                if (unlikely(!map->page)) {
                        void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
                        /*
                         * Free the page if someone raced with us
                         * installing it:
                         */
                        spin_lock_irq(&pidmap_lock);
                        if (map->page)
                                kfree(page);
                        else
                                map->page = page;
                        spin_unlock_irq(&pidmap_lock);
                        if (unlikely(!map->page))
                                break;
                }
                if (likely(atomic_read(&map->nr_free))) {
                        do {
                                if (!test_and_set_bit(offset, map->page)) {
                                        atomic_dec(&map->nr_free);
                                        pid_ns->last_pid = pid;
                                        return pid;
                                }
                                offset = find_next_offset(map, offset);
                                pid = mk_pid(pid_ns, map, offset);
                        /*
                         * find_next_offset() found a bit, the pid from it
                         * is in-bounds, and if we fell back to the last
                         * bitmap block and the final block was the same
                         * as the starting point, pid is before last_pid.
                         */
                        } while (offset < BITS_PER_PAGE && pid < pid_max &&
                                        (i != max_scan || pid < last ||
                                            !((last+1) & BITS_PER_PAGE_MASK)));
                }
                if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &pid_ns->pidmap[0];
                        offset = RESERVED_PIDS;
                        if (unlikely(last == offset))
                                break;
                }
                pid = mk_pid(pid_ns, map, offset);
        }
        return -1;
}
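/*
 * Wraparound example (illustrative): with pid_max == 32768 and
 * last_pid == 32767, "pid = last + 1" overflows the range, so the
 * search restarts at RESERVED_PIDS (300); PIDs below 300 are handed
 * out only before the counter first wraps and are never reused
 * afterwards. If the scan comes full circle without finding a free
 * bit, -1 is returned and alloc_pid() below fails.
 */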

int next_pidmap(struct pid_namespace *pid_ns, int last)
{
        int offset;
        struct pidmap *map, *end;

        offset = (last + 1) & BITS_PER_PAGE_MASK;
        map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
        end = &pid_ns->pidmap[PIDMAP_ENTRIES];
        for (; map < end; map++, offset = 0) {
                if (unlikely(!map->page))
                        continue;
                offset = find_next_bit(map->page, BITS_PER_PAGE, offset);
                if (offset < BITS_PER_PAGE)
                        return mk_pid(pid_ns, map, offset);
        }
        return -1;
}

void put_pid(struct pid *pid)
{
        struct pid_namespace *ns;

        if (!pid)
                return;

        ns = pid->numbers[pid->level].ns;
        if ((atomic_read(&pid->count) == 1) ||
             atomic_dec_and_test(&pid->count)) {
                kmem_cache_free(ns->pid_cachep, pid);
                put_pid_ns(ns);
        }
}
EXPORT_SYMBOL_GPL(put_pid);
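/*
 * Usage sketch (illustrative): every get_pid() must be paired with a
 * put_pid(). The final put returns the struct pid to its namespace's
 * slab cache and drops the namespace reference taken in alloc_pid():
 *
 *      struct pid *pid = get_pid(task_pid(current));
 *      ... pid cannot be freed under us here ...
 *      put_pid(pid);
 */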

static void delayed_put_pid(struct rcu_head *rhp)
{
        struct pid *pid = container_of(rhp, struct pid, rcu);
        put_pid(pid);
}

void free_pid(struct pid *pid)
{
        /* We can be called with write_lock_irq(&tasklist_lock) held */
        int i;
        unsigned long flags;

        spin_lock_irqsave(&pidmap_lock, flags);
        for (i = 0; i <= pid->level; i++)
                hlist_del_rcu(&pid->numbers[i].pid_chain);
        spin_unlock_irqrestore(&pidmap_lock, flags);

        for (i = 0; i <= pid->level; i++)
                free_pidmap(pid->numbers + i);

        call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
        struct pid *pid;
        enum pid_type type;
        int i, nr;
        struct pid_namespace *tmp;
        struct upid *upid;

        pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
        if (!pid)
                goto out;

        tmp = ns;
        for (i = ns->level; i >= 0; i--) {
                nr = alloc_pidmap(tmp);
                if (nr < 0)
                        goto out_free;

                pid->numbers[i].nr = nr;
                pid->numbers[i].ns = tmp;
                tmp = tmp->parent;
        }

        get_pid_ns(ns);
        pid->level = ns->level;
        atomic_set(&pid->count, 1);
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        spin_lock_irq(&pidmap_lock);
        for (i = ns->level; i >= 0; i--) {
                upid = &pid->numbers[i];
                hlist_add_head_rcu(&upid->pid_chain,
                                &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
        }
        spin_unlock_irq(&pidmap_lock);

out:
        return pid;

out_free:
        while (++i <= ns->level)
                free_pidmap(pid->numbers + i);

        kmem_cache_free(ns->pid_cachep, pid);
        pid = NULL;
        goto out;
}
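/*
 * Layout example (illustrative, with hypothetical namespaces): a task
 * living in a level-2 namespace carries one upid per level, e.g. the
 * same task may be pid 7342 in the init namespace, 109 in the middle
 * one and 4 in its own:
 *
 *      pid->level == 2
 *      pid->numbers[0] == { .nr = 7342, .ns = &init_pid_ns }
 *      pid->numbers[1] == { .nr =  109, .ns = middle_ns }
 *      pid->numbers[2] == { .nr =    4, .ns = leaf_ns }
 *
 * Each upid is hashed separately above, so a lookup works from any
 * of the three namespaces.
 */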

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
        struct hlist_node *elem;
        struct upid *pnr;

        hlist_for_each_entry_rcu(pnr, elem,
                        &pid_hash[pid_hashfn(nr, ns)], pid_chain)
                if (pnr->nr == nr && pnr->ns == ns)
                        return container_of(pnr, struct pid,
                                        numbers[ns->level]);

        return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
        return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
                struct pid *pid)
{
        struct pid_link *link;

        link = &task->pids[type];
        link->pid = pid;
        hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
                        struct pid *new)
{
        struct pid_link *link;
        struct pid *pid;
        int tmp;

        link = &task->pids[type];
        pid = link->pid;

        hlist_del_rcu(&link->node);
        link->pid = new;

        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (!hlist_empty(&pid->tasks[tmp]))
                        return;

        free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
        __change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
                struct pid *pid)
{
        __change_pid(task, type, pid);
        attach_pid(task, type, pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
                           enum pid_type type)
{
        new->pids[type].pid = old->pids[type].pid;
        hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
                first = rcu_dereference(pid->tasks[type].first);
                if (first)
                        result = hlist_entry(first, struct task_struct, pids[type].node);
        }
        return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
        return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
        return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
}
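/*
 * Caller sketch (illustrative): the task returned here is only
 * guaranteed to stay around while rcu_read_lock() is held, so a
 * caller that wants to use it afterwards must take a reference --
 * the same pattern get_pid_task() below uses for a struct pid:
 *
 *      rcu_read_lock();
 *      task = find_task_by_vpid(nr);
 *      if (task)
 *              get_task_struct(task);
 *      rcu_read_unlock();
 */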

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid;
        rcu_read_lock();
        if (type != PIDTYPE_PID)
                task = task->group_leader;
        pid = get_pid(task->pids[type].pid);
        rcu_read_unlock();
        return pid;
}

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result;
        rcu_read_lock();
        result = pid_task(pid, type);
        if (result)
                get_task_struct(result);
        rcu_read_unlock();
        return result;
}

struct pid *find_get_pid(pid_t nr)
{
        struct pid *pid;

        rcu_read_lock();
        pid = get_pid(find_vpid(nr));
        rcu_read_unlock();

        return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
        struct upid *upid;
        pid_t nr = 0;

        if (pid && ns->level <= pid->level) {
                upid = &pid->numbers[ns->level];
                if (upid->ns == ns)
                        nr = upid->nr;
        }
        return nr;
}
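/*
 * Visibility example (illustrative): for the level-2 pid sketched
 * after alloc_pid() above, pid_nr_ns() returns 7342 when asked with
 * &init_pid_ns and 4 when asked with the task's own namespace; an
 * unrelated namespace at the same level fails the "upid->ns == ns"
 * check and yields 0, meaning "not visible here".
 */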

pid_t pid_vnr(struct pid *pid)
{
        return pid_nr_ns(pid, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
                        struct pid_namespace *ns)
{
        pid_t nr = 0;

        rcu_read_lock();
        if (!ns)
                ns = current->nsproxy->pid_ns;
        if (likely(pid_alive(task))) {
                if (type != PIDTYPE_PID)
                        task = task->group_leader;
                nr = pid_nr_ns(task->pids[type].pid, ns);
        }
        rcu_read_unlock();

        return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
        return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
        struct pid *pid;

        do {
                pid = find_pid_ns(nr, ns);
                if (pid)
                        break;
                nr = next_pidmap(ns, nr);
        } while (nr > 0);

        return pid;
}
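/*
 * Iteration sketch (illustrative): combined with pid_nr_ns(), this is
 * the building block for walking every live pid in a namespace in
 * numeric order (under rcu_read_lock()):
 *
 *      struct pid *pid;
 *      for (pid = find_ge_pid(1, ns); pid != NULL;
 *           pid = find_ge_pid(pid_nr_ns(pid, ns) + 1, ns))
 *              ... visit pid ...
 */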

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
        int i, pidhash_size;

        pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
                                           HASH_EARLY | HASH_SMALL,
                                           &pidhash_shift, NULL, 4096);
        pidhash_size = 1 << pidhash_shift;

        for (i = 0; i < pidhash_size; i++)
                INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
        init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
        /* Reserve PID 0. We never call free_pidmap(0) */
        set_bit(0, init_pid_ns.pidmap[0].page);
        atomic_dec(&init_pid_ns.pidmap[0].nr_free);

        init_pid_ns.pid_cachep = KMEM_CACHE(pid,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}