linux/kernel/pid.c
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash chains are only ever modified with pidmap_lock held and
 * interrupts disabled, and lookups use the RCU hlist primitives, so
 * readers only need rcu_read_lock(); no additional SMP locking is
 * needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of roughly 1 million possible
 * PIDs are already allocated, costs a scan of 32 list entries and at
 * most PAGE_SIZE bytes. The typical fastpath is a single successful
 * test_and_set_bit(). Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>

#define pid_hashfn(nr, ns)      \
        hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
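/*
 * Mixing the namespace pointer into the hash value makes the same
 * numeric pid hash to different buckets in different namespaces, so
 * the upids of every namespace level can share the one global pid_hash
 * table (see find_pid_ns(), which matches on both nr and ns).
 */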
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;
static struct kmem_cache *pid_ns_cachep;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS           300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE           (PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
                struct pidmap *map, int off)
{
        return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
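
/*
 * A worked example, assuming 4 KB pages (so BITS_PER_PAGE == 32768) and
 * a pid_max raised above the default: pid 40000 lives in pidmap[1] at
 * bit offset 7232, and mk_pid() recovers 1 * 32768 + 7232 == 40000 from
 * the map pointer and the offset.
 */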

#define find_next_offset(map, off)                                      \
                find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL; they are allocated upon first use
 * and never deallocated. This way a low pid_max value does not cause
 * lots of bitmaps to be allocated, but the scheme still scales up to
 * 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
        .kref = {
                .refcount       = ATOMIC_INIT(2),
        },
        .pidmap = {
                [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
        },
        .last_pid = 0,
        .level = 0,
        .child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * is_container_init - report whether @tsk is pid 1, i.e. the init task,
 * in its own pid namespace.
 */
int is_container_init(struct task_struct *tsk)
{
        int ret = 0;
        struct pid *pid;

        rcu_read_lock();
        pid = task_pid(tsk);
        if (pid != NULL && pid->numbers[pid->level].nr == 1)
                ret = 1;
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held, as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock).
 *
 * Once we clean up the tasklist_lock and know there are no
 * irq handlers that take it, we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
        struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
        int offset = pid & BITS_PER_PAGE_MASK;

        clear_bit(offset, map->page);
        atomic_inc(&map->nr_free);
}

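/*
 * alloc_pidmap - grab the next free pid after ->last_pid in @pid_ns.
 * The bitmap scan itself is lock-free (pidmap_lock is only taken to
 * install a freshly allocated bitmap page); on reaching pid_max the
 * search wraps around to RESERVED_PIDS, and -1 is returned when the
 * whole pid space is in use.
 */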
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
        int i, offset, max_scan, pid, last = pid_ns->last_pid;
        struct pidmap *map;

        pid = last + 1;
        if (pid >= pid_max)
                pid = RESERVED_PIDS;
        offset = pid & BITS_PER_PAGE_MASK;
        map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
        max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
        for (i = 0; i <= max_scan; ++i) {
                if (unlikely(!map->page)) {
                        void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
                        /*
                         * Free the page if someone raced with us
                         * installing it:
                         */
                        spin_lock_irq(&pidmap_lock);
                        if (map->page)
                                kfree(page);
                        else
                                map->page = page;
                        spin_unlock_irq(&pidmap_lock);
                        if (unlikely(!map->page))
                                break;
                }
                if (likely(atomic_read(&map->nr_free))) {
                        do {
                                if (!test_and_set_bit(offset, map->page)) {
                                        atomic_dec(&map->nr_free);
                                        pid_ns->last_pid = pid;
                                        return pid;
                                }
                                offset = find_next_offset(map, offset);
                                pid = mk_pid(pid_ns, map, offset);
                        /*
                         * find_next_offset() found a bit, the pid from it
                         * is in-bounds, and if we fell back to the last
                         * bitmap block and the final block was the same
                         * as the starting point, pid is before last_pid.
                         */
                        } while (offset < BITS_PER_PAGE && pid < pid_max &&
                                        (i != max_scan || pid < last ||
                                            !((last+1) & BITS_PER_PAGE_MASK)));
                }
                if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
                        ++map;
                        offset = 0;
                } else {
                        /* wrap around, skipping the reserved low pids */
                        map = &pid_ns->pidmap[0];
                        offset = RESERVED_PIDS;
                        if (unlikely(last == offset))
                                break;
                }
                pid = mk_pid(pid_ns, map, offset);
        }
        return -1;
}

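/*
 * next_pidmap - find the first allocated pid number greater than @last
 * in @pid_ns, or -1 if there is none; used by find_ge_pid() and
 * zap_pid_ns_processes() below to walk a namespace's pids.
 */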
static int next_pidmap(struct pid_namespace *pid_ns, int last)
{
        int offset;
        struct pidmap *map, *end;

        offset = (last + 1) & BITS_PER_PAGE_MASK;
        map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
        end = &pid_ns->pidmap[PIDMAP_ENTRIES];
        for (; map < end; map++, offset = 0) {
                if (unlikely(!map->page))
                        continue;
                offset = find_next_bit(map->page, BITS_PER_PAGE, offset);
                if (offset < BITS_PER_PAGE)
                        return mk_pid(pid_ns, map, offset);
        }
        return -1;
}

fastcall void put_pid(struct pid *pid)
{
        struct pid_namespace *ns;

        if (!pid)
                return;

        ns = pid->numbers[pid->level].ns;
        /* avoid the atomic op when we hold the only reference */
        if ((atomic_read(&pid->count) == 1) ||
             atomic_dec_and_test(&pid->count)) {
                kmem_cache_free(ns->pid_cachep, pid);
                put_pid_ns(ns);
        }
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
        struct pid *pid = container_of(rhp, struct pid, rcu);
        put_pid(pid);
}

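/*
 * free_pid unhashes every namespace level under pidmap_lock, returns
 * the pid numbers to their bitmaps, and defers the final put_pid()
 * through RCU so that concurrent lockless lookups (find_pid_ns()) never
 * see a freed struct pid.
 */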
fastcall void free_pid(struct pid *pid)
{
        /* We can be called with write_lock_irq(&tasklist_lock) held */
        int i;
        unsigned long flags;

        spin_lock_irqsave(&pidmap_lock, flags);
        for (i = 0; i <= pid->level; i++)
                hlist_del_rcu(&pid->numbers[i].pid_chain);
        spin_unlock_irqrestore(&pidmap_lock, flags);

        for (i = 0; i <= pid->level; i++)
                free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

        call_rcu(&pid->rcu, delayed_put_pid);
}

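/*
 * alloc_pid - allocate a struct pid for a new task in @ns. A pid living
 * in a level-2 namespace carries three upids: numbers[0] for its number
 * in init_pid_ns up to numbers[2] for its number in @ns itself, and
 * each level's number is hashed so the pid can be found from any of
 * them.
 */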
struct pid *alloc_pid(struct pid_namespace *ns)
{
        struct pid *pid;
        enum pid_type type;
        int i, nr;
        struct pid_namespace *tmp;
        struct upid *upid;

        pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
        if (!pid)
                goto out;

        tmp = ns;
        for (i = ns->level; i >= 0; i--) {
                nr = alloc_pidmap(tmp);
                if (nr < 0)
                        goto out_free;

                pid->numbers[i].nr = nr;
                pid->numbers[i].ns = tmp;
                tmp = tmp->parent;
        }

        get_pid_ns(ns);
        pid->level = ns->level;
        atomic_set(&pid->count, 1);
        for (type = 0; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_HEAD(&pid->tasks[type]);

        spin_lock_irq(&pidmap_lock);
        for (i = ns->level; i >= 0; i--) {
                upid = &pid->numbers[i];
                hlist_add_head_rcu(&upid->pid_chain,
                                &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
        }
        spin_unlock_irq(&pidmap_lock);

out:
        return pid;

out_free:
        /* level i failed; only levels i+1 .. ns->level hold pids to free */
        for (i++; i <= ns->level; i++)
                free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

        kmem_cache_free(ns->pid_cachep, pid);
        pid = NULL;
        goto out;
}

struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
{
        struct hlist_node *elem;
        struct upid *pnr;

        hlist_for_each_entry_rcu(pnr, elem,
                        &pid_hash[pid_hashfn(nr, ns)], pid_chain)
                if (pnr->nr == nr && pnr->ns == ns)
                        return container_of(pnr, struct pid,
                                        numbers[ns->level]);

        return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
        return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);

struct pid *find_pid(int nr)
{
        return find_pid_ns(nr, &init_pid_ns);
}
EXPORT_SYMBOL(find_pid);
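
/*
 * The typical lookup pattern built from the helpers above and
 * pid_task() below (illustrative sketch, equivalent to
 * find_task_by_vpid()):
 *
 *      rcu_read_lock();
 *      task = pid_task(find_vpid(nr), PIDTYPE_PID);
 *      rcu_read_unlock();
 */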

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
int fastcall attach_pid(struct task_struct *task, enum pid_type type,
                struct pid *pid)
{
        struct pid_link *link;

        link = &task->pids[type];
        link->pid = pid;
        hlist_add_head_rcu(&link->node, &pid->tasks[type]);

        return 0;
}

void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
        struct pid_link *link;
        struct pid *pid;
        int tmp;

        link = &task->pids[type];
        pid = link->pid;

        hlist_del_rcu(&link->node);
        link->pid = NULL;

        /* free the pid once no task of any type hangs off it any more */
        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (!hlist_empty(&pid->tasks[tmp]))
                        return;

        free_pid(pid);
}

/*
 * transfer_pid is an optimization of attach_pid(new), detach_pid(old);
 * it replaces @old with @new on the pid's task list in one RCU-safe
 * step (de_thread() during exec is the assumed caller).
 */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
                           enum pid_type type)
{
        new->pids[type].pid = old->pids[type].pid;
        hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
        old->pids[type].pid = NULL;
}

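/*
 * pid_task - return the first task that uses @pid for the given type,
 * or NULL. The rcu_dereference() means callers must be inside an RCU
 * read-side section (or hold tasklist_lock, see the comment below).
 */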
struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result = NULL;
        if (pid) {
                struct hlist_node *first;
                first = rcu_dereference(pid->tasks[type].first);
                if (first)
                        result = hlist_entry(first, struct task_struct,
                                             pids[type].node);
        }
        return result;
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type_ns(int type, int nr,
                struct pid_namespace *ns)
{
        return pid_task(find_pid_ns(nr, ns), type);
}

EXPORT_SYMBOL(find_task_by_pid_type_ns);

struct task_struct *find_task_by_pid(pid_t nr)
{
        return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
}
EXPORT_SYMBOL(find_task_by_pid);

struct task_struct *find_task_by_vpid(pid_t vnr)
{
        return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
                        current->nsproxy->pid_ns);
}
EXPORT_SYMBOL(find_task_by_vpid);

struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
        return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
}
EXPORT_SYMBOL(find_task_by_pid_ns);

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
        struct pid *pid;
        rcu_read_lock();
        pid = get_pid(task->pids[type].pid);
        rcu_read_unlock();
        return pid;
}

struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
        struct task_struct *result;
        rcu_read_lock();
        result = pid_task(pid, type);
        if (result)
                get_task_struct(result);
        rcu_read_unlock();
        return result;
}

struct pid *find_get_pid(pid_t nr)
{
        struct pid *pid;

        rcu_read_lock();
        pid = get_pid(find_vpid(nr));
        rcu_read_unlock();

        return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

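/*
 * pid_nr_ns - return the number @pid has in @ns, or 0 when @pid is not
 * visible there. For example, a pid whose own namespace is two levels
 * below init_pid_ns answers with numbers[0].nr when asked for the
 * init_pid_ns view, and with 0 for an unrelated sibling namespace.
 */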
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
        struct upid *upid;
        pid_t nr = 0;

        if (pid && ns->level <= pid->level) {
                upid = &pid->numbers[ns->level];
                if (upid->ns == ns)
                        nr = upid->nr;
        }
        return nr;
}

pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return pid_nr_ns(task_pid(tsk), ns);
}
EXPORT_SYMBOL(task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return pid_nr_ns(task_pgrp(tsk), ns);
}
EXPORT_SYMBOL(task_pgrp_nr_ns);

pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return pid_nr_ns(task_session(tsk), ns);
}
EXPORT_SYMBOL(task_session_nr_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
        struct pid *pid;

        do {
                pid = find_pid_ns(nr, ns);
                if (pid)
                        break;
                nr = next_pidmap(ns, nr);
        } while (nr > 0);

        return pid;
}

struct pid_cache {
        int nr_ids;
        char name[16];
        struct kmem_cache *cachep;
        struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids the pids allocated from this
 *      cache will carry
 */

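/*
 * Note that struct pid already embeds one struct upid (its numbers[]
 * array has one element), which is why the object size below only adds
 * (nr_ids - 1) extra upids.
 */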
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
        struct pid_cache *pcache;
        struct kmem_cache *cachep;

        mutex_lock(&pid_caches_mutex);
        list_for_each_entry(pcache, &pid_caches_lh, list)
                if (pcache->nr_ids == nr_ids)
                        goto out;

        pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
        if (pcache == NULL)
                goto err_alloc;

        snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
        cachep = kmem_cache_create(pcache->name,
                        sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (cachep == NULL)
                goto err_cachep;

        pcache->nr_ids = nr_ids;
        pcache->cachep = cachep;
        list_add(&pcache->list, &pid_caches_lh);
out:
        mutex_unlock(&pid_caches_mutex);
        return pcache->cachep;

err_cachep:
        kfree(pcache);
err_alloc:
        mutex_unlock(&pid_caches_mutex);
        return NULL;
}

#ifdef CONFIG_PID_NS
static struct pid_namespace *create_pid_namespace(int level)
{
        struct pid_namespace *ns;
        int i;

        ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL);
        if (ns == NULL)
                goto out;

        ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!ns->pidmap[0].page)
                goto out_free;

        ns->pid_cachep = create_pid_cachep(level + 1);
        if (ns->pid_cachep == NULL)
                goto out_free_map;

        kref_init(&ns->kref);
        ns->last_pid = 0;
        ns->child_reaper = NULL;
        ns->level = level;

        /* reserve bit 0 so pid 0 is never handed out in this namespace */
        set_bit(0, ns->pidmap[0].page);
        atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

        for (i = 1; i < PIDMAP_ENTRIES; i++) {
                ns->pidmap[i].page = NULL;
                atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
        }

        return ns;

out_free_map:
        kfree(ns->pidmap[0].page);
out_free:
        kmem_cache_free(pid_ns_cachep, ns);
out:
        return ERR_PTR(-ENOMEM);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
        int i;

        for (i = 0; i < PIDMAP_ENTRIES; i++)
                kfree(ns->pidmap[i].page);
        kmem_cache_free(pid_ns_cachep, ns);
}

struct pid_namespace *copy_pid_ns(unsigned long flags,
                struct pid_namespace *old_ns)
{
        struct pid_namespace *new_ns;

        BUG_ON(!old_ns);
        new_ns = get_pid_ns(old_ns);
        if (!(flags & CLONE_NEWPID))
                goto out;

        new_ns = ERR_PTR(-EINVAL);
        if (flags & CLONE_THREAD)
                goto out_put;

        new_ns = create_pid_namespace(old_ns->level + 1);
        if (!IS_ERR(new_ns))
                new_ns->parent = get_pid_ns(old_ns);

out_put:
        put_pid_ns(old_ns);
out:
        return new_ns;
}

void free_pid_ns(struct kref *kref)
{
        struct pid_namespace *ns, *parent;

        ns = container_of(kref, struct pid_namespace, kref);

        parent = ns->parent;
        destroy_pid_namespace(ns);

        if (parent != NULL)
                put_pid_ns(parent);
}
#endif /* CONFIG_PID_NS */

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
        int nr;
        int rc;

        /*
         * The last thread in the namespace's init thread group is
         * terminating. Find the remaining pids in the namespace, then
         * signal and wait for them to exit.
         *
         * Note: this signals each thread in the namespace - even those
         *       that belong to the same thread group. To avoid that, we
         *       would have to walk the entire tasklist looking for
         *       processes in this namespace, which could be needlessly
         *       expensive if the pid namespace has just a few processes,
         *       or we would need to maintain a tasklist for each pid
         *       namespace.
         */
        read_lock(&tasklist_lock);
        /* start past pid 1: the namespace's init is the caller */
        nr = next_pidmap(pid_ns, 1);
        while (nr > 0) {
                kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr);
                nr = next_pidmap(pid_ns, nr);
        }
        read_unlock(&tasklist_lock);

        do {
                clear_thread_flag(TIF_SIGPENDING);
                rc = sys_wait4(-1, NULL, __WALL, NULL);
        } while (rc != -ECHILD);

        /* Child reaper for the pid namespace is going away */
        pid_ns->child_reaper = NULL;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte
 * or more.
 */
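/*
 * For example, assuming 4 KB pages: a 16 MB machine gives fls(64) = 7,
 * i.e. 128 slots, while 512 MB gives fls(2048) = 12, which the clamp
 * below caps at pidhash_shift = 12, i.e. 4096 slots.
 */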
void __init pidhash_init(void)
{
        int i, pidhash_size;
        unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

        pidhash_shift = max(4, fls(megabytes * 4));
        pidhash_shift = min(12, pidhash_shift);
        pidhash_size = 1 << pidhash_shift;

        printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
                pidhash_size, pidhash_shift,
                pidhash_size * sizeof(struct hlist_head));

        pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
        if (!pid_hash)
                panic("Could not alloc pidhash!\n");
        for (i = 0; i < pidhash_size; i++)
                INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
        init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
        /* Reserve PID 0. We never call free_pidmap(0) */
        set_bit(0, init_pid_ns.pidmap[0].page);
        atomic_dec(&init_pid_ns.pidmap[0].nr_free);

        init_pid_ns.pid_cachep = create_pid_cachep(1);
        if (init_pid_ns.pid_cachep == NULL)
                panic("Can't create pid_1 cachep\n");

        pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
}