linux/kernel/user.c
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
        .uid_map = {
                .nr_extents = 1,
                .extent[0] = {
                        .first = 0,
                        .lower_first = 0,
                        .count = 4294967295U,
                },
        },
        .gid_map = {
                .nr_extents = 1,
                .extent[0] = {
                        .first = 0,
                        .lower_first = 0,
                        .count = 4294967295U,
                },
        },
        .projid_map = {
                .nr_extents = 1,
                .extent[0] = {
                        .first = 0,
                        .lower_first = 0,
                        .count = 4294967295U,
                },
        },
        .count = ATOMIC_INIT(3),
        .owner = GLOBAL_ROOT_UID,
        .group = GLOBAL_ROOT_GID,
        .ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
        .ns.ops = &userns_operations,
#endif
        .flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_PERSISTENT_KEYRINGS
        .persistent_keyring_register_sem =
        __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
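
/*
 * Note (sketch, not part of the original file): each map above is a
 * single identity extent, so every id in [0, 4294967295) maps to
 * itself and kuid == uid in the initial namespace.  Worked example
 * for uid 1000:
 *
 *      mapped = lower_first + (uid - first) = 0 + (1000 - 0) = 1000
 */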

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS    (CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ      (1 << UIDHASH_BITS)
#define UIDHASH_MASK            (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)       (uidhash_table + __uidhashfn((__kuid_val(uid))))
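
/*
 * Worked example (sketch, not part of the original file): with
 * CONFIG_BASE_SMALL off, UIDHASH_BITS is 7, so UIDHASH_SZ is 128 and
 * UIDHASH_MASK is 127.  For uid 1000:
 *
 *      ((1000 >> 7) + 1000) & 127 = (7 + 1000) & 127 = 1007 & 127 = 111
 *
 * i.e. uid 1000 lands in bucket 111.  Folding the high bits back into
 * the low bits spreads runs of sequential uids across the buckets.
 */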

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
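
/*
 * Illustrative pattern (sketch, not part of the original file): the
 * rules above mean every acquisition of uidhash_lock must disable hard
 * interrupts rather than just bottom halves:
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&uidhash_lock, flags);
 *      ... look up or modify uidhash_table ...
 *      spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * spin_lock_bh() would be wrong here: free_uid() may run with hardirqs
 * already off, and local_bh_enable() in that state can run softirq
 * callbacks that re-enable interrupts behind the caller's back.
 */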

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
        .__count        = ATOMIC_INIT(1),
        .processes      = ATOMIC_INIT(1),
        .sigpending     = ATOMIC_INIT(0),
        .locked_shm     = 0,
        .uid            = GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;

        hlist_for_each_entry(user, hashent, uidhash_node) {
                if (uid_eq(user->uid, uid)) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}
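
/*
 * Note (descriptive, not part of the original file): a successful
 * uid_hash_find() bumps __count, so every hit hands the caller a
 * reference that must eventually be dropped with free_uid().
 */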

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
        __releases(&uidhash_lock)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);
}
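
/*
 * Note (descriptive, not part of the original file): the
 * __releases(&uidhash_lock) annotation above is a sparse marker
 * recording that the function exits with the lock dropped; it compiles
 * to nothing in a normal build.
 */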

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}
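
/*
 * Illustrative usage (sketch, not part of the original file): a caller
 * pairs find_user() with free_uid() to drop the reference it took:
 *
 *      struct user_struct *up = find_user(uid);
 *
 *      if (up) {
 *              ... inspect up->processes etc. ...
 *              free_uid(up);
 *      }
 */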

void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
                free_user(up, flags);
        else
                local_irq_restore(flags);
}
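
/*
 * Note (descriptive, not part of the original file):
 * atomic_dec_and_lock() returns true, with uidhash_lock held, only
 * when the decrement takes __count to zero; otherwise it returns false
 * without touching the lock.  The common put is therefore lock-free,
 * and the final put enters free_user() with the lock held and
 * interrupts still disabled from local_irq_save(), exactly the calling
 * convention free_user() documents.
 */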

struct user_struct *alloc_uid(kuid_t uid)
{
        struct hlist_head *hashent = uidhashentry(uid);
        struct user_struct *up, *new;

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        return NULL;

                new->uid = uid;
                atomic_set(&new->__count, 1);

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        return up;
}
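
/*
 * Illustrative call pattern (sketch, not from this file): a caller
 * such as set_user() in kernel/sys.c takes the new user's struct
 * before dropping the old one:
 *
 *      struct user_struct *new_user = alloc_uid(kuid);
 *
 *      if (!new_user)
 *              return -EAGAIN;
 *      ...
 *      free_uid(old_user);
 */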

static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}
subsys_initcall(uid_cache_init);
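
/*
 * Note (descriptive, not part of the original file): subsys_initcall()
 * runs uid_cache_init() at the subsystem-init level during boot,
 * before device and late initcalls, so root_user is in the hash before
 * any userspace runs.
 */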