/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

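/*
 * A struct file may still be reachable by lockless readers (e.g. __fget()
 * walking the fd table under rcu_read_lock()), so the final free is
 * deferred by an RCU grace period via call_rcu() below.
 */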
static void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif

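/*
 * The handler above backs the "file-nr" entry under /proc/sys/fs.  An
 * illustrative read from userspace (the values are made up and vary per
 * system):
 *
 *      $ cat /proc/sys/fs/file-nr
 *      1824    0       812031
 *
 * i.e. nr_files, the count of free file structures (always 0 since the
 * free list was removed), and max_files.
 */
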
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, we ran out of memory, or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
        const struct cred *cred = current_cred();
        static long old_max;
        struct file *f;
        int error;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (unlikely(!f))
                return ERR_PTR(-ENOMEM);

        percpu_counter_inc(&nr_files);
        f->f_cred = get_cred(cred);
        error = security_file_alloc(f);
        if (unlikely(error)) {
                file_free(f);
                return ERR_PTR(error);
        }

        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        spin_lock_init(&f->f_lock);
        mutex_init(&f->f_pos_lock);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                pr_info("VFS: file-max limit %lu reached\n", get_max_files());
                old_max = get_nr_files();
        }
        return ERR_PTR(-ENFILE);
}

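/*
 * Note for callers: get_empty_filp() never returns NULL; failure is
 * reported with an ERR_PTR() value.  A minimal sketch of the expected
 * error handling:
 *
 *      struct file *f = get_empty_filp();
 *      if (IS_ERR(f))
 *              return PTR_ERR(f);
 */
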
/**
 * alloc_file - allocate and initialize a 'struct file'
 *
 * @path: the (dentry, vfsmount) pair for the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
struct file *alloc_file(const struct path *path, fmode_t mode,
                const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (IS_ERR(file))
                return file;

        file->f_path = *path;
        file->f_inode = path->dentry->d_inode;
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
        if ((mode & FMODE_READ) &&
             likely(fop->read || fop->read_iter))
                mode |= FMODE_CAN_READ;
        if ((mode & FMODE_WRITE) &&
             likely(fop->write || fop->write_iter))
                mode |= FMODE_CAN_WRITE;
        file->f_mode = mode;
        file->f_op = fop;
        if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(path->dentry->d_inode);
        return file;
}
EXPORT_SYMBOL(alloc_file);

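/*
 * A sketch of a typical caller of alloc_file(): a pseudo-filesystem
 * wrapping an already set-up (dentry, vfsmount) pair.  The path variable
 * and my_fops below are hypothetical placeholders, not part of this file:
 *
 *      struct file *f;
 *
 *      f = alloc_file(&path, FMODE_READ | FMODE_WRITE, &my_fops);
 *      if (IS_ERR(f))
 *              return PTR_ERR(f);
 *
 * On success the dentry and mount references embedded in @path end up
 * owned by the file and are dropped in __fput(); on failure the caller
 * still owns them.
 */
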
/*
 * The real guts of fput() - releasing the last reference to a file.
 */
static void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = file->f_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_file(file);

        ima_file_free(file);
        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        if (file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
                     !(file->f_mode & FMODE_PATH))) {
                cdev_put(inode->i_cdev);
        }
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_dec(inode);
        if (file->f_mode & FMODE_WRITER) {
                put_write_access(inode);
                __mnt_drop_write(mnt);
        }
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file->f_inode = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
        struct llist_node *node = llist_del_all(&delayed_fput_list);
        struct file *f, *t;

        llist_for_each_entry_safe(f, t, node, f_u.fu_llist)
                __fput(f);
}

static void ____fput(struct callback_head *work)
{
        __fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs the final fput() it has done to
 * complete, it can call this.  The only user right now is the boot code -
 * we *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct file waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
        delayed_fput(NULL);
}

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

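/*
 * Drop a reference; on the last one, defer __fput().  For an ordinary
 * task this is queued as task_work so that it runs before the next
 * return to userspace; from interrupt context or a kernel thread it is
 * punted to the delayed_fput workqueue instead.
 */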
void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;

                if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
                        init_task_work(&file->f_u.fu_rcuhead, ____fput);
                        if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
                                return;
                        /*
                         * After this task has run exit_task_work(),
                         * task_work_add() will fail.  Fall through to delayed
                         * fput to avoid leaking *file.
                         */
                }

                if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
                        schedule_delayed_work(&delayed_fput_work, 1);
        }
}

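/*
 * A minimal usage sketch (not from this file): every successful fget()
 * must be balanced by exactly one fput() once the caller is done with
 * the file:
 *
 *      struct file *f = fget(fd);
 *      if (!f)
 *              return -EBADF;
 *      ...use f...
 *      fput(f);
 */
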
/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * by some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks) and so need to wait for completion of __fput(),
 * knowing that for this specific struct file it won't involve anything
 * that would need them.  Use only if you really need it - at the very
 * least, don't blindly convert fput() calls in kernel threads to this.
 */
void __fput_sync(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                struct task_struct *task = current;
                BUG_ON(!(task->flags & PF_KTHREAD));
                __fput(file);
        }
}

EXPORT_SYMBOL(fput);

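/*
 * Release a struct file that came from get_empty_filp() but never had
 * f_path/f_op installed; the full __fput() teardown is neither needed
 * nor safe for such a file.
 */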
void put_filp(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_free(file);
        }
}

void __init files_init(void)
{
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
        percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K.  By
 * default do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
        unsigned long n;
        unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;

        memreserve = min(memreserve, totalram_pages - 1);
        n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}

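/*
 * Worked example for the sizing above (illustrative numbers): with 4 KiB
 * pages, PAGE_SIZE / 1024 == 4, so n is 10% of the non-reserved memory
 * expressed in KiB.  On a machine where 2097152 pages (8 GiB) remain
 * after the reserve, n = 2097152 * 4 / 10 = 838860 files, i.e. about one
 * file per 10 KiB of usable memory, consistent with the ~1K-per-file
 * estimate and the 10% budget.
 */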