/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
int sysctl_nr_open_max = __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) &
                         -BITS_PER_LONG;

static void *alloc_fdmem(size_t size)
{
        /*
         * Very large allocations can stress page reclaim, so fall back to
         * vmalloc() if the allocation size will be considered "large" by the VM.
         */
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                void *data = kmalloc(size, GFP_KERNEL_ACCOUNT |
                                     __GFP_NOWARN | __GFP_NORETRY);
                if (data != NULL)
                        return data;
        }
        return __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM, PAGE_KERNEL);
}

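/*
 * Free an fdtable and its dynamically allocated arrays.  The three
 * bitmaps share the open_fds allocation, so freeing open_fds releases
 * them all; kvfree() copes with kmalloc()ed and vmalloc()ed memory.
 */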
static void __free_fdtable(struct fdtable *fdt)
{
        kvfree(fdt->fd);
        kvfree(fdt->open_fds);
        kfree(fdt);
}

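/* RCU callback: free the old fdtable once no reader can still see it. */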
static void free_fdtable_rcu(struct rcu_head *rcu)
{
        __free_fdtable(container_of(rcu, struct fdtable, rcu));
}

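/*
 * full_fds_bits keeps one bit per BITS_PER_LONG-sized word of open_fds;
 * these macros give the size of that second-level bitmap, in longs and
 * in bytes, for a table of 'nr' descriptors.
 */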
#define BITBIT_NR(nr)   BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any.  This does not copy the file pointers.  Called with the files
 * spinlock held for write.
 */
static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
                            unsigned int count)
{
        unsigned int cpy, set;

        cpy = count / BITS_PER_BYTE;
        set = (nfdt->max_fds - count) / BITS_PER_BYTE;
        memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
        memset((char *)nfdt->open_fds + cpy, 0, set);
        memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
        memset((char *)nfdt->close_on_exec + cpy, 0, set);

        cpy = BITBIT_SIZE(count);
        set = BITBIT_SIZE(nfdt->max_fds) - cpy;
        memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
        memset((char *)nfdt->full_fds_bits + cpy, 0, set);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space.  Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
        unsigned int cpy, set;

        BUG_ON(nfdt->max_fds < ofdt->max_fds);

        cpy = ofdt->max_fds * sizeof(struct file *);
        set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
        memcpy(nfdt->fd, ofdt->fd, cpy);
        memset((char *)nfdt->fd + cpy, 0, set);

        copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
}

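/*
 * Allocate a new fdtable together with its fd array and bitmaps, normally
 * sized to hold at least nr + 1 descriptors; see the comments below for
 * the sizing strategy and the sysctl_nr_open corner case.
 */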
static struct fdtable * alloc_fdtable(unsigned int nr)
{
        struct fdtable *fdt;
        void *data;

        /*
         * Figure out how many fds we actually want to support in this fdtable.
         * Allocation steps are keyed to the size of the fdarray, since it
         * grows far faster than any of the other dynamic data. We try to fit
         * the fdarray into comfortable page-tuned chunks: starting at 1024B
         * and growing in powers of two from there on.
         */
        nr /= (1024 / sizeof(struct file *));
        nr = roundup_pow_of_two(nr + 1);
        nr *= (1024 / sizeof(struct file *));
        /*
         * Note that this can drive nr *below* what we had passed if sysctl_nr_open
         * had been set lower between the check in expand_files() and here.  Deal
         * with that in caller, it's cheaper that way.
         *
         * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
         * bitmaps handling below becomes unpleasant, to put it mildly...
         */
        if (unlikely(nr > sysctl_nr_open))
                nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

        fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
        if (!fdt)
                goto out;
        fdt->max_fds = nr;
        data = alloc_fdmem(nr * sizeof(struct file *));
        if (!data)
                goto out_fdt;
        fdt->fd = data;

        data = alloc_fdmem(max_t(size_t,
                                 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES));
        if (!data)
                goto out_arr;
        fdt->open_fds = data;
        data += nr / BITS_PER_BYTE;
        fdt->close_on_exec = data;
        data += nr / BITS_PER_BYTE;
        fdt->full_fds_bits = data;

        return fdt;

out_arr:
        kvfree(fdt->fd);
out_fdt:
        kfree(fdt);
out:
        return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *new_fdt, *cur_fdt;

        spin_unlock(&files->file_lock);
        new_fdt = alloc_fdtable(nr);

        /* make sure all __fd_install() have seen resize_in_progress
         * or have finished their rcu_read_lock_sched() section.
         */
        if (atomic_read(&files->count) > 1)
                synchronize_sched();

        spin_lock(&files->file_lock);
        if (!new_fdt)
                return -ENOMEM;
        /*
         * extremely unlikely race - sysctl_nr_open decreased between the check in
         * caller and alloc_fdtable().  Cheaper to catch it here...
         */
        if (unlikely(new_fdt->max_fds <= nr)) {
                __free_fdtable(new_fdt);
                return -EMFILE;
        }
        cur_fdt = files_fdtable(files);
        BUG_ON(nr < cur_fdt->max_fds);
        copy_fdtable(new_fdt, cur_fdt);
        rcu_assign_pointer(files->fdt, new_fdt);
        if (cur_fdt != &files->fdtab)
                call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
        /* coupled with smp_rmb() in __fd_install() */
        smp_wmb();
        return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *fdt;
        int expanded = 0;

repeat:
        fdt = files_fdtable(files);

        /* Do we need to expand? */
        if (nr < fdt->max_fds)
                return expanded;

        /* Can we expand? */
        if (nr >= sysctl_nr_open)
                return -EMFILE;

        if (unlikely(files->resize_in_progress)) {
                spin_unlock(&files->file_lock);
                expanded = 1;
                wait_event(files->resize_wait, !files->resize_in_progress);
                spin_lock(&files->file_lock);
                goto repeat;
        }

        /* All good, so we try */
        files->resize_in_progress = true;
        expanded = expand_fdtable(files, nr);
        files->resize_in_progress = false;

        wake_up_all(&files->resize_wait);
        return expanded;
}

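/*
 * The bitmap helpers below use the non-atomic __set_bit()/__clear_bit():
 * callers either hold files->file_lock or operate on a table that is not
 * yet visible to any other thread.
 */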
static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
{
        __set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
{
        if (test_bit(fd, fdt->close_on_exec))
                __clear_bit(fd, fdt->close_on_exec);
}

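/*
 * Marking an fd open may fill up its word of open_fds, in which case the
 * matching bit in full_fds_bits is set so that find_next_fd() can skip
 * the whole word; clearing an fd always clears that bit again.
 */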
static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt)
{
        __set_bit(fd, fdt->open_fds);
        fd /= BITS_PER_LONG;
        if (!~fdt->open_fds[fd])
                __set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
        __clear_bit(fd, fdt->open_fds);
        __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits);
}

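/*
 * Return an upper bound on the highest open fd plus one, rounded up to a
 * multiple of BITS_PER_LONG (the scan looks at whole bitmap words).  Used
 * when duplicating an fd table.
 */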
static int count_open_files(struct fdtable *fdt)
{
        int size = fdt->max_fds;
        int i;

        /* Find the last open fd */
        for (i = size / BITS_PER_LONG; i > 0; ) {
                if (fdt->open_fds[--i])
                        break;
        }
        i = (i + 1) * BITS_PER_LONG;
        return i;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        int open_files, i;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->resize_in_progress = false;
        init_waitqueue_head(&newf->resize_wait);
        newf->next_fd = 0;
        new_fdt = &newf->fdtab;
        new_fdt->max_fds = NR_OPEN_DEFAULT;
        new_fdt->close_on_exec = newf->close_on_exec_init;
        new_fdt->open_fds = newf->open_fds_init;
        new_fdt->full_fds_bits = newf->full_fds_bits_init;
        new_fdt->fd = &newf->fd_array[0];

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        open_files = count_open_files(old_fdt);

        /*
         * Check whether we need to allocate a larger fd array and fd set.
         */
        while (unlikely(open_files > new_fdt->max_fds)) {
                spin_unlock(&oldf->file_lock);

                if (new_fdt != &newf->fdtab)
                        __free_fdtable(new_fdt);

                new_fdt = alloc_fdtable(open_files - 1);
                if (!new_fdt) {
                        *errorp = -ENOMEM;
                        goto out_release;
                }

                /* beyond sysctl_nr_open; nothing to do */
                if (unlikely(new_fdt->max_fds < open_files)) {
                        __free_fdtable(new_fdt);
                        *errorp = -EMFILE;
                        goto out_release;
                }
                /*
                 * Reacquire the oldf lock and a pointer to its fd table;
                 * it may have grown a new, bigger fd table in the meantime,
                 * so we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
                open_files = count_open_files(old_fdt);
        }

        copy_fd_bitmaps(new_fdt, old_fdt, open_files);

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open().  So make sure that this
                         * fd is available to the new process.
                         */
                        __clear_open_fd(open_files - i, new_fdt);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* clear the remainder */
        memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

        rcu_assign_pointer(newf->fdt, new_fdt);

        return newf;

out_release:
        kmem_cache_free(files_cachep, newf);
out:
        return NULL;
}

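/*
 * Close every file still open in 'files' and return the fd table so the
 * caller can free it.  Only called once the last reference to the
 * files_struct is being dropped, hence the lockless access below.
 */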
static struct fdtable *close_files(struct files_struct * files)
{
        /*
         * It is safe to dereference the fd table without RCU or
         * ->file_lock because this is the last reference to the
         * files structure.
         */
        struct fdtable *fdt = rcu_dereference_raw(files->fdt);
        int i, j = 0;

        for (;;) {
                unsigned long set;
                i = j * BITS_PER_LONG;
                if (i >= fdt->max_fds)
                        break;
                set = fdt->open_fds[j++];
                while (set) {
                        if (set & 1) {
                                struct file * file = xchg(&fdt->fd[i], NULL);
                                if (file) {
                                        filp_close(file, files);
                                        cond_resched_rcu_qs();
                                }
                        }
                        i++;
                        set >>= 1;
                }
        }

        return fdt;
}

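/* Take a reference on a task's files_struct, if it still has one. */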
struct files_struct *get_files_struct(struct task_struct *task)
{
        struct files_struct *files;

        task_lock(task);
        files = task->files;
        if (files)
                atomic_inc(&files->count);
        task_unlock(task);

        return files;
}

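/*
 * Drop a reference to a files_struct; on the final put, close all
 * remaining files and free the fd table and the structure itself.
 */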
void put_files_struct(struct files_struct *files)
{
        if (atomic_dec_and_test(&files->count)) {
                struct fdtable *fdt = close_files(files);

                /* free the arrays if they are not embedded */
                if (fdt != &files->fdtab)
                        __free_fdtable(fdt);
                kmem_cache_free(files_cachep, files);
        }
}

void reset_files_struct(struct files_struct *files)
{
        struct task_struct *tsk = current;
        struct files_struct *old;

        old = tsk->files;
        task_lock(tsk);
        tsk->files = files;
        task_unlock(tsk);
        put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
        struct files_struct * files = tsk->files;

        if (files) {
                task_lock(tsk);
                tsk->files = NULL;
                task_unlock(tsk);
                put_files_struct(files);
        }
}

struct files_struct init_files = {
        .count          = ATOMIC_INIT(1),
        .fdt            = &init_files.fdtab,
        .fdtab          = {
                .max_fds        = NR_OPEN_DEFAULT,
                .fd             = &init_files.fd_array[0],
                .close_on_exec  = init_files.close_on_exec_init,
                .open_fds       = init_files.open_fds_init,
                .full_fds_bits  = init_files.full_fds_bits_init,
        },
        .file_lock      = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};

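/*
 * Find the first unused fd at or above 'start'.  The full_fds_bits
 * second-level bitmap lets us skip entirely full words of open_fds
 * before falling back to a bit-by-bit search.
 */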
static unsigned long find_next_fd(struct fdtable *fdt, unsigned long start)
{
        unsigned long maxfd = fdt->max_fds;
        unsigned long maxbit = maxfd / BITS_PER_LONG;
        unsigned long bitbit = start / BITS_PER_LONG;

        bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
        if (bitbit > maxfd)
                return maxfd;
        if (bitbit > start)
                start = bitbit;
        return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
               unsigned start, unsigned end, unsigned flags)
{
        unsigned int fd;
        int error;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
repeat:
        fdt = files_fdtable(files);
        fd = start;
        if (fd < files->next_fd)
                fd = files->next_fd;

        if (fd < fdt->max_fds)
                fd = find_next_fd(fdt, fd);

        /*
         * N.B. For clone tasks sharing a files structure, this test
         * will limit the total number of files that can be opened.
         */
        error = -EMFILE;
        if (fd >= end)
                goto out;

        error = expand_files(files, fd);
        if (error < 0)
                goto out;

        /*
         * If we needed to expand the fd array we
         * might have blocked - try again.
         */
        if (error)
                goto repeat;

        if (start <= files->next_fd)
                files->next_fd = fd + 1;

        __set_open_fd(fd, fdt);
        if (flags & O_CLOEXEC)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        error = fd;
#if 1
        /* Sanity check */
        if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
                printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
                rcu_assign_pointer(fdt->fd[fd], NULL);
        }
#endif

out:
        spin_unlock(&files->file_lock);
        return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
        return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
        return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

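/*
 * Release an fd in the bitmaps and remember it as a search hint if it is
 * lower than next_fd.  Called with files->file_lock held.
 */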
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
        struct fdtable *fdt = files_fdtable(files);
        __clear_open_fd(fd, fdt);
        if (fd < files->next_fd)
                files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
        struct files_struct *files = current->files;
        spin_lock(&files->file_lock);
        __put_unused_fd(files, fd);
        spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */

void __fd_install(struct files_struct *files, unsigned int fd,
                struct file *file)
{
        struct fdtable *fdt;

        might_sleep();
        rcu_read_lock_sched();

        while (unlikely(files->resize_in_progress)) {
                rcu_read_unlock_sched();
                wait_event(files->resize_wait, !files->resize_in_progress);
                rcu_read_lock_sched();
        }
        /* coupled with smp_wmb() in expand_fdtable() */
        smp_rmb();
        fdt = rcu_dereference_sched(files->fdt);
        BUG_ON(fdt->fd[fd] != NULL);
        rcu_assign_pointer(fdt->fd[fd], file);
        rcu_read_unlock_sched();
}

void fd_install(unsigned int fd, struct file *file)
{
        __fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
        struct file *file;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (fd >= fdt->max_fds)
                goto out_unlock;
        file = fdt->fd[fd];
        if (!file)
                goto out_unlock;
        rcu_assign_pointer(fdt->fd[fd], NULL);
        __clear_close_on_exec(fd, fdt);
        __put_unused_fd(files, fd);
        spin_unlock(&files->file_lock);
        return filp_close(file, files);

out_unlock:
        spin_unlock(&files->file_lock);
        return -EBADF;
}

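/*
 * Close all file descriptors marked close-on-exec.  Runs during exec,
 * after the files_struct has been unshared; the lock is still dropped
 * around each filp_close(), which may block.
 */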
void do_close_on_exec(struct files_struct *files)
{
        unsigned i;
        struct fdtable *fdt;

        /* exec unshares first */
        spin_lock(&files->file_lock);
        for (i = 0; ; i++) {
                unsigned long set;
                unsigned fd = i * BITS_PER_LONG;
                fdt = files_fdtable(files);
                if (fd >= fdt->max_fds)
                        break;
                set = fdt->close_on_exec[i];
                if (!set)
                        continue;
                fdt->close_on_exec[i] = 0;
                for ( ; set ; fd++, set >>= 1) {
                        struct file *file;
                        if (!(set & 1))
                                continue;
                        file = fdt->fd[fd];
                        if (!file)
                                continue;
                        rcu_assign_pointer(fdt->fd[fd], NULL);
                        __put_unused_fd(files, fd);
                        spin_unlock(&files->file_lock);
                        filp_close(file, files);
                        cond_resched();
                        spin_lock(&files->file_lock);
                }

        }
        spin_unlock(&files->file_lock);
}

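/*
 * Look up the file for 'fd' without taking file_lock: under RCU, skip
 * files whose f_mode has bits from 'mask' set, and retry the lookup if
 * the reference couldn't be taken because the count already hit zero.
 */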
static struct file *__fget(unsigned int fd, fmode_t mask)
{
        struct files_struct *files = current->files;
        struct file *file;

        rcu_read_lock();
loop:
        file = fcheck_files(files, fd);
        if (file) {
                /*
                 * If the file object's ref couldn't be taken, the dup2()
                 * atomicity guarantee is the reason we loop to catch the
                 * new file (or NULL pointer).
                 */
                if (file->f_mode & mask)
                        file = NULL;
                else if (!get_file_rcu(file))
                        goto loop;
        }
        rcu_read_unlock();

        return file;
}

struct file *fget(unsigned int fd)
{
        return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
        return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
        struct files_struct *files = current->files;
        struct file *file;

        if (atomic_read(&files->count) == 1) {
                file = __fcheck_files(files, fd);
                if (!file || unlikely(file->f_mode & mask))
                        return 0;
                return (unsigned long)file;
        } else {
                file = __fget(fd, mask);
                if (!file)
                        return 0;
                return FDPUT_FPUT | (unsigned long)file;
        }
}
unsigned long __fdget(unsigned int fd)
{
        return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
        return __fget_light(fd, 0);
}

unsigned long __fdget_pos(unsigned int fd)
{
        unsigned long v = __fdget(fd);
        struct file *file = (struct file *)(v & ~3);

        if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
                if (file_count(file) > 1) {
                        v |= FDPUT_POS_UNLOCK;
                        mutex_lock(&file->f_pos_lock);
                }
        }
        return v;
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (flag)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        bool res;
        rcu_read_lock();
        fdt = files_fdtable(files);
        res = close_on_exec(fd, fdt);
        rcu_read_unlock();
        return res;
}

static int do_dup2(struct files_struct *files,
        struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
        struct file *tofree;
        struct fdtable *fdt;

        /*
         * We need to detect attempts to do dup2() over allocated but still
         * not finished descriptor.  NB: OpenBSD avoids that at the price of
         * extra work in their equivalent of fget() - they insert struct
         * file immediately after grabbing descriptor, mark it larval if
         * more work (e.g. actual opening) is needed and make sure that
         * fget() treats larval files as absent.  Potentially interesting,
         * but while extra work in fget() is trivial, locking implications
         * and amount of surgery on open()-related paths in VFS are not.
         * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
         * deadlocks in rather amusing ways, AFAICS.  All of that is out of
         * scope of POSIX or SUS, since neither considers shared descriptor
         * tables and this condition does not arise without those.
         */
        fdt = files_fdtable(files);
        tofree = fdt->fd[fd];
        if (!tofree && fd_is_open(fd, fdt))
                goto Ebusy;
        get_file(file);
        rcu_assign_pointer(fdt->fd[fd], file);
        __set_open_fd(fd, fdt);
        if (flags & O_CLOEXEC)
                __set_close_on_exec(fd, fdt);
        else
                __clear_close_on_exec(fd, fdt);
        spin_unlock(&files->file_lock);

        if (tofree)
                filp_close(tofree, files);

        return fd;

Ebusy:
        spin_unlock(&files->file_lock);
        return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
        int err;
        struct files_struct *files = current->files;

        if (!file)
                return __close_fd(files, fd);

        if (fd >= rlimit(RLIMIT_NOFILE))
                return -EBADF;

        spin_lock(&files->file_lock);
        err = expand_files(files, fd);
        if (unlikely(err < 0))
                goto out_unlock;
        return do_dup2(files, file, fd, flags);

out_unlock:
        spin_unlock(&files->file_lock);
        return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
        int err = -EBADF;
        struct file *file;
        struct files_struct *files = current->files;

        if ((flags & ~O_CLOEXEC) != 0)
                return -EINVAL;

        if (unlikely(oldfd == newfd))
                return -EINVAL;

        if (newfd >= rlimit(RLIMIT_NOFILE))
                return -EBADF;

        spin_lock(&files->file_lock);
        err = expand_files(files, newfd);
        file = fcheck(oldfd);
        if (unlikely(!file))
                goto Ebadf;
        if (unlikely(err < 0)) {
                if (err == -EMFILE)
                        goto Ebadf;
                goto out_unlock;
        }
        return do_dup2(files, file, newfd, flags);

Ebadf:
        err = -EBADF;
out_unlock:
        spin_unlock(&files->file_lock);
        return err;
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
        if (unlikely(newfd == oldfd)) { /* corner case */
                struct files_struct *files = current->files;
                int retval = oldfd;

                rcu_read_lock();
                if (!fcheck_files(files, oldfd))
                        retval = -EBADF;
                rcu_read_unlock();
                return retval;
        }
        return sys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
        int ret = -EBADF;
        struct file *file = fget_raw(fildes);

        if (file) {
                ret = get_unused_fd_flags(0);
                if (ret >= 0)
                        fd_install(ret, file);
                else
                        fput(file);
        }
        return ret;
}

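/*
 * Back end for F_DUPFD: allocate the first free fd at or above 'from',
 * take a new reference on 'file' and install it there.
 */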
int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
        int err;
        if (from >= rlimit(RLIMIT_NOFILE))
                return -EINVAL;
        err = alloc_fd(from, flags);
        if (err >= 0) {
                get_file(file);
                fd_install(err, file);
        }
        return err;
}

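/*
 * Call 'f' for each open file in 'files', starting at descriptor 'n' and
 * holding files->file_lock.  Iteration stops at the first nonzero return
 * value, which is passed back to the caller.
 */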
int iterate_fd(struct files_struct *files, unsigned n,
                int (*f)(const void *, struct file *, unsigned),
                const void *p)
{
        struct fdtable *fdt;
        int res = 0;
        if (!files)
                return 0;
        spin_lock(&files->file_lock);
        for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
                struct file *file;
                file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
                if (!file)
                        continue;
                res = f(p, file, n);
                if (res)
                        break;
        }
        spin_unlock(&files->file_lock);
        return res;
}
EXPORT_SYMBOL(iterate_fd);