linux/fs/fcntl.c
/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void set_close_on_exec(unsigned int fd, int flag)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (flag)
                FD_SET(fd, fdt->close_on_exec);
        else
                FD_CLR(fd, fdt->close_on_exec);
        spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        int res;
        rcu_read_lock();
        fdt = files_fdtable(files);
        res = FD_ISSET(fd, fdt->close_on_exec);
        rcu_read_unlock();
        return res;
}
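
/*
 * Example: these two helpers back the F_GETFD/F_SETFD fcntl(2) commands.
 * A minimal userspace sketch (illustrative only; set_cloexec() is a
 * made-up helper name, not kernel or libc API):
 *
 *      #include <fcntl.h>
 *
 *      static int set_cloexec(int fd)
 *      {
 *              int flags = fcntl(fd, F_GETFD);
 *              if (flags < 0)
 *                      return -1;
 *              return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
 *      }
 */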

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
        int err = -EBADF;
        struct file *file, *tofree;
        struct files_struct *files = current->files;
        struct fdtable *fdt;

        if ((flags & ~O_CLOEXEC) != 0)
                return -EINVAL;

        if (unlikely(oldfd == newfd))
                return -EINVAL;

        spin_lock(&files->file_lock);
        err = expand_files(files, newfd);
        file = fcheck(oldfd);
        if (unlikely(!file))
                goto Ebadf;
        if (unlikely(err < 0)) {
                if (err == -EMFILE)
                        goto Ebadf;
                goto out_unlock;
        }
        /*
         * We need to detect attempts to do dup2() over an allocated but
         * still unfinished descriptor.  NB: OpenBSD avoids that at the price
         * of extra work in their equivalent of fget() - they insert struct
         * file immediately after grabbing a descriptor, mark it larval if
         * more work (e.g. actual opening) is needed and make sure that
         * fget() treats larval files as absent.  Potentially interesting,
         * but while the extra work in fget() is trivial, the locking
         * implications and amount of surgery on open()-related paths in the
         * VFS are not.  FreeBSD fails with -EBADF in the same situation,
         * NetBSD's "solution" deadlocks in rather amusing ways, AFAICS.  All
         * of that is outside the scope of POSIX and SUS, since neither
         * considers shared descriptor tables and this condition does not
         * arise without those.
         */
        err = -EBUSY;
        fdt = files_fdtable(files);
        tofree = fdt->fd[newfd];
        if (!tofree && FD_ISSET(newfd, fdt->open_fds))
                goto out_unlock;
        get_file(file);
        rcu_assign_pointer(fdt->fd[newfd], file);
        FD_SET(newfd, fdt->open_fds);
        if (flags & O_CLOEXEC)
                FD_SET(newfd, fdt->close_on_exec);
        else
                FD_CLR(newfd, fdt->close_on_exec);
        spin_unlock(&files->file_lock);

        if (tofree)
                filp_close(tofree, files);

        return newfd;

Ebadf:
        err = -EBADF;
out_unlock:
        spin_unlock(&files->file_lock);
        return err;
}
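
/*
 * Example: dup3() is dup2() plus a flags argument; O_CLOEXEC atomically
 * sets the close-on-exec bit on the new descriptor, closing the race
 * between dup2() and a later F_SETFD.  A userspace sketch (illustrative;
 * assumes _GNU_SOURCE for the dup3() prototype):
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      // Duplicate sock onto fd 100, close-on-exec, replacing
 *      // whatever fd 100 currently refers to.
 *      int fd = dup3(sock, 100, O_CLOEXEC);
 *      // Note: dup3(fd, fd, 0) fails with EINVAL, unlike dup2(fd, fd).
 */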

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
        if (unlikely(newfd == oldfd)) { /* corner case */
                struct files_struct *files = current->files;
                int retval = oldfd;

                rcu_read_lock();
                if (!fcheck_files(files, oldfd))
                        retval = -EBADF;
                rcu_read_unlock();
                return retval;
        }
        return sys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
        int ret = -EBADF;
        struct file *file = fget(fildes);

        if (file) {
                ret = get_unused_fd();
                if (ret >= 0)
                        fd_install(ret, file);
                else
                        fput(file);
        }
        return ret;
}
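
/*
 * Example: dup() always returns the lowest free descriptor.  To get the
 * lowest free descriptor at or above some floor instead, use F_DUPFD (or
 * F_DUPFD_CLOEXEC), handled in do_fcntl() below.  Illustrative userspace
 * sketch:
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int lowest   = dup(fd);                  // lowest free fd
 *      int atleast10 = fcntl(fd, F_DUPFD, 10);  // lowest free fd >= 10
 */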

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file *filp, unsigned long arg)
{
        struct inode *inode = filp->f_path.dentry->d_inode;
        int error = 0;

        /*
         * O_APPEND cannot be cleared if the file is marked as append-only
         * and the file is open for write.
         */
        if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
                return -EPERM;

        /* O_NOATIME can only be set by the owner or superuser */
        if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
                if (!is_owner_or_cap(inode))
                        return -EPERM;

        /* required for strict SunOS emulation */
        if (O_NONBLOCK != O_NDELAY)
                if (arg & O_NDELAY)
                        arg |= O_NONBLOCK;

        if (arg & O_DIRECT) {
                if (!filp->f_mapping || !filp->f_mapping->a_ops ||
                        !filp->f_mapping->a_ops->direct_IO)
                                return -EINVAL;
        }

        if (filp->f_op && filp->f_op->check_flags)
                error = filp->f_op->check_flags(arg);
        if (error)
                return error;

        /*
         * ->fasync() is responsible for setting the FASYNC bit.
         */
        if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op &&
                        filp->f_op->fasync) {
                error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
                if (error < 0)
                        goto out;
                if (error > 0)
                        error = 0;
        }
        spin_lock(&filp->f_lock);
        filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
        spin_unlock(&filp->f_lock);

 out:
        return error;
}
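
/*
 * Example: setfl() implements F_SETFL; only the bits in SETFL_MASK may be
 * changed after open.  A common use is toggling O_NONBLOCK.  Userspace
 * sketch (illustrative; set_nonblock() is a made-up helper name):
 *
 *      #include <fcntl.h>
 *
 *      static int set_nonblock(int fd, int on)
 *      {
 *              int flags = fcntl(fd, F_GETFL);
 *              if (flags < 0)
 *                      return -1;
 *              if (on)
 *                      flags |= O_NONBLOCK;
 *              else
 *                      flags &= ~O_NONBLOCK;
 *              return fcntl(fd, F_SETFL, flags);
 *      }
 */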

static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
                     int force)
{
        write_lock_irq(&filp->f_owner.lock);
        if (force || !filp->f_owner.pid) {
                put_pid(filp->f_owner.pid);
                filp->f_owner.pid = get_pid(pid);
                filp->f_owner.pid_type = type;

                if (pid) {
                        const struct cred *cred = current_cred();
                        filp->f_owner.uid = cred->uid;
                        filp->f_owner.euid = cred->euid;
                }
        }
        write_unlock_irq(&filp->f_owner.lock);
}

int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
                int force)
{
        int err;

        err = security_file_set_fowner(filp);
        if (err)
                return err;

        f_modown(filp, pid, type, force);
        return 0;
}
EXPORT_SYMBOL(__f_setown);

int f_setown(struct file *filp, unsigned long arg, int force)
{
        enum pid_type type;
        struct pid *pid;
        int who = arg;
        int result;
        type = PIDTYPE_PID;
        if (who < 0) {
                type = PIDTYPE_PGID;
                who = -who;
        }
        rcu_read_lock();
        pid = find_vpid(who);
        result = __f_setown(filp, pid, type, force);
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
        f_modown(filp, NULL, PIDTYPE_PID, 1);
}

pid_t f_getown(struct file *filp)
{
        pid_t pid;
        read_lock(&filp->f_owner.lock);
        pid = pid_vnr(filp->f_owner.pid);
        if (filp->f_owner.pid_type == PIDTYPE_PGID)
                pid = -pid;
        read_unlock(&filp->f_owner.lock);
        return pid;
}
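
/*
 * Example: F_SETOWN/F_GETOWN choose who receives SIGIO/SIGURG for this
 * file; a negative argument means a process group, mirroring the sign
 * convention in f_setown()/f_getown() above.  Userspace sketch
 * (illustrative):
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      fcntl(fd, F_SETOWN, getpid());    // deliver SIGIO to this process
 *      fcntl(fd, F_SETOWN, -getpgrp());  // ...or to the whole process group
 *      pid_t owner = fcntl(fd, F_GETOWN);
 */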

static int f_setown_ex(struct file *filp, unsigned long arg)
{
        struct f_owner_ex __user *owner_p = (void __user *)arg;
        struct f_owner_ex owner;
        struct pid *pid;
        int type;
        int ret;

        ret = copy_from_user(&owner, owner_p, sizeof(owner));
        if (ret)
                return -EFAULT;

        switch (owner.type) {
        case F_OWNER_TID:
                type = PIDTYPE_MAX;
                break;

        case F_OWNER_PID:
                type = PIDTYPE_PID;
                break;

        case F_OWNER_PGRP:
                type = PIDTYPE_PGID;
                break;

        default:
                return -EINVAL;
        }

        rcu_read_lock();
        pid = find_vpid(owner.pid);
        if (owner.pid && !pid)
                ret = -ESRCH;
        else
                ret = __f_setown(filp, pid, type, 1);
        rcu_read_unlock();

        return ret;
}

static int f_getown_ex(struct file *filp, unsigned long arg)
{
        struct f_owner_ex __user *owner_p = (void __user *)arg;
        struct f_owner_ex owner;
        int ret = 0;

        read_lock(&filp->f_owner.lock);
        owner.pid = pid_vnr(filp->f_owner.pid);
        switch (filp->f_owner.pid_type) {
        case PIDTYPE_MAX:
                owner.type = F_OWNER_TID;
                break;

        case PIDTYPE_PID:
                owner.type = F_OWNER_PID;
                break;

        case PIDTYPE_PGID:
                owner.type = F_OWNER_PGRP;
                break;

        default:
                WARN_ON(1);
                ret = -EINVAL;
                break;
        }
        read_unlock(&filp->f_owner.lock);

        if (!ret) {
                ret = copy_to_user(owner_p, &owner, sizeof(owner));
                if (ret)
                        ret = -EFAULT;
        }
        return ret;
}
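
/*
 * Example: F_SETOWN_EX/F_GETOWN_EX extend F_SETOWN with an explicit owner
 * type, so a single thread (F_OWNER_TID) can be targeted instead of a
 * whole process.  Userspace sketch (illustrative; uses a raw syscall for
 * gettid since glibc lacks a wrapper):
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      struct f_owner_ex owner = {
 *              .type = F_OWNER_TID,
 *              .pid  = syscall(SYS_gettid),
 *      };
 *      fcntl(fd, F_SETOWN_EX, &owner);
 */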

static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
                struct file *filp)
{
        long err = -EINVAL;

        switch (cmd) {
        case F_DUPFD:
        case F_DUPFD_CLOEXEC:
                if (arg >= rlimit(RLIMIT_NOFILE))
                        break;
                err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
                if (err >= 0) {
                        get_file(filp);
                        fd_install(err, filp);
                }
                break;
        case F_GETFD:
                err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
                break;
        case F_SETFD:
                err = 0;
                set_close_on_exec(fd, arg & FD_CLOEXEC);
                break;
        case F_GETFL:
                err = filp->f_flags;
                break;
        case F_SETFL:
                err = setfl(fd, filp, arg);
                break;
        case F_GETLK:
                err = fcntl_getlk(filp, (struct flock __user *) arg);
                break;
        case F_SETLK:
        case F_SETLKW:
                err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
                break;
        case F_GETOWN:
                /*
                 * XXX If f_owner is a process group, the
                 * negative return value will get converted
                 * into an error.  Oops.  If we keep the
                 * current syscall conventions, the only way
                 * to fix this will be in libc.
                 */
                err = f_getown(filp);
                force_successful_syscall_return();
                break;
        case F_SETOWN:
                err = f_setown(filp, arg, 1);
                break;
        case F_GETOWN_EX:
                err = f_getown_ex(filp, arg);
                break;
        case F_SETOWN_EX:
                err = f_setown_ex(filp, arg);
                break;
        case F_GETSIG:
                err = filp->f_owner.signum;
                break;
        case F_SETSIG:
                /* arg == 0 restores default behaviour. */
                if (!valid_signal(arg))
                        break;
                err = 0;
                filp->f_owner.signum = arg;
                break;
        case F_GETLEASE:
                err = fcntl_getlease(filp);
                break;
        case F_SETLEASE:
                err = fcntl_setlease(fd, filp, arg);
                break;
        case F_NOTIFY:
                err = fcntl_dirnotify(fd, filp, arg);
                break;
        case F_SETPIPE_SZ:
        case F_GETPIPE_SZ:
                err = pipe_fcntl(filp, cmd, arg);
                break;
        default:
                break;
        }
        return err;
}

SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
        struct file *filp;
        long err = -EBADF;

        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }

        err = do_fcntl(fd, cmd, arg, filp);

        fput(filp);
out:
        return err;
}
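
/*
 * Example: the F_GETLK/F_SETLK/F_SETLKW commands dispatched above implement
 * POSIX record locks.  Userspace sketch (illustrative) that blocks until a
 * whole-file write lock is granted:
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *
 *      struct flock fl = {
 *              .l_type   = F_WRLCK,
 *              .l_whence = SEEK_SET,
 *              .l_start  = 0,
 *              .l_len    = 0,  // 0 means "to end of file"
 *      };
 *      if (fcntl(fd, F_SETLKW, &fl) < 0)
 *              perror("fcntl(F_SETLKW)");
 */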

#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
                unsigned long, arg)
{
        struct file *filp;
        long err;

        err = -EBADF;
        filp = fget(fd);
        if (!filp)
                goto out;

        err = security_file_fcntl(filp, cmd, arg);
        if (err) {
                fput(filp);
                return err;
        }
        err = -EBADF;

        switch (cmd) {
                case F_GETLK64:
                        err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
                        break;
                case F_SETLK64:
                case F_SETLKW64:
                        err = fcntl_setlk64(fd, filp, cmd,
                                        (struct flock64 __user *) arg);
                        break;
                default:
                        err = do_fcntl(fd, cmd, arg, filp);
                        break;
        }
        fput(filp);
out:
        return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
        POLLIN | POLLRDNORM,                    /* POLL_IN */
        POLLOUT | POLLWRNORM | POLLWRBAND,      /* POLL_OUT */
        POLLIN | POLLRDNORM | POLLMSG,          /* POLL_MSG */
        POLLERR,                                /* POLL_ERR */
        POLLPRI | POLLRDBAND,                   /* POLL_PRI */
        POLLHUP | POLLERR                       /* POLL_HUP */
};

static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown, int sig)
{
        const struct cred *cred;
        int ret;

        rcu_read_lock();
        cred = __task_cred(p);
        ret = ((fown->euid == 0 ||
                fown->euid == cred->suid || fown->euid == cred->uid ||
                fown->uid  == cred->suid || fown->uid  == cred->uid) &&
               !security_file_send_sigiotask(p, fown, sig));
        rcu_read_unlock();
        return ret;
}

static void send_sigio_to_task(struct task_struct *p,
                               struct fown_struct *fown,
                               int fd, int reason, int group)
{
        /*
         * F_SETSIG can change ->signum locklessly in parallel, so make
         * sure we read it once and use the same value throughout.
         */
        int signum = ACCESS_ONCE(fown->signum);

        if (!sigio_perm(p, fown, signum))
                return;

        switch (signum) {
                siginfo_t si;
                default:
                        /*
                         * Queue a rt signal with the appropriate fd as its
                         * value.  We use SI_SIGIO as the source, not
                         * SI_KERNEL, since kernel signals always get
                         * delivered even if we can't queue.  Failure to
                         * queue in this case _should_ be reported; we fall
                         * back to SIGIO in that case. --sct
                         */
                        si.si_signo = signum;
                        si.si_errno = 0;
                        si.si_code  = reason;
                        /*
                         * Make sure we are called with one of the POLL_*
                         * reasons, otherwise we could leak kernel stack into
                         * userspace.
                         */
                        BUG_ON((reason & __SI_MASK) != __SI_POLL);
                        if (reason - POLL_IN >= NSIGPOLL)
                                si.si_band  = ~0L;
                        else
                                si.si_band = band_table[reason - POLL_IN];
                        si.si_fd    = fd;
                        if (!do_send_sig_info(signum, &si, p, group))
                                break;
                /* fall-through: fall back on the old plain SIGIO signal */
                case 0:
                        do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
        }
}
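
/*
 * Example: with F_SETSIG set to a realtime signal, the handler receives the
 * descriptor in si_fd and the poll bits (from band_table above) in si_band.
 * Userspace sketch (illustrative; on_io() is a made-up handler name and
 * error handling is omitted):
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *      #include <signal.h>
 *      #include <poll.h>
 *      #include <unistd.h>
 *
 *      static void on_io(int sig, siginfo_t *si, void *uc)
 *      {
 *              if (si->si_band & (POLLIN | POLLRDNORM))
 *                      ;  // si->si_fd is readable
 *      }
 *
 *      struct sigaction sa = { .sa_sigaction = on_io,
 *                              .sa_flags = SA_SIGINFO };
 *      sigaction(SIGRTMIN, &sa, NULL);
 *      fcntl(fd, F_SETOWN, getpid());
 *      fcntl(fd, F_SETSIG, SIGRTMIN);
 *      fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 */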

void send_sigio(struct fown_struct *fown, int fd, int band)
{
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;
        int group = 1;

        read_lock(&fown->lock);

        type = fown->pid_type;
        if (type == PIDTYPE_MAX) {
                group = 0;
                type = PIDTYPE_PID;
        }

        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        read_lock(&tasklist_lock);
        do_each_pid_task(pid, type, p) {
                send_sigio_to_task(p, fown, fd, band, group);
        } while_each_pid_task(pid, type, p);
        read_unlock(&tasklist_lock);
 out_unlock_fown:
        read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
                                struct fown_struct *fown, int group)
{
        if (sigio_perm(p, fown, SIGURG))
                do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}

int send_sigurg(struct fown_struct *fown)
{
        struct task_struct *p;
        enum pid_type type;
        struct pid *pid;
        int group = 1;
        int ret = 0;

        read_lock(&fown->lock);

        type = fown->pid_type;
        if (type == PIDTYPE_MAX) {
                group = 0;
                type = PIDTYPE_PID;
        }

        pid = fown->pid;
        if (!pid)
                goto out_unlock_fown;

        ret = 1;

        read_lock(&tasklist_lock);
        do_each_pid_task(pid, type, p) {
                send_sigurg_to_task(p, fown, group);
        } while_each_pid_task(pid, type, p);
        read_unlock(&tasklist_lock);
 out_unlock_fown:
        read_unlock(&fown->lock);
        return ret;
}
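
/*
 * Example: send_sigurg() is how TCP out-of-band data raises SIGURG.  A
 * process only receives it after opting in with F_SETOWN.  Userspace
 * sketch (illustrative; handle_urg is a user-defined handler):
 *
 *      #include <fcntl.h>
 *      #include <signal.h>
 *      #include <unistd.h>
 *
 *      signal(SIGURG, handle_urg);
 *      fcntl(sock, F_SETOWN, getpid());  // else SIGURG is silently dropped
 */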

static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

static void fasync_free_rcu(struct rcu_head *head)
{
        kmem_cache_free(fasync_cache,
                        container_of(head, struct fasync_struct, fa_rcu));
}

/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
        struct fasync_struct *fa, **fp;
        int result = 0;

        spin_lock(&filp->f_lock);
        spin_lock(&fasync_lock);
        for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
                if (fa->fa_file != filp)
                        continue;

                spin_lock_irq(&fa->fa_lock);
                fa->fa_file = NULL;
                spin_unlock_irq(&fa->fa_lock);

                *fp = fa->fa_next;
                call_rcu(&fa->fa_rcu, fasync_free_rcu);
                filp->f_flags &= ~FASYNC;
                result = 1;
                break;
        }
        spin_unlock(&fasync_lock);
        spin_unlock(&filp->f_lock);
        return result;
}

struct fasync_struct *fasync_alloc(void)
{
        return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}

/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
        kmem_cache_free(fasync_cache, new);
}

/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp,
                                          struct fasync_struct **fapp,
                                          struct fasync_struct *new)
{
        struct fasync_struct *fa, **fp;

        spin_lock(&filp->f_lock);
        spin_lock(&fasync_lock);
        for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
                if (fa->fa_file != filp)
                        continue;

                spin_lock_irq(&fa->fa_lock);
                fa->fa_fd = fd;
                spin_unlock_irq(&fa->fa_lock);
                goto out;
        }

        spin_lock_init(&new->fa_lock);
        new->magic = FASYNC_MAGIC;
        new->fa_file = filp;
        new->fa_fd = fd;
        new->fa_next = *fapp;
        rcu_assign_pointer(*fapp, new);
        filp->f_flags |= FASYNC;

out:
        spin_unlock(&fasync_lock);
        spin_unlock(&filp->f_lock);
        return fa;
}

/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if it did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
        struct fasync_struct *new;

        new = fasync_alloc();
        if (!new)
                return -ENOMEM;

        /*
         * fasync_insert_entry() returns the old (updated) entry if
         * it existed.
         *
         * So free the (unused) new entry and return 0 to let the
         * caller know that we didn't add any new fasync entries.
         */
        if (fasync_insert_entry(fd, filp, fapp, new)) {
                fasync_free(new);
                return 0;
        }

        return 1;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it made no
 * changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
        if (!on)
                return fasync_remove_entry(filp, fapp);
        return fasync_add_entry(fd, filp, fapp);
}
EXPORT_SYMBOL(fasync_helper);

/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
        while (fa) {
                struct fown_struct *fown;
                unsigned long flags;

                if (fa->magic != FASYNC_MAGIC) {
                        printk(KERN_ERR "kill_fasync: bad magic number in "
                               "fasync_struct!\n");
                        return;
                }
                spin_lock_irqsave(&fa->fa_lock, flags);
                if (fa->fa_file) {
                        fown = &fa->fa_file->f_owner;
                        /*
                         * Don't send SIGURG to processes which have not set
                         * a queued signum: SIGURG has its own default
                         * signalling mechanism.
                         */
                        if (!(sig == SIGURG && fown->signum == 0))
                                send_sigio(fown, fa->fa_fd, band);
                }
                spin_unlock_irqrestore(&fa->fa_lock, flags);
                fa = rcu_dereference(fa->fa_next);
        }
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
        /*
         * First a quick test without locking: usually
         * the list is empty.
         */
        if (*fp) {
                rcu_read_lock();
                kill_fasync_rcu(rcu_dereference(*fp), sig, band);
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(kill_fasync);
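
/*
 * Example: the usual driver pattern.  A driver's ->fasync() file operation
 * wires a file into the driver's fasync list via fasync_helper(); the
 * interrupt/event path then calls kill_fasync().  A minimal sketch
 * (illustrative; my_dev and the fops wiring are assumptions, not code from
 * this file):
 *
 *      struct my_dev {
 *              struct fasync_struct *fasync;
 *      };
 *
 *      static int my_fasync(int fd, struct file *filp, int on)
 *      {
 *              struct my_dev *dev = filp->private_data;
 *              return fasync_helper(fd, filp, on, &dev->fasync);
 *      }
 *
 *      // on data arrival (e.g. from an interrupt handler):
 *      //      kill_fasync(&dev->fasync, SIGIO, POLL_IN);
 */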

static int __init fcntl_init(void)
{
        /*
         * Please add new bits here to ensure allocation uniqueness.
         * Exceptions: O_NONBLOCK is a two-bit define on parisc; O_NDELAY
         * is defined as O_NONBLOCK on some platforms and not on others.
         */
        BUILD_BUG_ON(18 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
                O_RDONLY        | O_WRONLY      | O_RDWR        |
                O_CREAT         | O_EXCL        | O_NOCTTY      |
                O_TRUNC         | O_APPEND      | /* O_NONBLOCK | */
                __O_SYNC        | O_DSYNC       | FASYNC        |
                O_DIRECT        | O_LARGEFILE   | O_DIRECTORY   |
                O_NOFOLLOW      | O_NOATIME     | O_CLOEXEC     |
                __FMODE_EXEC
                ));

        fasync_cache = kmem_cache_create("fasync_cache",
                sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
        return 0;
}

module_init(fcntl_init)