linux/fs/eventpoll.c
   1/*
   2 *  fs/eventpoll.c (Efficient event retrieval implementation)
   3 *  Copyright (C) 2001,...,2009  Davide Libenzi
   4 *
   5 *  This program is free software; you can redistribute it and/or modify
   6 *  it under the terms of the GNU General Public License as published by
   7 *  the Free Software Foundation; either version 2 of the License, or
   8 *  (at your option) any later version.
   9 *
  10 *  Davide Libenzi <davidel@xmailserver.org>
  11 *
  12 */
  13
  14#include <linux/init.h>
  15#include <linux/kernel.h>
  16#include <linux/sched.h>
  17#include <linux/fs.h>
  18#include <linux/file.h>
  19#include <linux/signal.h>
  20#include <linux/errno.h>
  21#include <linux/mm.h>
  22#include <linux/slab.h>
  23#include <linux/poll.h>
  24#include <linux/string.h>
  25#include <linux/list.h>
  26#include <linux/hash.h>
  27#include <linux/spinlock.h>
  28#include <linux/syscalls.h>
  29#include <linux/rbtree.h>
  30#include <linux/wait.h>
  31#include <linux/eventpoll.h>
  32#include <linux/mount.h>
  33#include <linux/bitops.h>
  34#include <linux/mutex.h>
  35#include <linux/anon_inodes.h>
  36#include <linux/device.h>
  37#include <asm/uaccess.h>
  38#include <asm/io.h>
  39#include <asm/mman.h>
  40#include <linux/atomic.h>
  41#include <linux/proc_fs.h>
  42#include <linux/seq_file.h>
  43#include <linux/compat.h>
  44
  45/*
  46 * LOCKING:
  47 * There are three levels of locking required by epoll:
  48 *
  49 * 1) epmutex (mutex)
  50 * 2) ep->mtx (mutex)
  51 * 3) ep->lock (spinlock)
  52 *
  53 * The acquire order is the one listed above, from 1 to 3.
  54 * We need a spinlock (ep->lock) because we manipulate objects
  55 * from inside the poll callback, which might be triggered from
  56 * a wake_up() that in turn might be called from IRQ context.
  57 * So we can't sleep inside the poll callback and hence we need
  58 * a spinlock. During the event transfer loop (from kernel to
  59 * user space) we could end up sleeping due to a copy_to_user(), so
  60 * we need a lock that will allow us to sleep. This lock is a
  61 * mutex (ep->mtx). It is acquired during the event transfer loop,
  62 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
  63 * Then we also need a global mutex to serialize eventpoll_release_file()
  64 * and ep_free().
  65 * This mutex is acquired by ep_free() during the epoll file
  66 * cleanup path and it is also acquired by eventpoll_release_file()
  67 * if a file has been pushed inside an epoll set and it is then
  68 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
  69 * It is also acquired when inserting an epoll fd onto another epoll
  70 * fd. We do this so that we walk the epoll tree and ensure that this
  71 * insertion does not create a cycle of epoll file descriptors, which
  72 * could lead to deadlock. We need a global mutex to prevent two
  73 * simultaneous inserts (A into B and B into A) from racing and
  74 * constructing a cycle without either insert noticing that it is
  75 * about to do so.
  76 * It is necessary to acquire multiple "ep->mtx"es at once in the
  77 * case when one epoll fd is added to another. In this case, we
  78 * always acquire the locks in the order of nesting (i.e. after
  79 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
  80 * before e2->mtx). Since we disallow cycles of epoll file
  81 * descriptors, this ensures that the mutexes are well-ordered. In
  82 * order to communicate this nesting to lockdep, when walking a tree
  83 * of epoll file descriptors, we use the current recursion depth as
  84 * the lockdep subkey.
  85 * It is possible to drop the "ep->mtx" and to use the global
  86 * mutex "epmutex" (together with "ep->lock") to have it working,
  87 * but having "ep->mtx" will make the interface more scalable.
  88 * Events that require holding "epmutex" are very rare, while for
  89 * normal operations the epoll private "ep->mtx" will guarantee
  90 * a better scalability.
  91 */
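
/*
 * Illustrative sketch only (user-space view, names are made up, not part
 * of the locking rules above): the nested "ep->mtx" ordering matters when
 * one epoll fd is added to another, which from user space looks like:
 *
 *   int efd1 = epoll_create1(0);
 *   int efd2 = epoll_create1(0);
 *   struct epoll_event ev = { .events = EPOLLIN, .data.fd = efd2 };
 *
 *   epoll_ctl(efd1, EPOLL_CTL_ADD, efd2, &ev);
 *
 * While handling this EPOLL_CTL_ADD, efd1's "ep->mtx" is taken first and
 * efd2's "ep->mtx" may be taken nested (with a deeper lockdep subkey),
 * which is why cycles between epoll file descriptors must be rejected.
 */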
  92
  93/* Epoll private bits inside the event mask */
  94#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET)
  95
  96/* Maximum nesting depth allowed inside epoll sets */
  97#define EP_MAX_NESTS 4
  98
  99#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
 100
 101#define EP_UNACTIVE_PTR ((void *) -1L)
 102
 103#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
 104
 105struct epoll_filefd {
 106        struct file *file;
 107        int fd;
 108} __packed;
 109
 110/*
 111 * Structure used to track possible nested calls, to detect overly deep
 112 * recursion and loop cycles.
 113 */
 114struct nested_call_node {
 115        struct list_head llink;
 116        void *cookie;
 117        void *ctx;
 118};
 119
 120/*
 121 * This structure is used as a collector for nested calls, to check for
 122 * maximum recursion depth and loop cycles.
 123 */
 124struct nested_calls {
 125        struct list_head tasks_call_list;
 126        spinlock_t lock;
 127};
 128
 129/*
 130 * Each file descriptor added to the eventpoll interface will
 131 * have an entry of this type linked to the "rbr" RB tree.
 132 * Avoid increasing the size of this struct, there can be many thousands
 133 * of these on a server and we do not want this to take another cache line.
 134 */
 135struct epitem {
 136        /* RB tree node used to link this structure to the eventpoll RB tree */
 137        struct rb_node rbn;
 138
 139        /* List header used to link this structure to the eventpoll ready list */
 140        struct list_head rdllink;
 141
 142        /*
 143         * Works together with "struct eventpoll"->ovflist in keeping the
 144         * singly linked chain of items.
 145         */
 146        struct epitem *next;
 147
 148        /* The file descriptor information this item refers to */
 149        struct epoll_filefd ffd;
 150
 151        /* Number of active wait queues attached to poll operations */
 152        int nwait;
 153
 154        /* List containing poll wait queues */
 155        struct list_head pwqlist;
 156
 157        /* The "container" of this item */
 158        struct eventpoll *ep;
 159
 160        /* List header used to link this item to the "struct file" items list */
 161        struct list_head fllink;
 162
 163        /* wakeup_source used when EPOLLWAKEUP is set */
 164        struct wakeup_source __rcu *ws;
 165
 166        /* The structure that describes the interested events and the source fd */
 167        struct epoll_event event;
 168};
 169
 170/*
 171 * This structure is stored inside the "private_data" member of the file
 172 * structure and represents the main data structure for the eventpoll
 173 * interface.
 174 */
 175struct eventpoll {
 176        /* Protect the access to this structure */
 177        spinlock_t lock;
 178
 179        /*
 180         * This mutex is used to ensure that files are not removed
 181         * while epoll is using them. This is held during the event
 182         * collection loop, the file cleanup path, the epoll file exit
 183         * code and the ctl operations.
 184         */
 185        struct mutex mtx;
 186
 187        /* Wait queue used by sys_epoll_wait() */
 188        wait_queue_head_t wq;
 189
 190        /* Wait queue used by file->poll() */
 191        wait_queue_head_t poll_wait;
 192
 193        /* List of ready file descriptors */
 194        struct list_head rdllist;
 195
 196        /* RB tree root used to store monitored fd structs */
 197        struct rb_root rbr;
 198
 199        /*
 200         * This is a singly linked list that chains all the "struct epitem" that
 201         * had events occur while ready events were being transferred to
 202         * user space without holding ->lock.
 203         */
 204        struct epitem *ovflist;
 205
 206        /* wakeup_source used when ep_scan_ready_list is running */
 207        struct wakeup_source *ws;
 208
 209        /* The user that created the eventpoll descriptor */
 210        struct user_struct *user;
 211
 212        struct file *file;
 213
 214        /* used to optimize loop detection check */
 215        int visited;
 216        struct list_head visited_list_link;
 217};
 218
 219/* Wait structure used by the poll hooks */
 220struct eppoll_entry {
 221        /* List header used to link this structure to the "struct epitem" */
 222        struct list_head llink;
 223
 224        /* The "base" pointer is set to the container "struct epitem" */
 225        struct epitem *base;
 226
 227        /*
 228         * Wait queue item that will be linked to the target file wait
 229         * queue head.
 230         */
 231        wait_queue_t wait;
 232
 233        /* The wait queue head that linked the "wait" wait queue item */
 234        wait_queue_head_t *whead;
 235};
 236
 237/* Wrapper struct used by poll queueing */
 238struct ep_pqueue {
 239        poll_table pt;
 240        struct epitem *epi;
 241};
 242
 243/* Used by the ep_send_events() function as callback private data */
 244struct ep_send_events_data {
 245        int maxevents;
 246        struct epoll_event __user *events;
 247};
 248
 249/*
 250 * Configuration options available inside /proc/sys/fs/epoll/
 251 */
 252/* Maximum number of epoll watched descriptors, per user */
 253static long max_user_watches __read_mostly;
 254
 255/*
 256 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 257 */
 258static DEFINE_MUTEX(epmutex);
 259
 260/* Used to check for epoll file descriptor inclusion loops */
 261static struct nested_calls poll_loop_ncalls;
 262
 263/* Used for safe wake up implementation */
 264static struct nested_calls poll_safewake_ncalls;
 265
 266/* Used to call file's f_op->poll() under the nested calls boundaries */
 267static struct nested_calls poll_readywalk_ncalls;
 268
 269/* Slab cache used to allocate "struct epitem" */
 270static struct kmem_cache *epi_cache __read_mostly;
 271
 272/* Slab cache used to allocate "struct eppoll_entry" */
 273static struct kmem_cache *pwq_cache __read_mostly;
 274
 275/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
 276static LIST_HEAD(visited_list);
 277
 278/*
 279 * List of files with newly added links, where we may need to limit the number
 280 * of emanating paths. Protected by the epmutex.
 281 */
 282static LIST_HEAD(tfile_check_list);
 283
 284#ifdef CONFIG_SYSCTL
 285
 286#include <linux/sysctl.h>
 287
 288static long zero;
 289static long long_max = LONG_MAX;
 290
 291ctl_table epoll_table[] = {
 292        {
 293                .procname       = "max_user_watches",
 294                .data           = &max_user_watches,
 295                .maxlen         = sizeof(max_user_watches),
 296                .mode           = 0644,
 297                .proc_handler   = proc_doulongvec_minmax,
 298                .extra1         = &zero,
 299                .extra2         = &long_max,
 300        },
 301        { }
 302};
 303#endif /* CONFIG_SYSCTL */
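
/*
 * Illustrative sketch only: the sysctl above is exposed to user space as
 * /proc/sys/fs/epoll/max_user_watches. A minimal way to read it from a
 * normal program (assumed user-space helper, not kernel code):
 *
 *   long watches = -1;
 *   FILE *f = fopen("/proc/sys/fs/epoll/max_user_watches", "r");
 *
 *   if (f) {
 *           if (fscanf(f, "%ld", &watches) != 1)
 *                   watches = -1;
 *           fclose(f);
 *   }
 */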
 304
 305static const struct file_operations eventpoll_fops;
 306
 307static inline int is_file_epoll(struct file *f)
 308{
 309        return f->f_op == &eventpoll_fops;
 310}
 311
 312/* Setup the structure that is used as key for the RB tree */
 313static inline void ep_set_ffd(struct epoll_filefd *ffd,
 314                              struct file *file, int fd)
 315{
 316        ffd->file = file;
 317        ffd->fd = fd;
 318}
 319
 320/* Compare RB tree keys */
 321static inline int ep_cmp_ffd(struct epoll_filefd *p1,
 322                             struct epoll_filefd *p2)
 323{
 324        return (p1->file > p2->file ? +1:
 325                (p1->file < p2->file ? -1 : p1->fd - p2->fd));
 326}
 327
 328/* Tells us if the item is currently linked */
 329static inline int ep_is_linked(struct list_head *p)
 330{
 331        return !list_empty(p);
 332}
 333
 334static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
 335{
 336        return container_of(p, struct eppoll_entry, wait);
 337}
 338
 339/* Get the "struct epitem" from a wait queue pointer */
 340static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
 341{
 342        return container_of(p, struct eppoll_entry, wait)->base;
 343}
 344
 345/* Get the "struct epitem" from an epoll queue wrapper */
 346static inline struct epitem *ep_item_from_epqueue(poll_table *p)
 347{
 348        return container_of(p, struct ep_pqueue, pt)->epi;
 349}
 350
 351/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
 352static inline int ep_op_has_event(int op)
 353{
 354        return op != EPOLL_CTL_DEL;
 355}
 356
 357/* Initialize the poll safe wake up structure */
 358static void ep_nested_calls_init(struct nested_calls *ncalls)
 359{
 360        INIT_LIST_HEAD(&ncalls->tasks_call_list);
 361        spin_lock_init(&ncalls->lock);
 362}
 363
 364/**
 365 * ep_events_available - Checks if ready events might be available.
 366 *
 367 * @ep: Pointer to the eventpoll context.
 368 *
 369 * Returns: Returns a value different from zero if ready events are available,
 370 *          or zero otherwise.
 371 */
 372static inline int ep_events_available(struct eventpoll *ep)
 373{
 374        return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
 375}
 376
 377/**
 378 * ep_call_nested - Perform a bounded (possibly nested) call, by checking
 379 *                  that the recursion limit is not exceeded, and that
 380 *                  the same nested call (identified by its cookie) is
 381 *                  not re-entered.
 382 *
 383 * @ncalls: Pointer to the nested_calls structure to be used for this call.
 384 * @max_nests: Maximum number of allowed nesting calls.
 385 * @nproc: Nested call core function pointer.
 386 * @priv: Opaque data to be passed to the @nproc callback.
 387 * @cookie: Cookie to be used to identify this nested call.
 388 * @ctx: This instance context.
 389 *
 390 * Returns: Returns the code returned by the @nproc callback, or -1 if
 391 *          the maximum recursion limit has been exceeded.
 392 */
 393static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
 394                          int (*nproc)(void *, void *, int), void *priv,
 395                          void *cookie, void *ctx)
 396{
 397        int error, call_nests = 0;
 398        unsigned long flags;
 399        struct list_head *lsthead = &ncalls->tasks_call_list;
 400        struct nested_call_node *tncur;
 401        struct nested_call_node tnode;
 402
 403        spin_lock_irqsave(&ncalls->lock, flags);
 404
 405        /*
 406         * Try to see if the current task is already inside this wakeup call.
 407         * We use a list here, since the population inside this set is always
 408         * very much limited.
 409         */
 410        list_for_each_entry(tncur, lsthead, llink) {
 411                if (tncur->ctx == ctx &&
 412                    (tncur->cookie == cookie || ++call_nests > max_nests)) {
 413                        /*
 414                         * Oops ... loop detected or maximum nest level reached.
 415                         * We abort this wake by breaking the cycle itself.
 416                         */
 417                        error = -1;
 418                        goto out_unlock;
 419                }
 420        }
 421
 422        /* Add the current task and cookie to the list */
 423        tnode.ctx = ctx;
 424        tnode.cookie = cookie;
 425        list_add(&tnode.llink, lsthead);
 426
 427        spin_unlock_irqrestore(&ncalls->lock, flags);
 428
 429        /* Call the nested function */
 430        error = (*nproc)(priv, cookie, call_nests);
 431
 432        /* Remove the current task from the list */
 433        spin_lock_irqsave(&ncalls->lock, flags);
 434        list_del(&tnode.llink);
 435out_unlock:
 436        spin_unlock_irqrestore(&ncalls->lock, flags);
 437
 438        return error;
 439}
 440
 441/*
 442 * As described in commit 0ccf831cb ("lockdep: annotate epoll"),
 443 * the use of wait queues used by epoll is done in a very controlled
 444 * manner. Wake ups can nest inside each other, but are never done
 445 * with the same locking. For example:
 446 *
 447 *   dfd = socket(...);
 448 *   efd1 = epoll_create();
 449 *   efd2 = epoll_create();
 450 *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 451 *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 452 *
 453 * When a packet arrives to the device underneath "dfd", the net code will
 454 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 455 * callback wakeup entry on that queue, and the wake_up() performed by the
 456 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
 457 * (efd1) notices that it may have some event ready, so it needs to wake up
 458 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
 459 * that ends up in another wake_up(), after having checked the
 460 * recursion constraints. That is, no more than EP_MAX_NESTS nested
 461 * wake-ups are allowed, to avoid stack blasting.
 462 *
 463 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
 464 * this special case of epoll.
 465 */
 466#ifdef CONFIG_DEBUG_LOCK_ALLOC
 467static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
 468                                     unsigned long events, int subclass)
 469{
 470        unsigned long flags;
 471
 472        spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
 473        wake_up_locked_poll(wqueue, events);
 474        spin_unlock_irqrestore(&wqueue->lock, flags);
 475}
 476#else
 477static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
 478                                     unsigned long events, int subclass)
 479{
 480        wake_up_poll(wqueue, events);
 481}
 482#endif
 483
 484static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
 485{
 486        ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
 487                          1 + call_nests);
 488        return 0;
 489}
 490
 491/*
 492 * Perform a safe wake up of the poll wait list. The problem is that
 493 * with the new callback'd wake up system, it is possible that the
 494 * poll callback is reentered from inside the call to wake_up() done
 495 * on the poll wait queue head. The rule is that we cannot reenter the
 496 * wake up code from the same task more than EP_MAX_NESTS times,
 497 * and we cannot reenter the same wait queue head at all. This
 498 * allows a hierarchy of epoll file descriptors no more than
 499 * EP_MAX_NESTS levels deep.
 500 */
 501static void ep_poll_safewake(wait_queue_head_t *wq)
 502{
 503        int this_cpu = get_cpu();
 504
 505        ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
 506                       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
 507
 508        put_cpu();
 509}
 510
 511static void ep_remove_wait_queue(struct eppoll_entry *pwq)
 512{
 513        wait_queue_head_t *whead;
 514
 515        rcu_read_lock();
 516        /* If it is cleared by POLLFREE, it should be rcu-safe */
 517        whead = rcu_dereference(pwq->whead);
 518        if (whead)
 519                remove_wait_queue(whead, &pwq->wait);
 520        rcu_read_unlock();
 521}
 522
 523/*
 524 * This function unregisters poll callbacks from the associated file
 525 * descriptor.  Must be called with "mtx" held (or "epmutex" if called from
 526 * ep_free).
 527 */
 528static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
 529{
 530        struct list_head *lsthead = &epi->pwqlist;
 531        struct eppoll_entry *pwq;
 532
 533        while (!list_empty(lsthead)) {
 534                pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
 535
 536                list_del(&pwq->llink);
 537                ep_remove_wait_queue(pwq);
 538                kmem_cache_free(pwq_cache, pwq);
 539        }
 540}
 541
 542/* call only when ep->mtx is held */
 543static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
 544{
 545        return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
 546}
 547
 548/* call only when ep->mtx is held */
 549static inline void ep_pm_stay_awake(struct epitem *epi)
 550{
 551        struct wakeup_source *ws = ep_wakeup_source(epi);
 552
 553        if (ws)
 554                __pm_stay_awake(ws);
 555}
 556
 557static inline bool ep_has_wakeup_source(struct epitem *epi)
 558{
 559        return rcu_access_pointer(epi->ws) ? true : false;
 560}
 561
 562/* call when ep->mtx cannot be held (ep_poll_callback) */
 563static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
 564{
 565        struct wakeup_source *ws;
 566
 567        rcu_read_lock();
 568        ws = rcu_dereference(epi->ws);
 569        if (ws)
 570                __pm_stay_awake(ws);
 571        rcu_read_unlock();
 572}
 573
 574/**
 575 * ep_scan_ready_list - Scans the ready list in a way that makes it possible
 576 *                      for the scan code to call f_op->poll(). Also allows for
 577 *                      O(NumReady) performance.
 578 *
 579 * @ep: Pointer to the epoll private data structure.
 580 * @sproc: Pointer to the scan callback.
 581 * @priv: Private opaque data passed to the @sproc callback.
 582 * @depth: The current depth of recursive f_op->poll calls.
 583 *
 584 * Returns: The same integer error code returned by the @sproc callback.
 585 */
 586static int ep_scan_ready_list(struct eventpoll *ep,
 587                              int (*sproc)(struct eventpoll *,
 588                                           struct list_head *, void *),
 589                              void *priv,
 590                              int depth)
 591{
 592        int error, pwake = 0;
 593        unsigned long flags;
 594        struct epitem *epi, *nepi;
 595        LIST_HEAD(txlist);
 596
 597        /*
 598         * We need to lock this because we could be hit by
 599         * eventpoll_release_file() and epoll_ctl().
 600         */
 601        mutex_lock_nested(&ep->mtx, depth);
 602
 603        /*
 604         * Steal the ready list, and re-init the original one to the
 605         * empty list. Also, set ep->ovflist to NULL so that events
 606         * happening while we loop without locks are not lost. We cannot
 607         * let the poll callback queue directly on ep->rdllist,
 608         * because we want the "sproc" callback to be able to do it
 609         * in a lockless way.
 610         */
 611        spin_lock_irqsave(&ep->lock, flags);
 612        list_splice_init(&ep->rdllist, &txlist);
 613        ep->ovflist = NULL;
 614        spin_unlock_irqrestore(&ep->lock, flags);
 615
 616        /*
 617         * Now call the callback function.
 618         */
 619        error = (*sproc)(ep, &txlist, priv);
 620
 621        spin_lock_irqsave(&ep->lock, flags);
 622        /*
 623         * During the time we spent inside the "sproc" callback, some
 624         * other events might have been queued by the poll callback.
 625         * We re-insert them inside the main ready-list here.
 626         */
 627        for (nepi = ep->ovflist; (epi = nepi) != NULL;
 628             nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
 629                /*
 630                 * We need to check if the item is already in the list.
 631                 * During the "sproc" callback execution time, items are
 632                 * queued into ->ovflist but the "txlist" might already
 633                 * contain them, and the list_splice() below takes care of them.
 634                 */
 635                if (!ep_is_linked(&epi->rdllink)) {
 636                        list_add_tail(&epi->rdllink, &ep->rdllist);
 637                        ep_pm_stay_awake(epi);
 638                }
 639        }
 640        /*
 641         * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
 642         * releasing the lock, events will be queued in the normal way inside
 643         * ep->rdllist.
 644         */
 645        ep->ovflist = EP_UNACTIVE_PTR;
 646
 647        /*
 648         * Quickly re-inject items left on "txlist".
 649         */
 650        list_splice(&txlist, &ep->rdllist);
 651        __pm_relax(ep->ws);
 652
 653        if (!list_empty(&ep->rdllist)) {
 654                /*
 655                 * Wake up (if active) both the eventpoll wait list and
 656                 * the ->poll() wait list (delayed after we release the lock).
 657                 */
 658                if (waitqueue_active(&ep->wq))
 659                        wake_up_locked(&ep->wq);
 660                if (waitqueue_active(&ep->poll_wait))
 661                        pwake++;
 662        }
 663        spin_unlock_irqrestore(&ep->lock, flags);
 664
 665        mutex_unlock(&ep->mtx);
 666
 667        /* We have to call this outside the lock */
 668        if (pwake)
 669                ep_poll_safewake(&ep->poll_wait);
 670
 671        return error;
 672}
 673
 674/*
 675 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 676 * all the associated resources. Must be called with "mtx" held.
 677 */
 678static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 679{
 680        unsigned long flags;
 681        struct file *file = epi->ffd.file;
 682
 683        /*
 684         * Removes poll wait queue hooks. We _have_ to do this without holding
 685         * the "ep->lock", otherwise a deadlock might occur. This is because of
 686         * the lock acquisition order: here we would take "ep->lock" and then the
 687         * wait queue head lock when unregistering the wait queue, while the
 688         * wakeup callback runs holding the wait queue head lock and then calls
 689         * our callback, which tries to take "ep->lock".
 690         */
 691        ep_unregister_pollwait(ep, epi);
 692
 693        /* Remove the current item from the list of epoll hooks */
 694        spin_lock(&file->f_lock);
 695        if (ep_is_linked(&epi->fllink))
 696                list_del_init(&epi->fllink);
 697        spin_unlock(&file->f_lock);
 698
 699        rb_erase(&epi->rbn, &ep->rbr);
 700
 701        spin_lock_irqsave(&ep->lock, flags);
 702        if (ep_is_linked(&epi->rdllink))
 703                list_del_init(&epi->rdllink);
 704        spin_unlock_irqrestore(&ep->lock, flags);
 705
 706        wakeup_source_unregister(ep_wakeup_source(epi));
 707
 708        /* At this point it is safe to free the eventpoll item */
 709        kmem_cache_free(epi_cache, epi);
 710
 711        atomic_long_dec(&ep->user->epoll_watches);
 712
 713        return 0;
 714}
 715
 716static void ep_free(struct eventpoll *ep)
 717{
 718        struct rb_node *rbp;
 719        struct epitem *epi;
 720
 721        /* We need to release all tasks waiting on this file */
 722        if (waitqueue_active(&ep->poll_wait))
 723                ep_poll_safewake(&ep->poll_wait);
 724
 725        /*
 726         * We need to lock this because we could be hit by
 727         * eventpoll_release_file() while we're freeing the "struct eventpoll".
 728         * We do not need to hold "ep->mtx" here because the epoll file
 729         * is on the way to be removed and no one has references to it
 730         * anymore. The only hit might come from eventpoll_release_file() but
 731         * holding "epmutex" is sufficient here.
 732         */
 733        mutex_lock(&epmutex);
 734
 735        /*
 736         * Walks through the whole tree by unregistering poll callbacks.
 737         */
 738        for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
 739                epi = rb_entry(rbp, struct epitem, rbn);
 740
 741                ep_unregister_pollwait(ep, epi);
 742                cond_resched();
 743        }
 744
 745        /*
 746         * Walks through the whole tree by freeing each "struct epitem". At this
 747         * point we are sure no poll callbacks will be lingering around, and also by
 748         * holding "epmutex" we can be sure that no file cleanup code will hit
 749         * us during this operation. So we can avoid the lock on "ep->lock".
 750         * We do not need to lock ep->mtx either; we only do it to prevent
 751         * a lockdep warning.
 752         */
 753        mutex_lock(&ep->mtx);
 754        while ((rbp = rb_first(&ep->rbr)) != NULL) {
 755                epi = rb_entry(rbp, struct epitem, rbn);
 756                ep_remove(ep, epi);
 757                cond_resched();
 758        }
 759        mutex_unlock(&ep->mtx);
 760
 761        mutex_unlock(&epmutex);
 762        mutex_destroy(&ep->mtx);
 763        free_uid(ep->user);
 764        wakeup_source_unregister(ep->ws);
 765        kfree(ep);
 766}
 767
 768static int ep_eventpoll_release(struct inode *inode, struct file *file)
 769{
 770        struct eventpoll *ep = file->private_data;
 771
 772        if (ep)
 773                ep_free(ep);
 774
 775        return 0;
 776}
 777
 778static inline unsigned int ep_item_poll(struct epitem *epi, poll_table *pt)
 779{
 780        pt->_key = epi->event.events;
 781
 782        return epi->ffd.file->f_op->poll(epi->ffd.file, pt) & epi->event.events;
 783}
 784
 785static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 786                               void *priv)
 787{
 788        struct epitem *epi, *tmp;
 789        poll_table pt;
 790
 791        init_poll_funcptr(&pt, NULL);
 792
 793        list_for_each_entry_safe(epi, tmp, head, rdllink) {
 794                if (ep_item_poll(epi, &pt))
 795                        return POLLIN | POLLRDNORM;
 796                else {
 797                        /*
 798                         * Item has been dropped into the ready list by the poll
 799                         * callback, but it's not actually ready, as far as
 800                         * the caller-requested events go. We can remove it here.
 801                         */
 802                        __pm_relax(ep_wakeup_source(epi));
 803                        list_del_init(&epi->rdllink);
 804                }
 805        }
 806
 807        return 0;
 808}
 809
 810static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
 811{
 812        return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
 813}
 814
 815static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
 816{
 817        int pollflags;
 818        struct eventpoll *ep = file->private_data;
 819
 820        /* Insert inside our poll wait queue */
 821        poll_wait(file, &ep->poll_wait, wait);
 822
 823        /*
 824         * Proceed to find out if wanted events are really available inside
 825         * the ready list. This needs to be done under ep_call_nested()
 826         * supervision, since the call to f_op->poll() done on listed files
 827         * could re-enter here.
 828         */
 829        pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
 830                                   ep_poll_readyevents_proc, ep, ep, current);
 831
 832        return pollflags != -1 ? pollflags : 0;
 833}
 834
 835#ifdef CONFIG_PROC_FS
 836static int ep_show_fdinfo(struct seq_file *m, struct file *f)
 837{
 838        struct eventpoll *ep = f->private_data;
 839        struct rb_node *rbp;
 840        int ret = 0;
 841
 842        mutex_lock(&ep->mtx);
 843        for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
 844                struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
 845
 846                ret = seq_printf(m, "tfd: %8d events: %8x data: %16llx\n",
 847                                 epi->ffd.fd, epi->event.events,
 848                                 (long long)epi->event.data);
 849                if (ret)
 850                        break;
 851        }
 852        mutex_unlock(&ep->mtx);
 853
 854        return ret;
 855}
 856#endif
 857
 858/* File callbacks that implement the eventpoll file behaviour */
 859static const struct file_operations eventpoll_fops = {
 860#ifdef CONFIG_PROC_FS
 861        .show_fdinfo    = ep_show_fdinfo,
 862#endif
 863        .release        = ep_eventpoll_release,
 864        .poll           = ep_eventpoll_poll,
 865        .llseek         = noop_llseek,
 866};
 867
 868/*
 869 * This is called from eventpoll_release() to unlink files from the eventpoll
 870 * interface. We need this facility to correctly clean up files that are
 871 * closed without being removed from the eventpoll interface.
 872 */
 873void eventpoll_release_file(struct file *file)
 874{
 875        struct list_head *lsthead = &file->f_ep_links;
 876        struct eventpoll *ep;
 877        struct epitem *epi;
 878
 879        /*
 880         * We don't take "file->f_lock" here because it is not
 881         * necessary: we're in the "struct file" cleanup path, and
 882         * this means that no one is using this file anymore.
 883         * So, for example, epoll_ctl() cannot hit here since if we reach this
 884         * point, the file counter already went to zero and fget() would fail.
 885         * The only hit might come from ep_free(), but holding the mutex
 886         * correctly serializes the operation. We do need to acquire
 887         * "ep->mtx" after "epmutex" because ep_remove() requires it when called
 888         * from anywhere but ep_free().
 889         *
 890         * Besides, ep_remove() acquires the lock, so we can't hold it here.
 891         */
 892        mutex_lock(&epmutex);
 893
 894        while (!list_empty(lsthead)) {
 895                epi = list_first_entry(lsthead, struct epitem, fllink);
 896
 897                ep = epi->ep;
 898                list_del_init(&epi->fllink);
 899                mutex_lock_nested(&ep->mtx, 0);
 900                ep_remove(ep, epi);
 901                mutex_unlock(&ep->mtx);
 902        }
 903
 904        mutex_unlock(&epmutex);
 905}
 906
 907static int ep_alloc(struct eventpoll **pep)
 908{
 909        int error;
 910        struct user_struct *user;
 911        struct eventpoll *ep;
 912
 913        user = get_current_user();
 914        error = -ENOMEM;
 915        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 916        if (unlikely(!ep))
 917                goto free_uid;
 918
 919        spin_lock_init(&ep->lock);
 920        mutex_init(&ep->mtx);
 921        init_waitqueue_head(&ep->wq);
 922        init_waitqueue_head(&ep->poll_wait);
 923        INIT_LIST_HEAD(&ep->rdllist);
 924        ep->rbr = RB_ROOT;
 925        ep->ovflist = EP_UNACTIVE_PTR;
 926        ep->user = user;
 927
 928        *pep = ep;
 929
 930        return 0;
 931
 932free_uid:
 933        free_uid(user);
 934        return error;
 935}
 936
 937/*
 938 * Search the file inside the eventpoll tree. The RB tree operations
 939 * are protected by the "mtx" mutex, and ep_find() must be called with
 940 * "mtx" held.
 941 */
 942static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
 943{
 944        int kcmp;
 945        struct rb_node *rbp;
 946        struct epitem *epi, *epir = NULL;
 947        struct epoll_filefd ffd;
 948
 949        ep_set_ffd(&ffd, file, fd);
 950        for (rbp = ep->rbr.rb_node; rbp; ) {
 951                epi = rb_entry(rbp, struct epitem, rbn);
 952                kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
 953                if (kcmp > 0)
 954                        rbp = rbp->rb_right;
 955                else if (kcmp < 0)
 956                        rbp = rbp->rb_left;
 957                else {
 958                        epir = epi;
 959                        break;
 960                }
 961        }
 962
 963        return epir;
 964}
 965
 966/*
 967 * This is the callback that is passed to the wait queue wakeup
 968 * mechanism. It is called by the stored file descriptors when they
 969 * have events to report.
 970 */
 971static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
 972{
 973        int pwake = 0;
 974        unsigned long flags;
 975        struct epitem *epi = ep_item_from_wait(wait);
 976        struct eventpoll *ep = epi->ep;
 977
 978        if ((unsigned long)key & POLLFREE) {
 979                ep_pwq_from_wait(wait)->whead = NULL;
 980                /*
 981                 * whead = NULL above can race with ep_remove_wait_queue()
 982                 * which can do another remove_wait_queue() after us, so we
 983                 * can't use __remove_wait_queue(). whead->lock is held by
 984                 * the caller.
 985                 */
 986                list_del_init(&wait->task_list);
 987        }
 988
 989        spin_lock_irqsave(&ep->lock, flags);
 990
 991        /*
 992         * If the event mask does not contain any poll(2) event, we consider the
 993         * descriptor to be disabled. This condition is likely the effect of the
 994         * EPOLLONESHOT bit that disables the descriptor when an event is received,
 995         * until the next EPOLL_CTL_MOD is issued.
 996         */
 997        if (!(epi->event.events & ~EP_PRIVATE_BITS))
 998                goto out_unlock;
 999
1000        /*
1001         * Check the events coming with the callback. At this stage, not
1002         * every device reports the events in the "key" parameter of the
1003         * callback. We need to be able to handle both cases here, hence the
1004         * test for "key" != NULL before the event match test.
1005         */
1006        if (key && !((unsigned long) key & epi->event.events))
1007                goto out_unlock;
1008
1009        /*
1010         * If we are transferring events to userspace, we can hold no locks
 1011         * (because we're accessing user memory, and because of Linux f_op->poll()
1012         * semantics). All the events that happen during that period of time are
1013         * chained in ep->ovflist and requeued later on.
1014         */
1015        if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
1016                if (epi->next == EP_UNACTIVE_PTR) {
1017                        epi->next = ep->ovflist;
1018                        ep->ovflist = epi;
1019                        if (epi->ws) {
1020                                /*
1021                                 * Activate ep->ws since epi->ws may get
1022                                 * deactivated at any time.
1023                                 */
1024                                __pm_stay_awake(ep->ws);
1025                        }
1026
1027                }
1028                goto out_unlock;
1029        }
1030
1031        /* If this file is already in the ready list we exit soon */
1032        if (!ep_is_linked(&epi->rdllink)) {
1033                list_add_tail(&epi->rdllink, &ep->rdllist);
1034                ep_pm_stay_awake_rcu(epi);
1035        }
1036
1037        /*
1038         * Wake up ( if active ) both the eventpoll wait list and the ->poll()
1039         * wait list.
1040         */
1041        if (waitqueue_active(&ep->wq))
1042                wake_up_locked(&ep->wq);
1043        if (waitqueue_active(&ep->poll_wait))
1044                pwake++;
1045
1046out_unlock:
1047        spin_unlock_irqrestore(&ep->lock, flags);
1048
1049        /* We have to call this outside the lock */
1050        if (pwake)
1051                ep_poll_safewake(&ep->poll_wait);
1052
1053        return 1;
1054}
1055
1056/*
1057 * This is the callback that is used to add our wait queue to the
1058 * target file wakeup lists.
1059 */
1060static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
1061                                 poll_table *pt)
1062{
1063        struct epitem *epi = ep_item_from_epqueue(pt);
1064        struct eppoll_entry *pwq;
1065
1066        if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
1067                init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
1068                pwq->whead = whead;
1069                pwq->base = epi;
1070                add_wait_queue(whead, &pwq->wait);
1071                list_add_tail(&pwq->llink, &epi->pwqlist);
1072                epi->nwait++;
1073        } else {
1074                /* We have to signal that an error occurred */
1075                epi->nwait = -1;
1076        }
1077}
1078
1079static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
1080{
1081        int kcmp;
1082        struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
1083        struct epitem *epic;
1084
1085        while (*p) {
1086                parent = *p;
1087                epic = rb_entry(parent, struct epitem, rbn);
1088                kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
1089                if (kcmp > 0)
1090                        p = &parent->rb_right;
1091                else
1092                        p = &parent->rb_left;
1093        }
1094        rb_link_node(&epi->rbn, parent, p);
1095        rb_insert_color(&epi->rbn, &ep->rbr);
1096}
1097
1098
1099
1100#define PATH_ARR_SIZE 5
1101/*
 1102 * These are the numbers of paths of length 1 to 5 that we allow to emanate
 1103 * from a single file of interest. For example, we allow 1000 paths of length
 1104 * 1 to emanate from each file of interest. This essentially represents the
1105 * potential wakeup paths, which need to be limited in order to avoid massive
1106 * uncontrolled wakeup storms. The common use case should be a single ep which
1107 * is connected to n file sources. In this case each file source has 1 path
1108 * of length 1. Thus, the numbers below should be more than sufficient. These
1109 * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
1110 * and delete can't add additional paths. Protected by the epmutex.
1111 */
1112static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
1113static int path_count[PATH_ARR_SIZE];
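
/*
 * Illustrative example of the accounting above (made-up numbers): if a
 * single socket is watched directly by three epoll fds, and one of those
 * epoll fds is itself watched by another epoll fd, the socket has three
 * paths of length 1 and one path of length 2, well within the limits in
 * path_limits[].
 */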
1114
1115static int path_count_inc(int nests)
1116{
1117        /* Allow an arbitrary number of depth 1 paths */
1118        if (nests == 0)
1119                return 0;
1120
1121        if (++path_count[nests] > path_limits[nests])
1122                return -1;
1123        return 0;
1124}
1125
1126static void path_count_init(void)
1127{
1128        int i;
1129
1130        for (i = 0; i < PATH_ARR_SIZE; i++)
1131                path_count[i] = 0;
1132}
1133
1134static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
1135{
1136        int error = 0;
1137        struct file *file = priv;
1138        struct file *child_file;
1139        struct epitem *epi;
1140
1141        list_for_each_entry(epi, &file->f_ep_links, fllink) {
1142                child_file = epi->ep->file;
1143                if (is_file_epoll(child_file)) {
1144                        if (list_empty(&child_file->f_ep_links)) {
1145                                if (path_count_inc(call_nests)) {
1146                                        error = -1;
1147                                        break;
1148                                }
1149                        } else {
1150                                error = ep_call_nested(&poll_loop_ncalls,
1151                                                        EP_MAX_NESTS,
1152                                                        reverse_path_check_proc,
1153                                                        child_file, child_file,
1154                                                        current);
1155                        }
1156                        if (error != 0)
1157                                break;
1158                } else {
1159                        printk(KERN_ERR "reverse_path_check_proc: "
1160                                "file is not an ep!\n");
1161                }
1162        }
1163        return error;
1164}
1165
1166/**
 1167 * reverse_path_check - The tfile_check_list is a list of file *, which have
1168 *                      links that are proposed to be newly added. We need to
1169 *                      make sure that those added links don't add too many
1170 *                      paths such that we will spend all our time waking up
1171 *                      eventpoll objects.
1172 *
1173 * Returns: Returns zero if the proposed links don't create too many paths,
1174 *          -1 otherwise.
1175 */
1176static int reverse_path_check(void)
1177{
1178        int error = 0;
1179        struct file *current_file;
1180
1181        /* let's call this for all tfiles */
1182        list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
1183                path_count_init();
1184                error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1185                                        reverse_path_check_proc, current_file,
1186                                        current_file, current);
1187                if (error)
1188                        break;
1189        }
1190        return error;
1191}
1192
1193static int ep_create_wakeup_source(struct epitem *epi)
1194{
1195        const char *name;
1196        struct wakeup_source *ws;
1197
1198        if (!epi->ep->ws) {
1199                epi->ep->ws = wakeup_source_register("eventpoll");
1200                if (!epi->ep->ws)
1201                        return -ENOMEM;
1202        }
1203
1204        name = epi->ffd.file->f_path.dentry->d_name.name;
1205        ws = wakeup_source_register(name);
1206
1207        if (!ws)
1208                return -ENOMEM;
1209        rcu_assign_pointer(epi->ws, ws);
1210
1211        return 0;
1212}
1213
1214/* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
1215static noinline void ep_destroy_wakeup_source(struct epitem *epi)
1216{
1217        struct wakeup_source *ws = ep_wakeup_source(epi);
1218
1219        RCU_INIT_POINTER(epi->ws, NULL);
1220
1221        /*
1222         * wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
1223         * used internally by wakeup_source_remove, too (called by
1224         * wakeup_source_unregister), so we cannot use call_rcu
1225         */
1226        synchronize_rcu();
1227        wakeup_source_unregister(ws);
1228}
1229
1230/*
1231 * Must be called with "mtx" held.
1232 */
1233static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
1234                     struct file *tfile, int fd)
1235{
1236        int error, revents, pwake = 0;
1237        unsigned long flags;
1238        long user_watches;
1239        struct epitem *epi;
1240        struct ep_pqueue epq;
1241
1242        user_watches = atomic_long_read(&ep->user->epoll_watches);
1243        if (unlikely(user_watches >= max_user_watches))
1244                return -ENOSPC;
1245        if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
1246                return -ENOMEM;
1247
 1248        /* Item initialization follows here ... */
1249        INIT_LIST_HEAD(&epi->rdllink);
1250        INIT_LIST_HEAD(&epi->fllink);
1251        INIT_LIST_HEAD(&epi->pwqlist);
1252        epi->ep = ep;
1253        ep_set_ffd(&epi->ffd, tfile, fd);
1254        epi->event = *event;
1255        epi->nwait = 0;
1256        epi->next = EP_UNACTIVE_PTR;
1257        if (epi->event.events & EPOLLWAKEUP) {
1258                error = ep_create_wakeup_source(epi);
1259                if (error)
1260                        goto error_create_wakeup_source;
1261        } else {
1262                RCU_INIT_POINTER(epi->ws, NULL);
1263        }
1264
1265        /* Initialize the poll table using the queue callback */
1266        epq.epi = epi;
1267        init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
1268
1269        /*
1270         * Attach the item to the poll hooks and get current event bits.
1271         * We can safely use the file* here because its usage count has
1272         * been increased by the caller of this function. Note that after
1273         * this operation completes, the poll callback can start hitting
1274         * the new item.
1275         */
1276        revents = ep_item_poll(epi, &epq.pt);
1277
1278        /*
1279         * We have to check if something went wrong during the poll wait queue
 1280         * install process. Namely, an allocation for a wait queue may have
 1281         * failed due to high memory pressure.
1282         */
1283        error = -ENOMEM;
1284        if (epi->nwait < 0)
1285                goto error_unregister;
1286
 1287        /* Add the current item to the list of active epoll hooks for this file */
1288        spin_lock(&tfile->f_lock);
1289        list_add_tail(&epi->fllink, &tfile->f_ep_links);
1290        spin_unlock(&tfile->f_lock);
1291
1292        /*
1293         * Add the current item to the RB tree. All RB tree operations are
1294         * protected by "mtx", and ep_insert() is called with "mtx" held.
1295         */
1296        ep_rbtree_insert(ep, epi);
1297
1298        /* now check if we've created too many backpaths */
1299        error = -EINVAL;
1300        if (reverse_path_check())
1301                goto error_remove_epi;
1302
1303        /* We have to drop the new item inside our item list to keep track of it */
1304        spin_lock_irqsave(&ep->lock, flags);
1305
1306        /* If the file is already "ready" we drop it inside the ready list */
1307        if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
1308                list_add_tail(&epi->rdllink, &ep->rdllist);
1309                ep_pm_stay_awake(epi);
1310
1311                /* Notify waiting tasks that events are available */
1312                if (waitqueue_active(&ep->wq))
1313                        wake_up_locked(&ep->wq);
1314                if (waitqueue_active(&ep->poll_wait))
1315                        pwake++;
1316        }
1317
1318        spin_unlock_irqrestore(&ep->lock, flags);
1319
1320        atomic_long_inc(&ep->user->epoll_watches);
1321
1322        /* We have to call this outside the lock */
1323        if (pwake)
1324                ep_poll_safewake(&ep->poll_wait);
1325
1326        return 0;
1327
1328error_remove_epi:
1329        spin_lock(&tfile->f_lock);
1330        if (ep_is_linked(&epi->fllink))
1331                list_del_init(&epi->fllink);
1332        spin_unlock(&tfile->f_lock);
1333
1334        rb_erase(&epi->rbn, &ep->rbr);
1335
1336error_unregister:
1337        ep_unregister_pollwait(ep, epi);
1338
1339        /*
 1340         * We need to do this because an event could have arrived on some
1341         * allocated wait queue. Note that we don't care about the ep->ovflist
1342         * list, since that is used/cleaned only inside a section bound by "mtx".
1343         * And ep_insert() is called with "mtx" held.
1344         */
1345        spin_lock_irqsave(&ep->lock, flags);
1346        if (ep_is_linked(&epi->rdllink))
1347                list_del_init(&epi->rdllink);
1348        spin_unlock_irqrestore(&ep->lock, flags);
1349
1350        wakeup_source_unregister(ep_wakeup_source(epi));
1351
1352error_create_wakeup_source:
1353        kmem_cache_free(epi_cache, epi);
1354
1355        return error;
1356}
1357
1358/*
1359 * Modify the interest event mask by dropping an event if the new mask
1360 * has a match in the current file status. Must be called with "mtx" held.
1361 */
1362static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
1363{
1364        int pwake = 0;
1365        unsigned int revents;
1366        poll_table pt;
1367
1368        init_poll_funcptr(&pt, NULL);
1369
1370        /*
1371         * Set the new event interest mask before calling f_op->poll();
1372         * otherwise we might miss an event that happens between the
1373         * f_op->poll() call and the new event set registering.
1374         */
1375        epi->event.events = event->events; /* need barrier below */
1376        epi->event.data = event->data; /* protected by mtx */
1377        if (epi->event.events & EPOLLWAKEUP) {
1378                if (!ep_has_wakeup_source(epi))
1379                        ep_create_wakeup_source(epi);
1380        } else if (ep_has_wakeup_source(epi)) {
1381                ep_destroy_wakeup_source(epi);
1382        }
1383
1384        /*
1385         * The following barrier has two effects:
1386         *
1387         * 1) Flush epi changes above to other CPUs.  This ensures
1388         *    we do not miss events from ep_poll_callback if an
1389         *    event occurs immediately after we call f_op->poll().
1390         *    We need this because we did not take ep->lock while
1391         *    changing epi above (but ep_poll_callback does take
1392         *    ep->lock).
1393         *
1394         * 2) We also need to ensure we do not miss _past_ events
1395         *    when calling f_op->poll().  This barrier also
1396         *    pairs with the barrier in wq_has_sleeper (see
1397         *    comments for wq_has_sleeper).
1398         *
1399         * This barrier will now guarantee ep_poll_callback or f_op->poll
1400         * (or both) will notice the readiness of an item.
1401         */
1402        smp_mb();
1403
1404        /*
1405         * Get current event bits. We can safely use the file* here because
1406         * its usage count has been increased by the caller of this function.
1407         */
1408        revents = ep_item_poll(epi, &pt);
1409
1410        /*
1411         * If the item is "hot" and it is not registered inside the ready
1412         * list, push it inside.
1413         */
1414        if (revents & event->events) {
1415                spin_lock_irq(&ep->lock);
1416                if (!ep_is_linked(&epi->rdllink)) {
1417                        list_add_tail(&epi->rdllink, &ep->rdllist);
1418                        ep_pm_stay_awake(epi);
1419
1420                        /* Notify waiting tasks that events are available */
1421                        if (waitqueue_active(&ep->wq))
1422                                wake_up_locked(&ep->wq);
1423                        if (waitqueue_active(&ep->poll_wait))
1424                                pwake++;
1425                }
1426                spin_unlock_irq(&ep->lock);
1427        }
1428
1429        /* We have to call this outside the lock */
1430        if (pwake)
1431                ep_poll_safewake(&ep->poll_wait);
1432
1433        return 0;
1434}
1435
1436static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1437                               void *priv)
1438{
1439        struct ep_send_events_data *esed = priv;
1440        int eventcnt;
1441        unsigned int revents;
1442        struct epitem *epi;
1443        struct epoll_event __user *uevent;
1444        struct wakeup_source *ws;
1445        poll_table pt;
1446
1447        init_poll_funcptr(&pt, NULL);
1448
1449        /*
1450         * We can loop without lock because we are passed a task private list.
1451         * Items cannot vanish during the loop because ep_scan_ready_list() is
1452         * holding "mtx" during this call.
1453         */
1454        for (eventcnt = 0, uevent = esed->events;
1455             !list_empty(head) && eventcnt < esed->maxevents;) {
1456                epi = list_first_entry(head, struct epitem, rdllink);
1457
1458                /*
1459                 * Activate ep->ws before deactivating epi->ws to prevent
 1460                 * triggering auto-suspend here (in case we reactivate epi->ws
1461                 * below).
1462                 *
1463                 * This could be rearranged to delay the deactivation of epi->ws
1464                 * instead, but then epi->ws would temporarily be out of sync
1465                 * with ep_is_linked().
1466                 */
1467                ws = ep_wakeup_source(epi);
1468                if (ws) {
1469                        if (ws->active)
1470                                __pm_stay_awake(ep->ws);
1471                        __pm_relax(ws);
1472                }
1473
1474                list_del_init(&epi->rdllink);
1475
1476                revents = ep_item_poll(epi, &pt);
1477
1478                /*
 1479                 * If the event mask intersects the caller-requested one,
1480                 * deliver the event to userspace. Again, ep_scan_ready_list()
1481                 * is holding "mtx", so no operations coming from userspace
1482                 * can change the item.
1483                 */
1484                if (revents) {
1485                        if (__put_user(revents, &uevent->events) ||
1486                            __put_user(epi->event.data, &uevent->data)) {
1487                                list_add(&epi->rdllink, head);
1488                                ep_pm_stay_awake(epi);
1489                                return eventcnt ? eventcnt : -EFAULT;
1490                        }
1491                        eventcnt++;
1492                        uevent++;
1493                        if (epi->event.events & EPOLLONESHOT)
1494                                epi->event.events &= EP_PRIVATE_BITS;
1495                        else if (!(epi->event.events & EPOLLET)) {
1496                                /*
1497                                 * If this file has been added in Level
1498                                 * Trigger mode, we need to insert it back
1499                                 * into the ready list, so that the next
1500                                 * call to epoll_wait() will check the event
1501                                 * availability again. At this point, no one can insert
1502                                 * into ep->rdllist besides us. The epoll_ctl()
1503                                 * callers are locked out by
1504                                 * ep_scan_ready_list() holding "mtx" and the
1505                                 * poll callback will queue them in ep->ovflist.
1506                                 */
1507                                list_add_tail(&epi->rdllink, &ep->rdllist);
1508                                ep_pm_stay_awake(epi);
1509                        }
1510                }
1511        }
1512
1513        return eventcnt;
1514}
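
/*
 * Illustrative sketch (not part of this file): as a consequence of the
 * EPOLLONESHOT handling above, once a one-shot event has been delivered the
 * item is masked down to EP_PRIVATE_BITS, so the fd stays in the interest
 * set but is disarmed until userspace re-arms it. Assuming the usual
 * <sys/epoll.h> API, with "epfd" and a connected socket "sock_fd" both
 * hypothetical descriptors:
 *
 *	struct epoll_event ev = {
 *		.events  = EPOLLIN | EPOLLONESHOT,
 *		.data.fd = sock_fd,
 *	};
 *
 *	After the one-shot event has been consumed, re-arm the item:
 *
 *	if (epoll_ctl(epfd, EPOLL_CTL_MOD, sock_fd, &ev) < 0)
 *		perror("epoll_ctl(EPOLL_CTL_MOD)");
 */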
1515
1516static int ep_send_events(struct eventpoll *ep,
1517                          struct epoll_event __user *events, int maxevents)
1518{
1519        struct ep_send_events_data esed;
1520
1521        esed.maxevents = maxevents;
1522        esed.events = events;
1523
1524        return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
1525}
1526
1527static inline struct timespec ep_set_mstimeout(long ms)
1528{
1529        struct timespec now, ts = {
1530                .tv_sec = ms / MSEC_PER_SEC,
1531                .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
1532        };
1533
1534        ktime_get_ts(&now);
1535        return timespec_add_safe(now, ts);
1536}
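
/*
 * Worked example (a sketch, numbers filled in by hand): ep_set_mstimeout(1250)
 * builds ts = { .tv_sec = 1250 / 1000 = 1, .tv_nsec = (1250 % 1000) *
 * NSEC_PER_MSEC = 250000000 } and then adds the current time obtained from
 * ktime_get_ts(), going through timespec_add_safe() so that a huge timeout
 * saturates instead of overflowing.
 */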
1537
1538/**
1539 * ep_poll - Retrieves ready events and delivers them to the caller-supplied
1540 *           event buffer.
1541 *
1542 * @ep: Pointer to the eventpoll context.
1543 * @events: Pointer to the userspace buffer where the ready events should be
1544 *          stored.
1545 * @maxevents: Size (in terms of number of events) of the caller event buffer.
1546 * @timeout: Maximum timeout for the ready events fetch operation, in
1547 *           milliseconds. If the @timeout is zero, the function will not block,
1548 *           while if the @timeout is less than zero, the function will block
1549 *           until at least one event has been retrieved (or an error
1550 *           occurred).
1551 *
1552 * Returns: Returns the number of ready events which have been fetched, or an
1553 *          error code, in case of error.
1554 */
1555static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1556                   int maxevents, long timeout)
1557{
1558        int res = 0, eavail, timed_out = 0;
1559        unsigned long flags;
1560        long slack = 0;
1561        wait_queue_t wait;
1562        ktime_t expires, *to = NULL;
1563
1564        if (timeout > 0) {
1565                struct timespec end_time = ep_set_mstimeout(timeout);
1566
1567                slack = select_estimate_accuracy(&end_time);
1568                to = &expires;
1569                *to = timespec_to_ktime(end_time);
1570        } else if (timeout == 0) {
1571                /*
1572                 * Avoid the unnecessary trip to the wait queue loop if the
1573                 * caller specified a non-blocking operation.
1574                 */
1575                timed_out = 1;
1576                spin_lock_irqsave(&ep->lock, flags);
1577                goto check_events;
1578        }
1579
1580fetch_events:
1581        spin_lock_irqsave(&ep->lock, flags);
1582
1583        if (!ep_events_available(ep)) {
1584                /*
1585                 * We don't have any available event to return to the caller.
1586                 * We need to sleep here, and we will be woken up by
1587                 * ep_poll_callback() when events become available.
1588                 */
1589                init_waitqueue_entry(&wait, current);
1590                __add_wait_queue_exclusive(&ep->wq, &wait);
1591
1592                for (;;) {
1593                        /*
1594                         * We don't want to sleep if the ep_poll_callback() sends us
1595                         * a wakeup in between. That's why we set the task state
1596                         * to TASK_INTERRUPTIBLE before doing the checks.
1597                         */
1598                        set_current_state(TASK_INTERRUPTIBLE);
1599                        if (ep_events_available(ep) || timed_out)
1600                                break;
1601                        if (signal_pending(current)) {
1602                                res = -EINTR;
1603                                break;
1604                        }
1605
1606                        spin_unlock_irqrestore(&ep->lock, flags);
1607                        if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
1608                                timed_out = 1;
1609
1610                        spin_lock_irqsave(&ep->lock, flags);
1611                }
1612                __remove_wait_queue(&ep->wq, &wait);
1613
1614                set_current_state(TASK_RUNNING);
1615        }
1616check_events:
1617        /* Is it worth trying to dig for events? */
1618        eavail = ep_events_available(ep);
1619
1620        spin_unlock_irqrestore(&ep->lock, flags);
1621
1622        /*
1623         * Try to transfer events to user space. In case we get 0 events and
1624         * there's still timeout left over, we try again in search of
1625         * more luck.
1626         */
1627        if (!res && eavail &&
1628            !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
1629                goto fetch_events;
1630
1631        return res;
1632}
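
/*
 * Illustrative sketch (not part of this file): the three @timeout modes
 * described above, as seen from userspace through epoll_wait(2). "epfd" is
 * a hypothetical epoll descriptor:
 *
 *	struct epoll_event evs[16];
 *	int n;
 *
 *	n = epoll_wait(epfd, evs, 16, 0);	(timeout == 0: never blocks)
 *	n = epoll_wait(epfd, evs, 16, 500);	(timeout > 0: waits up to 500 ms)
 *	n = epoll_wait(epfd, evs, 16, -1);	(timeout < 0: waits until an event
 *						 arrives or a signal is caught)
 */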
1633
1634/**
1635 * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
1636 *                      API, to verify that adding an epoll file inside another
1637 *                      epoll structure does not violate the constraints, in
1638 *                      terms of closed loops or too-deep chains (which can
1639 *                      result in excessive stack usage).
1640 *
1641 * @priv: Pointer to the epoll file to be currently checked.
1642 * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
1643 *          data structure pointer.
1644 * @call_nests: Current depth of the @ep_call_nested() call stack.
1645 *
1646 * Returns: Returns zero if adding the epoll @file inside current epoll
1647 *          structure @ep does not violate the constraints, or -1 otherwise.
1648 */
1649static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1650{
1651        int error = 0;
1652        struct file *file = priv;
1653        struct eventpoll *ep = file->private_data;
1654        struct eventpoll *ep_tovisit;
1655        struct rb_node *rbp;
1656        struct epitem *epi;
1657
1658        mutex_lock_nested(&ep->mtx, call_nests + 1);
1659        ep->visited = 1;
1660        list_add(&ep->visited_list_link, &visited_list);
1661        for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1662                epi = rb_entry(rbp, struct epitem, rbn);
1663                if (unlikely(is_file_epoll(epi->ffd.file))) {
1664                        ep_tovisit = epi->ffd.file->private_data;
1665                        if (ep_tovisit->visited)
1666                                continue;
1667                        error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1668                                        ep_loop_check_proc, epi->ffd.file,
1669                                        ep_tovisit, current);
1670                        if (error != 0)
1671                                break;
1672                } else {
1673                        /*
1674                         * If we've reached a file that is not associated with
1675                         * an ep, then we need to check if the newly added
1676                         * links are going to add too many wakeup paths. We do
1677                         * this by adding it to the tfile_check_list, if it's
1678                         * not already there, and calling reverse_path_check()
1679                         * during ep_insert().
1680                         */
1681                        if (list_empty(&epi->ffd.file->f_tfile_llink))
1682                                list_add(&epi->ffd.file->f_tfile_llink,
1683                                         &tfile_check_list);
1684                }
1685        }
1686        mutex_unlock(&ep->mtx);
1687
1688        return error;
1689}
1690
1691/**
1692 * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
1693 *                 inside another epoll file (represented by @ep) does not create
1694 *                 closed loops or too-deep chains.
1695 *
1696 * @ep: Pointer to the epoll private data structure.
1697 * @file: Pointer to the epoll file to be checked.
1698 *
1699 * Returns: Returns zero if adding the epoll @file inside current epoll
1700 *          structure @ep does not violate the constraints, or -1 otherwise.
1701 */
1702static int ep_loop_check(struct eventpoll *ep, struct file *file)
1703{
1704        int ret;
1705        struct eventpoll *ep_cur, *ep_next;
1706
1707        ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1708                              ep_loop_check_proc, file, ep, current);
1709        /* clear visited list */
1710        list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
1711                                                        visited_list_link) {
1712                ep_cur->visited = 0;
1713                list_del(&ep_cur->visited_list_link);
1714        }
1715        return ret;
1716}
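
/*
 * Illustrative sketch (not part of this file): the check above is what makes
 * the second epoll_ctl() below fail, since completing it would close a cycle
 * of epoll files (A watches B while B watches A):
 *
 *	int a = epoll_create1(0);
 *	int b = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = b };
 *
 *	epoll_ctl(a, EPOLL_CTL_ADD, b, &ev);	(succeeds)
 *	ev.data.fd = a;
 *	epoll_ctl(b, EPOLL_CTL_ADD, a, &ev);	(fails with errno == ELOOP)
 */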
1717
1718static void clear_tfile_check_list(void)
1719{
1720        struct file *file;
1721
1722        /* first clear the tfile_check_list */
1723        while (!list_empty(&tfile_check_list)) {
1724                file = list_first_entry(&tfile_check_list, struct file,
1725                                        f_tfile_llink);
1726                list_del_init(&file->f_tfile_llink);
1727        }
1728        INIT_LIST_HEAD(&tfile_check_list);
1729}
1730
1731/*
1732 * Open an eventpoll file descriptor.
1733 */
1734SYSCALL_DEFINE1(epoll_create1, int, flags)
1735{
1736        int error, fd;
1737        struct eventpoll *ep = NULL;
1738        struct file *file;
1739
1740        /* Check the EPOLL_* constant for consistency.  */
1741        BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
1742
1743        if (flags & ~EPOLL_CLOEXEC)
1744                return -EINVAL;
1745        /*
1746         * Create the internal data structure ("struct eventpoll").
1747         */
1748        error = ep_alloc(&ep);
1749        if (error < 0)
1750                return error;
1751        /*
1752         * Creates all the items needed to set up an eventpoll file. That is,
1753         * a file structure and a free file descriptor.
1754         */
1755        fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
1756        if (fd < 0) {
1757                error = fd;
1758                goto out_free_ep;
1759        }
1760        file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
1761                                 O_RDWR | (flags & O_CLOEXEC));
1762        if (IS_ERR(file)) {
1763                error = PTR_ERR(file);
1764                goto out_free_fd;
1765        }
1766        ep->file = file;
1767        fd_install(fd, file);
1768        return fd;
1769
1770out_free_fd:
1771        put_unused_fd(fd);
1772out_free_ep:
1773        ep_free(ep);
1774        return error;
1775}
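
/*
 * Illustrative sketch (not part of this file): typical userspace use of
 * epoll_create1(2). EPOLL_CLOEXEC is the only flag accepted here; any other
 * bit makes the call fail with EINVAL:
 *
 *	#include <sys/epoll.h>
 *	#include <stdio.h>
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	if (epfd < 0)
 *		perror("epoll_create1");
 */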
1776
1777SYSCALL_DEFINE1(epoll_create, int, size)
1778{
1779        if (size <= 0)
1780                return -EINVAL;
1781
1782        return sys_epoll_create1(0);
1783}
1784
1785/*
1786 * The following function implements the controller interface for
1787 * the eventpoll file that enables the insertion/removal/change of
1788 * file descriptors inside the interest set.
1789 */
1790SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1791                struct epoll_event __user *, event)
1792{
1793        int error;
1794        int did_lock_epmutex = 0;
1795        struct fd f, tf;
1796        struct eventpoll *ep;
1797        struct epitem *epi;
1798        struct epoll_event epds;
1799
1800        error = -EFAULT;
1801        if (ep_op_has_event(op) &&
1802            copy_from_user(&epds, event, sizeof(struct epoll_event)))
1803                goto error_return;
1804
1805        error = -EBADF;
1806        f = fdget(epfd);
1807        if (!f.file)
1808                goto error_return;
1809
1810        /* Get the "struct file *" for the target file */
1811        tf = fdget(fd);
1812        if (!tf.file)
1813                goto error_fput;
1814
1815        /* The target file descriptor must support poll */
1816        error = -EPERM;
1817        if (!tf.file->f_op || !tf.file->f_op->poll)
1818                goto error_tgt_fput;
1819
1820        /* Check if EPOLLWAKEUP is allowed */
1821        if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
1822                epds.events &= ~EPOLLWAKEUP;
1823
1824        /*
1825         * We have to check that the file structure underneath the file descriptor
1826         * the user passed to us _is_ an eventpoll file. We also do not permit
1827         * adding an epoll file descriptor inside itself.
1828         */
1829        error = -EINVAL;
1830        if (f.file == tf.file || !is_file_epoll(f.file))
1831                goto error_tgt_fput;
1832
1833        /*
1834         * At this point it is safe to assume that the "private_data" contains
1835         * our own data structure.
1836         */
1837        ep = f.file->private_data;
1838
1839        /*
1840         * When we insert an epoll file descriptor inside another epoll file
1841         * descriptor, there is the chance of creating closed loops, which are
1842         * better handled here than in more critical paths. While we are
1843         * checking for loops we also determine the list of files reachable
1844         * and hang them on the tfile_check_list, so we can check that we
1845         * haven't created too many possible wakeup paths.
1846         *
1847         * We need to hold the epmutex across both ep_insert and ep_remove
1848         * because we want to make sure we are looking at a coherent view of
1849         * the epoll network.
1850         */
1851        if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
1852                mutex_lock(&epmutex);
1853                did_lock_epmutex = 1;
1854        }
1855        if (op == EPOLL_CTL_ADD) {
1856                if (is_file_epoll(tf.file)) {
1857                        error = -ELOOP;
1858                        if (ep_loop_check(ep, tf.file) != 0) {
1859                                clear_tfile_check_list();
1860                                goto error_tgt_fput;
1861                        }
1862                } else
1863                        list_add(&tf.file->f_tfile_llink, &tfile_check_list);
1864        }
1865
1866        mutex_lock_nested(&ep->mtx, 0);
1867
1868        /*
1869         * Try to look up the file inside our RB tree. Since we grabbed "mtx"
1870         * above, we can be sure to be able to use the item looked up by
1871         * ep_find() till we release the mutex.
1872         */
1873        epi = ep_find(ep, tf.file, fd);
1874
1875        error = -EINVAL;
1876        switch (op) {
1877        case EPOLL_CTL_ADD:
1878                if (!epi) {
1879                        epds.events |= POLLERR | POLLHUP;
1880                        error = ep_insert(ep, &epds, tf.file, fd);
1881                } else
1882                        error = -EEXIST;
1883                clear_tfile_check_list();
1884                break;
1885        case EPOLL_CTL_DEL:
1886                if (epi)
1887                        error = ep_remove(ep, epi);
1888                else
1889                        error = -ENOENT;
1890                break;
1891        case EPOLL_CTL_MOD:
1892                if (epi) {
1893                        epds.events |= POLLERR | POLLHUP;
1894                        error = ep_modify(ep, epi, &epds);
1895                } else
1896                        error = -ENOENT;
1897                break;
1898        }
1899        mutex_unlock(&ep->mtx);
1900
1901error_tgt_fput:
1902        if (did_lock_epmutex)
1903                mutex_unlock(&epmutex);
1904
1905        fdput(tf);
1906error_fput:
1907        fdput(f);
1908error_return:
1909
1910        return error;
1911}
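
/*
 * Illustrative sketch (not part of this file): the three controller
 * operations as issued from userspace; "epfd" is an epoll descriptor and
 * "fd" a hypothetical pollable, non-epoll descriptor such as a socket:
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
 *
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);	(insert into the set)
 *	ev.events = EPOLLIN | EPOLLOUT;
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);	(change the event mask)
 *	epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL);	(remove from the set)
 */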
1912
1913/*
1914 * Implement the event wait interface for the eventpoll file. It is the kernel
1915 * part of the user space epoll_wait(2).
1916 */
1917SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
1918                int, maxevents, int, timeout)
1919{
1920        int error;
1921        struct fd f;
1922        struct eventpoll *ep;
1923
1924        /* The maximum number of events must be greater than zero */
1925        if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
1926                return -EINVAL;
1927
1928        /* Verify that the area passed by the user is writeable */
1929        if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event)))
1930                return -EFAULT;
1931
1932        /* Get the "struct file *" for the eventpoll file */
1933        f = fdget(epfd);
1934        if (!f.file)
1935                return -EBADF;
1936
1937        /*
1938         * We have to check that the file structure underneath the fd
1939         * the user passed to us _is_ an eventpoll file.
1940         */
1941        error = -EINVAL;
1942        if (!is_file_epoll(f.file))
1943                goto error_fput;
1944
1945        /*
1946         * At this point it is safe to assume that the "private_data" contains
1947         * our own data structure.
1948         */
1949        ep = f.file->private_data;
1950
1951        /* Time to fish for events ... */
1952        error = ep_poll(ep, events, maxevents, timeout);
1953
1954error_fput:
1955        fdput(f);
1956        return error;
1957}
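
/*
 * Illustrative sketch (not part of this file): a minimal userspace wait loop
 * built on epoll_wait(2); "epfd" and "handle_io()" are hypothetical:
 *
 *	struct epoll_event evs[64];
 *	int i, n;
 *
 *	for (;;) {
 *		n = epoll_wait(epfd, evs, 64, -1);
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		for (i = 0; i < n; i++)
 *			handle_io(evs[i].data.fd, evs[i].events);
 *	}
 */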
1958
1959/*
1960 * Implement the event wait interface for the eventpoll file. It is the kernel
1961 * part of the user space epoll_pwait(2).
1962 */
1963SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
1964                int, maxevents, int, timeout, const sigset_t __user *, sigmask,
1965                size_t, sigsetsize)
1966{
1967        int error;
1968        sigset_t ksigmask, sigsaved;
1969
1970        /*
1971         * If the caller wants a certain signal mask to be set during the wait,
1972         * we apply it here.
1973         */
1974        if (sigmask) {
1975                if (sigsetsize != sizeof(sigset_t))
1976                        return -EINVAL;
1977                if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
1978                        return -EFAULT;
1979                sigsaved = current->blocked;
1980                set_current_blocked(&ksigmask);
1981        }
1982
1983        error = sys_epoll_wait(epfd, events, maxevents, timeout);
1984
1985        /*
1986         * If we changed the signal mask, we need to restore the original one.
1987         * In case we've got a signal while waiting, we do not restore the
1988         * signal mask yet, and we allow do_signal() to deliver the signal on
1989         * the way back to userspace, before the signal mask is restored.
1990         */
1991        if (sigmask) {
1992                if (error == -EINTR) {
1993                        memcpy(&current->saved_sigmask, &sigsaved,
1994                               sizeof(sigsaved));
1995                        set_restore_sigmask();
1996                } else
1997                        set_current_blocked(&sigsaved);
1998        }
1999
2000        return error;
2001}
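
/*
 * Illustrative sketch (not part of this file): like pselect(2), the point of
 * epoll_pwait(2) is that installing the temporary signal mask and starting
 * the wait happen atomically. A common userspace pattern is to keep a signal
 * blocked in normal code and unblock it only for the duration of the wait:
 *
 *	sigset_t waitmask;
 *	struct epoll_event evs[64];
 *	int n;
 *
 *	sigemptyset(&waitmask);		(nothing blocked while we sleep)
 *	n = epoll_pwait(epfd, evs, 64, -1, &waitmask);
 *	if (n < 0 && errno == EINTR)
 *		fprintf(stderr, "interrupted by a signal during the wait\n");
 */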
2002
2003#ifdef CONFIG_COMPAT
2004COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
2005                        struct epoll_event __user *, events,
2006                        int, maxevents, int, timeout,
2007                        const compat_sigset_t __user *, sigmask,
2008                        compat_size_t, sigsetsize)
2009{
2010        long err;
2011        compat_sigset_t csigmask;
2012        sigset_t ksigmask, sigsaved;
2013
2014        /*
2015         * If the caller wants a certain signal mask to be set during the wait,
2016         * we apply it here.
2017         */
2018        if (sigmask) {
2019                if (sigsetsize != sizeof(compat_sigset_t))
2020                        return -EINVAL;
2021                if (copy_from_user(&csigmask, sigmask, sizeof(csigmask)))
2022                        return -EFAULT;
2023                sigset_from_compat(&ksigmask, &csigmask);
2024                sigsaved = current->blocked;
2025                set_current_blocked(&ksigmask);
2026        }
2027
2028        err = sys_epoll_wait(epfd, events, maxevents, timeout);
2029
2030        /*
2031         * If we changed the signal mask, we need to restore the original one.
2032         * In case we've got a signal while waiting, we do not restore the
2033         * signal mask yet, and we allow do_signal() to deliver the signal on
2034         * the way back to userspace, before the signal mask is restored.
2035         */
2036        if (sigmask) {
2037                if (err == -EINTR) {
2038                        memcpy(&current->saved_sigmask, &sigsaved,
2039                               sizeof(sigsaved));
2040                        set_restore_sigmask();
2041                } else
2042                        set_current_blocked(&sigsaved);
2043        }
2044
2045        return err;
2046}
2047#endif
2048
2049static int __init eventpoll_init(void)
2050{
2051        struct sysinfo si;
2052
2053        si_meminfo(&si);
2054        /*
2055         * Allows up to 4% of low memory to be allocated for epoll watches (per user).
2056         */
2057        max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
2058                EP_ITEM_COST;
2059        BUG_ON(max_user_watches < 0);
2060
2061        /*
2062         * Initialize the structure used to perform epoll file descriptor
2063         * inclusion loops checks.
2064         */
2065        ep_nested_calls_init(&poll_loop_ncalls);
2066
2067        /* Initialize the structure used to perform safe poll wait head wake ups */
2068        ep_nested_calls_init(&poll_safewake_ncalls);
2069
2070        /* Initialize the structure used to perform a file's f_op->poll() calls */
2071        ep_nested_calls_init(&poll_readywalk_ncalls);
2072
2073        /*
2074         * We can have many thousands of epitems, so prevent this from
2075         * using an extra cache line on 64-bit (and smaller) CPUs
2076         */
2077        BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
2078
2079        /* Allocates slab cache used to allocate "struct epitem" items */
2080        epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
2081                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2082
2083        /* Allocates slab cache used to allocate "struct eppoll_entry" */
2084        pwq_cache = kmem_cache_create("eventpoll_pwq",
2085                        sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);
2086
2087        return 0;
2088}
2089fs_initcall(eventpoll_init);
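
/*
 * Worked example (a sketch, with assumed numbers): on a machine with 1 GiB of
 * low memory and 4 KiB pages, eventpoll_init() computes
 * (262144 / 25) << PAGE_SHIFT, roughly 41 MiB, as the 4% budget; dividing
 * that by EP_ITEM_COST (a couple of hundred bytes per watch) caps
 * max_user_watches at roughly a couple of hundred thousand watches per user.
 */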
2090