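/*
 * fs/eventpoll.c (efficient event retrieval implementation)
 */
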
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
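
/*
 * Locking order (outermost to innermost), as derived from the code below:
 *
 * 1) epmutex  (global mutex; taken for the loop/path checks and by
 *              ep_free() and eventpoll_release_file())
 * 2) ep->mtx  (per-eventpoll mutex; serializes epoll_ctl() against
 *              ep_scan_ready_list() and f_op->poll() calls)
 * 3) ep->lock (per-eventpoll spinlock; protects the ready list and the
 *              ovflist, and is taken from the wakeup callback, hence
 *              always acquired irq-safe)
 */
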
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)

#define EPOLLINOUT_BITS (POLLIN | POLLOUT)

#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | POLLERR | POLLHUP | \
				EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)

#define EP_MAX_NESTS 4

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))

struct epoll_filefd {
	struct file *file;
	int fd;
} __packed;
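
/*
 * Structures used to track possibly nested calls: each task performing
 * a nested call is identified by a (cookie, ctx) pair queued on the
 * "tasks_call_list" below, so that ep_call_nested() can detect both
 * recursion beyond the allowed depth and loop cycles.
 */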
struct nested_call_node {
	struct list_head llink;
	void *cookie;
	void *ctx;
};

struct nested_calls {
	struct list_head tasks_call_list;
	spinlock_t lock;
};
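
/*
 * Each file descriptor added to an eventpoll interface will have an
 * entry of this type linked into the "rbr" RB tree. "rdllink" links the
 * item into the ready list, "next" into the "ovflist" singly linked
 * chain, and "pwqlist" holds the poll wait queues this item is hooked on.
 */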
struct epitem {
	union {
		struct rb_node rbn;
		struct rcu_head rcu;
	};

	struct list_head rdllink;

	struct epitem *next;

	struct epoll_filefd ffd;

	int nwait;

	struct list_head pwqlist;

	struct eventpoll *ep;

	struct list_head fllink;

	struct wakeup_source __rcu *ws;

	struct epoll_event event;
};
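
/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */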
struct eventpoll {
	spinlock_t lock;

	struct mutex mtx;

	wait_queue_head_t wq;

	wait_queue_head_t poll_wait;

	struct list_head rdllist;

	struct rb_root rbr;

	struct epitem *ovflist;

	struct wakeup_source *ws;

	struct user_struct *user;

	struct file *file;

	int visited;
	struct list_head visited_list_link;
};
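
/* Wait structure used by the poll hooks */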
struct eppoll_entry {
	struct list_head llink;

	struct epitem *base;

	wait_queue_t wait;

	wait_queue_head_t *whead;
};

struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

struct ep_send_events_data {
	int maxevents;
	struct epoll_event __user *events;
};
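
/*
 * Configuration options available inside /proc/sys/fs/epoll/, plus the
 * global state shared by all eventpoll instances.
 */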
static long max_user_watches __read_mostly;

static DEFINE_MUTEX(epmutex);

static struct nested_calls poll_loop_ncalls;

static struct nested_calls poll_safewake_ncalls;

static struct nested_calls poll_readywalk_ncalls;

static struct kmem_cache *epi_cache __read_mostly;

static struct kmem_cache *pwq_cache __read_mostly;

static LIST_HEAD(visited_list);

static LIST_HEAD(tfile_check_list);

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long zero;
static long long_max = LONG_MAX;

struct ctl_table epoll_table[] = {
	{
		.procname	= "max_user_watches",
		.data		= &max_user_watches,
		.maxlen		= sizeof(max_user_watches),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &zero,
		.extra2		= &long_max,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static const struct file_operations eventpoll_fops;

static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
		(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}

static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait);
}

static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

static void ep_nested_calls_init(struct nested_calls *ncalls)
{
	INIT_LIST_HEAD(&ncalls->tasks_call_list);
	spin_lock_init(&ncalls->lock);
}
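
/*
 * Returns a value different than zero if ready events are available,
 * or zero otherwise.
 */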
static inline int ep_events_available(struct eventpoll *ep)
{
	return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
}
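
/*
 * ep_call_nested - Perform a bound (possibly) nested call, by checking
 *                  that the recursion limit is not exceeded, and that
 *                  the same nested call (by the meaning of same cookie)
 *                  is not re-entered.
 *
 * Returns: Returns the code returned by the @nproc callback, or -1 if
 *          the maximum recursion limit has been exceeded.
 */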
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
			  int (*nproc)(void *, void *, int), void *priv,
			  void *cookie, void *ctx)
{
	int error, call_nests = 0;
	unsigned long flags;
	struct list_head *lsthead = &ncalls->tasks_call_list;
	struct nested_call_node *tncur;
	struct nested_call_node tnode;

	spin_lock_irqsave(&ncalls->lock, flags);

	list_for_each_entry(tncur, lsthead, llink) {
		if (tncur->ctx == ctx &&
		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
			error = -1;
			goto out_unlock;
		}
	}

	tnode.ctx = ctx;
	tnode.cookie = cookie;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&ncalls->lock, flags);

	error = (*nproc)(priv, cookie, call_nests);

	spin_lock_irqsave(&ncalls->lock, flags);
	list_del(&tnode.llink);
out_unlock:
	spin_unlock_irqrestore(&ncalls->lock, flags);

	return error;
}
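
/*
 * With CONFIG_DEBUG_LOCK_ALLOC enabled, wake up the nested wait queue
 * with a lockdep subclass, so that nested epoll wakeups do not trigger
 * false positive "possible recursive locking" reports.
 */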
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
				     unsigned long events, int subclass)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
	wake_up_locked_poll(wqueue, events);
	spin_unlock_irqrestore(&wqueue->lock, flags);
}
#else
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
				     unsigned long events, int subclass)
{
	wake_up_poll(wqueue, events);
}
#endif

static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
	ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
			  1 + call_nests);
	return 0;
}
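
/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the callback'd wake up system, the poll callback may be
 * reentered from inside the call to wake_up() done on the poll wait
 * queue head. The rule is that we cannot reenter the wake up code from
 * the same task more than EP_MAX_NESTS times, and we cannot reenter
 * the same wait queue head at all.
 */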
static void ep_poll_safewake(wait_queue_head_t *wq)
{
	int this_cpu = get_cpu();

	ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
		       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);

	put_cpu();
}

static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
	wait_queue_head_t *whead;

	rcu_read_lock();

	whead = rcu_dereference(pwq->whead);
	if (whead)
		remove_wait_queue(whead, &pwq->wait);
	rcu_read_unlock();
}
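
/*
 * This function unregisters poll callbacks from the associated file
 * descriptor. Must be called with "mtx" held (or "epmutex" if called
 * from ep_free).
 */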
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	while (!list_empty(lsthead)) {
		pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

		list_del(&pwq->llink);
		ep_remove_wait_queue(pwq);
		kmem_cache_free(pwq_cache, pwq);
	}
}

static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
{
	return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
}

static inline void ep_pm_stay_awake(struct epitem *epi)
{
	struct wakeup_source *ws = ep_wakeup_source(epi);

	if (ws)
		__pm_stay_awake(ws);
}

static inline bool ep_has_wakeup_source(struct epitem *epi)
{
	return rcu_access_pointer(epi->ws) ? true : false;
}

static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
{
	struct wakeup_source *ws;

	rcu_read_lock();
	ws = rcu_dereference(epi->ws);
	if (ws)
		__pm_stay_awake(ws);
	rcu_read_unlock();
}
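
/**
 * ep_scan_ready_list - Scans the ready list in a way that makes possible for
 *                      the scan code, to call f_op->poll(). Also allows for
 *                      O(NumReady) performance.
 *
 * @ep: Pointer to the epoll private data structure.
 * @sproc: Pointer to the scan callback.
 * @priv: Private opaque data passed to the @sproc callback.
 * @depth: The current depth of recursive f_op->poll calls.
 * @ep_locked: caller already holds ep->mtx
 *
 * Returns: The same integer error code returned by the @sproc callback.
 */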
static int ep_scan_ready_list(struct eventpoll *ep,
			      int (*sproc)(struct eventpoll *,
					   struct list_head *, void *),
			      void *priv, int depth, bool ep_locked)
{
	int error, pwake = 0;
	unsigned long flags;
	struct epitem *epi, *nepi;
	LIST_HEAD(txlist);

	if (!ep_locked)
		mutex_lock_nested(&ep->mtx, depth);

	spin_lock_irqsave(&ep->lock, flags);
	list_splice_init(&ep->rdllist, &txlist);
	ep->ovflist = NULL;
	spin_unlock_irqrestore(&ep->lock, flags);

	error = (*sproc)(ep, &txlist, priv);

	spin_lock_irqsave(&ep->lock, flags);

	for (nepi = ep->ovflist; (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			ep_pm_stay_awake(epi);
		}
	}

	ep->ovflist = EP_UNACTIVE_PTR;

	list_splice(&txlist, &ep->rdllist);
	__pm_relax(ep->ws);

	if (!list_empty(&ep->rdllist)) {
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	if (!ep_locked)
		mutex_unlock(&ep->mtx);

	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return error;
}

static void epi_rcu_free(struct rcu_head *head)
{
	struct epitem *epi = container_of(head, struct epitem, rcu);
	kmem_cache_free(epi_cache, epi);
}
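
/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */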
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	unsigned long flags;
	struct file *file = epi->ffd.file;

	ep_unregister_pollwait(ep, epi);

	spin_lock(&file->f_lock);
	list_del_rcu(&epi->fllink);
	spin_unlock(&file->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	wakeup_source_unregister(ep_wakeup_source(epi));

	call_rcu(&epi->rcu, epi_rcu_free);

	atomic_long_dec(&ep->user->epoll_watches);

	return 0;
}

static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&ep->poll_wait);

	mutex_lock(&epmutex);

	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
		cond_resched();
	}

	mutex_lock(&ep->mtx);
	while ((rbp = rb_first(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
		cond_resched();
	}
	mutex_unlock(&ep->mtx);

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	wakeup_source_unregister(ep->ws);
	kfree(ep);
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	return 0;
}

static inline unsigned int ep_item_poll(struct epitem *epi, poll_table *pt)
{
	pt->_key = epi->event.events;

	return epi->ffd.file->f_op->poll(epi->ffd.file, pt) & epi->event.events;
}

static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct epitem *epi, *tmp;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);

	list_for_each_entry_safe(epi, tmp, head, rdllink) {
		if (ep_item_poll(epi, &pt))
			return POLLIN | POLLRDNORM;
		else {
			__pm_relax(ep_wakeup_source(epi));
			list_del_init(&epi->rdllink);
		}
	}

	return 0;
}

static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt);

struct readyevents_arg {
	struct eventpoll *ep;
	bool locked;
};

static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
{
	struct readyevents_arg *arg = priv;

	return ep_scan_ready_list(arg->ep, ep_read_events_proc, NULL,
				  call_nests + 1, arg->locked);
}

static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	int pollflags;
	struct eventpoll *ep = file->private_data;
	struct readyevents_arg arg;

	arg.locked = wait && (wait->_qproc == ep_ptable_queue_proc);
	arg.ep = ep;

	poll_wait(file, &ep->poll_wait, wait);

	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
				   ep_poll_readyevents_proc, &arg, ep, current);

	return pollflags != -1 ? pollflags : 0;
}

#ifdef CONFIG_PROC_FS
static void ep_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventpoll *ep = f->private_data;
	struct rb_node *rbp;

	mutex_lock(&ep->mtx);
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		struct epitem *epi = rb_entry(rbp, struct epitem, rbn);

		seq_printf(m, "tfd: %8d events: %8x data: %16llx\n",
			   epi->ffd.fd, epi->event.events,
			   (long long)epi->event.data);
		if (seq_has_overflowed(m))
			break;
	}
	mutex_unlock(&ep->mtx);
}
#endif

static const struct file_operations eventpoll_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= ep_show_fdinfo,
#endif
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll,
	.llseek		= noop_llseek,
};
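
/*
 * This is called from eventpoll_release() to unlink files from the
 * eventpoll interface. We need this facility to cleanup correctly files
 * that are closed without being removed from the eventpoll interface.
 */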
void eventpoll_release_file(struct file *file)
{
	struct eventpoll *ep;
	struct epitem *epi, *next;

	mutex_lock(&epmutex);
	list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
		ep = epi->ep;
		mutex_lock_nested(&ep->mtx, 0);
		ep_remove(ep, epi);
		mutex_unlock(&ep->mtx);
	}
	mutex_unlock(&epmutex);
}

static int ep_alloc(struct eventpoll **pep)
{
	int error;
	struct user_struct *user;
	struct eventpoll *ep;

	user = get_current_user();
	error = -ENOMEM;
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (unlikely(!ep))
		goto free_uid;

	spin_lock_init(&ep->lock);
	mutex_init(&ep->mtx);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;
	ep->ovflist = EP_UNACTIVE_PTR;
	ep->user = user;

	*pep = ep;

	return 0;

free_uid:
	free_uid(user);
	return error;
}
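
/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */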
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	return epir;
}
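
/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */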
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;
	int ewake = 0;

	if ((unsigned long)key & POLLFREE) {
		ep_pwq_from_wait(wait)->whead = NULL;
		list_del_init(&wait->task_list);
	}

	spin_lock_irqsave(&ep->lock, flags);

	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	if (key && !((unsigned long) key & epi->event.events))
		goto out_unlock;

	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = ep->ovflist;
			ep->ovflist = epi;
			if (epi->ws)
				__pm_stay_awake(ep->ws);
		}
		goto out_unlock;
	}

	if (!ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);
		ep_pm_stay_awake_rcu(epi);
	}

	if (waitqueue_active(&ep->wq)) {
		if ((epi->event.events & EPOLLEXCLUSIVE) &&
		    !((unsigned long)key & POLLFREE)) {
			switch ((unsigned long)key & EPOLLINOUT_BITS) {
			case POLLIN:
				if (epi->event.events & POLLIN)
					ewake = 1;
				break;
			case POLLOUT:
				if (epi->event.events & POLLOUT)
					ewake = 1;
				break;
			case 0:
				ewake = 1;
				break;
			}
		}
		wake_up_locked(&ep->wq);
	}
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	spin_unlock_irqrestore(&ep->lock, flags);

	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	if (epi->event.events & EPOLLEXCLUSIVE)
		return ewake;

	return 1;
}
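
/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */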
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		if (epi->event.events & EPOLLEXCLUSIVE)
			add_wait_queue_exclusive(whead, &pwq->wait);
		else
			add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		epi->nwait = -1;
	}
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}
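
/*
 * "path_limits" caps the number of wakeup paths that may terminate at
 * each nesting depth of an epoll file: 1000 paths of length 1, 500 of
 * length 2, 100 of length 3, 50 of length 4 and 10 of length 5. These
 * limits are enforced during EPOLL_CTL_ADD by reverse_path_check().
 */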
#define PATH_ARR_SIZE 5

static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];

static int path_count_inc(int nests)
{
	if (nests == 0)
		return 0;

	if (++path_count[nests] > path_limits[nests])
		return -1;
	return 0;
}

static void path_count_init(void)
{
	int i;

	for (i = 0; i < PATH_ARR_SIZE; i++)
		path_count[i] = 0;
}

static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
{
	int error = 0;
	struct file *file = priv;
	struct file *child_file;
	struct epitem *epi;

	rcu_read_lock();
	list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
		child_file = epi->ep->file;
		if (is_file_epoll(child_file)) {
			if (list_empty(&child_file->f_ep_links)) {
				if (path_count_inc(call_nests)) {
					error = -1;
					break;
				}
			} else {
				error = ep_call_nested(&poll_loop_ncalls,
							EP_MAX_NESTS,
							reverse_path_check_proc,
							child_file, child_file,
							current);
			}
			if (error != 0)
				break;
		} else {
			printk(KERN_ERR "reverse_path_check_proc: "
			       "file is not an ep!\n");
		}
	}
	rcu_read_unlock();
	return error;
}
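
/**
 * reverse_path_check - The tfile_check_list is a list of files with links
 *                      that are proposed to be newly added. We need to make
 *                      sure that those added links don't add too many paths,
 *                      such that we would spend all our time waking up
 *                      eventpoll objects.
 *
 * Returns: Returns zero if the proposed links don't create too many paths,
 *          -1 otherwise.
 */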
static int reverse_path_check(void)
{
	int error = 0;
	struct file *current_file;

	list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
		path_count_init();
		error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
					reverse_path_check_proc, current_file,
					current_file, current);
		if (error)
			break;
	}
	return error;
}

static int ep_create_wakeup_source(struct epitem *epi)
{
	const char *name;
	struct wakeup_source *ws;

	if (!epi->ep->ws) {
		epi->ep->ws = wakeup_source_register("eventpoll");
		if (!epi->ep->ws)
			return -ENOMEM;
	}

	name = epi->ffd.file->f_path.dentry->d_name.name;
	ws = wakeup_source_register(name);

	if (!ws)
		return -ENOMEM;
	rcu_assign_pointer(epi->ws, ws);

	return 0;
}

static noinline void ep_destroy_wakeup_source(struct epitem *epi)
{
	struct wakeup_source *ws = ep_wakeup_source(epi);

	RCU_INIT_POINTER(epi->ws, NULL);

	synchronize_rcu();
	wakeup_source_unregister(ws);
}
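
/*
 * Must be called with "mtx" held.
 */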
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd, int full_check)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	long user_watches;
	struct epitem *epi;
	struct ep_pqueue epq;

	user_watches = atomic_long_read(&ep->user->epoll_watches);
	if (unlikely(user_watches >= max_user_watches))
		return -ENOSPC;
	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
		return -ENOMEM;

	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	epi->nwait = 0;
	epi->next = EP_UNACTIVE_PTR;
	if (epi->event.events & EPOLLWAKEUP) {
		error = ep_create_wakeup_source(epi);
		if (error)
			goto error_create_wakeup_source;
	} else {
		RCU_INIT_POINTER(epi->ws, NULL);
	}

	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	revents = ep_item_poll(epi, &epq.pt);

	error = -ENOMEM;
	if (epi->nwait < 0)
		goto error_unregister;

	spin_lock(&tfile->f_lock);
	list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_lock);

	ep_rbtree_insert(ep, epi);

	error = -EINVAL;
	if (full_check && reverse_path_check())
		goto error_remove_epi;

	spin_lock_irqsave(&ep->lock, flags);

	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);
		ep_pm_stay_awake(epi);

		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	spin_unlock_irqrestore(&ep->lock, flags);

	atomic_long_inc(&ep->user->epoll_watches);

	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;

error_remove_epi:
	spin_lock(&tfile->f_lock);
	list_del_rcu(&epi->fllink);
	spin_unlock(&tfile->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

error_unregister:
	ep_unregister_pollwait(ep, epi);

	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	wakeup_source_unregister(ep_wakeup_source(epi));

error_create_wakeup_source:
	kmem_cache_free(epi_cache, epi);

	return error;
}
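
/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */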
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);

	epi->event.events = event->events;
	epi->event.data = event->data;
	if (epi->event.events & EPOLLWAKEUP) {
		if (!ep_has_wakeup_source(epi))
			ep_create_wakeup_source(epi);
	} else if (ep_has_wakeup_source(epi)) {
		ep_destroy_wakeup_source(epi);
	}

	smp_mb();

	revents = ep_item_poll(epi, &pt);

	if (revents & event->events) {
		spin_lock_irq(&ep->lock);
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			ep_pm_stay_awake(epi);

			if (waitqueue_active(&ep->wq))
				wake_up_locked(&ep->wq);
			if (waitqueue_active(&ep->poll_wait))
				pwake++;
		}
		spin_unlock_irq(&ep->lock);
	}

	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;
}

static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct ep_send_events_data *esed = priv;
	int eventcnt;
	unsigned int revents;
	struct epitem *epi;
	struct epoll_event __user *uevent;
	struct wakeup_source *ws;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);

	for (eventcnt = 0, uevent = esed->events;
	     !list_empty(head) && eventcnt < esed->maxevents;) {
		epi = list_first_entry(head, struct epitem, rdllink);

		ws = ep_wakeup_source(epi);
		if (ws) {
			if (ws->active)
				__pm_stay_awake(ep->ws);
			__pm_relax(ws);
		}

		list_del_init(&epi->rdllink);

		revents = ep_item_poll(epi, &pt);

		if (revents) {
			if (__put_user(revents, &uevent->events) ||
			    __put_user(epi->event.data, &uevent->data)) {
				list_add(&epi->rdllink, head);
				ep_pm_stay_awake(epi);
				return eventcnt ? eventcnt : -EFAULT;
			}
			eventcnt++;
			uevent++;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			else if (!(epi->event.events & EPOLLET)) {
				list_add_tail(&epi->rdllink, &ep->rdllist);
				ep_pm_stay_awake(epi);
			}
		}
	}

	return eventcnt;
}

static int ep_send_events(struct eventpoll *ep,
			  struct epoll_event __user *events, int maxevents)
{
	struct ep_send_events_data esed;

	esed.maxevents = maxevents;
	esed.events = events;

	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
}

static inline struct timespec64 ep_set_mstimeout(long ms)
{
	struct timespec64 now, ts = {
		.tv_sec = ms / MSEC_PER_SEC,
		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
	};

	ktime_get_ts64(&now);
	return timespec64_add_safe(now, ts);
}
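
/**
 * ep_poll - Retrieves ready events, and delivers them to the caller supplied
 *           event buffer.
 *
 * @ep: Pointer to the eventpoll context.
 * @events: Pointer to the userspace buffer where the ready events should be
 *          stored.
 * @maxevents: Size (in terms of number of events) of the caller event buffer.
 * @timeout: Maximum timeout for the ready events fetch operation, in
 *           milliseconds. If the @timeout is zero, the function will not block,
 *           while if the @timeout is less than zero, the function will block
 *           until at least one event has been retrieved (or an error
 *           occurred).
 *
 * Returns: Returns the number of ready events which have been fetched, or an
 *          error code, in case of error.
 */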
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res = 0, eavail, timed_out = 0;
	unsigned long flags;
	u64 slack = 0;
	wait_queue_t wait;
	ktime_t expires, *to = NULL;

	if (timeout > 0) {
		struct timespec64 end_time = ep_set_mstimeout(timeout);

		slack = select_estimate_accuracy(&end_time);
		to = &expires;
		*to = timespec64_to_ktime(end_time);
	} else if (timeout == 0) {
		timed_out = 1;
		spin_lock_irqsave(&ep->lock, flags);
		goto check_events;
	}

fetch_events:
	spin_lock_irqsave(&ep->lock, flags);

	if (!ep_events_available(ep)) {
		init_waitqueue_entry(&wait, current);
		__add_wait_queue_exclusive(&ep->wq, &wait);

		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ep_events_available(ep) || timed_out)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			spin_unlock_irqrestore(&ep->lock, flags);
			if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
				timed_out = 1;

			spin_lock_irqsave(&ep->lock, flags);
		}

		__remove_wait_queue(&ep->wq, &wait);
		__set_current_state(TASK_RUNNING);
	}
check_events:
	eavail = ep_events_available(ep);

	spin_unlock_irqrestore(&ep->lock, flags);

	if (!res && eavail &&
	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
		goto fetch_events;

	return res;
}
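
/**
 * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
 *                      API, to verify that adding an epoll file inside another
 *                      epoll structure, does not violate the constraints, in
 *                      terms of closed loops, or too deep chains (which can
 *                      result in excessive stack usage).
 *
 * @priv: Pointer to the epoll file to be currently checked.
 * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
 *          file.
 * @call_nests: Current depth of the @ep_call_nested() call stack.
 *
 * Returns: Returns zero if adding the epoll @file inside current epoll
 *          structure @ep does not violate the constraints, or -1 otherwise.
 */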
static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
{
	int error = 0;
	struct file *file = priv;
	struct eventpoll *ep = file->private_data;
	struct eventpoll *ep_tovisit;
	struct rb_node *rbp;
	struct epitem *epi;

	mutex_lock_nested(&ep->mtx, call_nests + 1);
	ep->visited = 1;
	list_add(&ep->visited_list_link, &visited_list);
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);
		if (unlikely(is_file_epoll(epi->ffd.file))) {
			ep_tovisit = epi->ffd.file->private_data;
			if (ep_tovisit->visited)
				continue;
			error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
					       ep_loop_check_proc, epi->ffd.file,
					       ep_tovisit, current);
			if (error != 0)
				break;
		} else {
			if (list_empty(&epi->ffd.file->f_tfile_llink))
				list_add(&epi->ffd.file->f_tfile_llink,
					 &tfile_check_list);
		}
	}
	mutex_unlock(&ep->mtx);

	return error;
}
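
/**
 * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
 *                 inside another epoll file (represented by @ep) does not
 *                 create closed loops or too deep chains.
 *
 * @ep: Pointer to the epoll private data structure.
 * @file: Pointer to the epoll file to be checked.
 *
 * Returns: Returns zero if adding the epoll @file inside current epoll
 *          structure @ep does not violate the constraints, or -1 otherwise.
 */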
static int ep_loop_check(struct eventpoll *ep, struct file *file)
{
	int ret;
	struct eventpoll *ep_cur, *ep_next;

	ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
			     ep_loop_check_proc, file, ep, current);

	list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
				 visited_list_link) {
		ep_cur->visited = 0;
		list_del(&ep_cur->visited_list_link);
	}
	return ret;
}

static void clear_tfile_check_list(void)
{
	struct file *file;

	while (!list_empty(&tfile_check_list)) {
		file = list_first_entry(&tfile_check_list, struct file,
					f_tfile_llink);
		list_del_init(&file->f_tfile_llink);
	}
	INIT_LIST_HEAD(&tfile_check_list);
}
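
/*
 * Open an eventpoll file descriptor.
 *
 * An illustrative userspace sketch of the three syscalls implemented
 * below (not part of this file; "sock_fd" is an assumed, already open
 * non-blocking socket and "handle_io" a user-defined handler):
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = sock_fd };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sock_fd, &ev);
 *
 *	struct epoll_event ready[16];
 *	int n = epoll_wait(epfd, ready, 16, 1000);	// 1 s timeout
 *	for (int i = 0; i < n; i++)
 *		handle_io(ready[i].data.fd);
 */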
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
	int error, fd;
	struct eventpoll *ep = NULL;
	struct file *file;

	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);

	if (flags & ~EPOLL_CLOEXEC)
		return -EINVAL;

	error = ep_alloc(&ep);
	if (error < 0)
		return error;

	fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
	if (fd < 0) {
		error = fd;
		goto out_free_ep;
	}
	file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
				  O_RDWR | (flags & O_CLOEXEC));
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto out_free_fd;
	}
	ep->file = file;
	fd_install(fd, file);
	return fd;

out_free_fd:
	put_unused_fd(fd);
out_free_ep:
	ep_free(ep);
	return error;
}

SYSCALL_DEFINE1(epoll_create, int, size)
{
	if (size <= 0)
		return -EINVAL;

	return sys_epoll_create1(0);
}
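
/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set.
 */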
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
		struct epoll_event __user *, event)
{
	int error;
	int full_check = 0;
	struct fd f, tf;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;
	struct eventpoll *tep = NULL;

	error = -EFAULT;
	if (ep_op_has_event(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto error_return;

	error = -EBADF;
	f = fdget(epfd);
	if (!f.file)
		goto error_return;

	tf = fdget(fd);
	if (!tf.file)
		goto error_fput;

	error = -EPERM;
	if (!tf.file->f_op->poll)
		goto error_tgt_fput;

	if (ep_op_has_event(op))
		ep_take_care_of_epollwakeup(&epds);

	error = -EINVAL;
	if (f.file == tf.file || !is_file_epoll(f.file))
		goto error_tgt_fput;

	if (epds.events & EPOLLEXCLUSIVE) {
		if (op == EPOLL_CTL_MOD)
			goto error_tgt_fput;
		if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
				(epds.events & ~EPOLLEXCLUSIVE_OK_BITS)))
			goto error_tgt_fput;
	}

	ep = f.file->private_data;

	mutex_lock_nested(&ep->mtx, 0);
	if (op == EPOLL_CTL_ADD) {
		if (!list_empty(&f.file->f_ep_links) ||
		    is_file_epoll(tf.file)) {
			full_check = 1;
			mutex_unlock(&ep->mtx);
			mutex_lock(&epmutex);
			if (is_file_epoll(tf.file)) {
				error = -ELOOP;
				if (ep_loop_check(ep, tf.file) != 0) {
					clear_tfile_check_list();
					goto error_tgt_fput;
				}
			} else
				list_add(&tf.file->f_tfile_llink,
					 &tfile_check_list);
			mutex_lock_nested(&ep->mtx, 0);
			if (is_file_epoll(tf.file)) {
				tep = tf.file->private_data;
				mutex_lock_nested(&tep->mtx, 1);
			}
		}
	}

	epi = ep_find(ep, tf.file, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_insert(ep, &epds, tf.file, fd, full_check);
		} else
			error = -EEXIST;
		if (full_check)
			clear_tfile_check_list();
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			if (!(epi->event.events & EPOLLEXCLUSIVE)) {
				epds.events |= POLLERR | POLLHUP;
				error = ep_modify(ep, epi, &epds);
			}
		} else
			error = -ENOENT;
		break;
	}
	if (tep != NULL)
		mutex_unlock(&tep->mtx);
	mutex_unlock(&ep->mtx);

error_tgt_fput:
	if (full_check)
		mutex_unlock(&epmutex);

	fdput(tf);
error_fput:
	fdput(f);
error_return:

	return error;
}
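
/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */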
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout)
{
	int error;
	struct fd f;
	struct eventpoll *ep;

	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event)))
		return -EFAULT;

	f = fdget(epfd);
	if (!f.file)
		return -EBADF;

	error = -EINVAL;
	if (!is_file_epoll(f.file))
		goto error_fput;

	ep = f.file->private_data;

	error = ep_poll(ep, events, maxevents, timeout);

error_fput:
	fdput(f);
	return error;
}
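
/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */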
SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	int error;
	sigset_t ksigmask, sigsaved;

	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;
		sigsaved = current->blocked;
		set_current_blocked(&ksigmask);
	}

	error = sys_epoll_wait(epfd, events, maxevents, timeout);

	if (sigmask) {
		if (error == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		} else
			set_current_blocked(&sigsaved);
	}

	return error;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
		       struct epoll_event __user *, events,
		       int, maxevents, int, timeout,
		       const compat_sigset_t __user *, sigmask,
		       compat_size_t, sigsetsize)
{
	long err;
	compat_sigset_t csigmask;
	sigset_t ksigmask, sigsaved;

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (copy_from_user(&csigmask, sigmask, sizeof(csigmask)))
			return -EFAULT;
		sigset_from_compat(&ksigmask, &csigmask);
		sigsaved = current->blocked;
		set_current_blocked(&ksigmask);
	}

	err = sys_epoll_wait(epfd, events, maxevents, timeout);

	if (sigmask) {
		if (err == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		} else
			set_current_blocked(&sigsaved);
	}

	return err;
}
#endif

static int __init eventpoll_init(void)
{
	struct sysinfo si;

	si_meminfo(&si);

	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
		EP_ITEM_COST;
	BUG_ON(max_user_watches < 0);

	ep_nested_calls_init(&poll_loop_ncalls);

	ep_nested_calls_init(&poll_safewake_ncalls);

	ep_nested_calls_init(&poll_readywalk_ncalls);

	BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);

	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	pwq_cache = kmem_cache_create("eventpoll_pwq",
			sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);

	return 0;
}
fs_initcall(eventpoll_init);