/*
 * fs/eventpoll.c - efficient event retrieval (epoll) implementation
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
#include <net/busy_poll.h>
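
/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (rwlock)
 *
 * The acquire order is the one listed above. ep->mtx protects the RB tree
 * of monitored files; it is taken by epoll_ctl(), while transferring events
 * to userspace and by eventpoll_release_file(). ep->lock protects the ready
 * list (rdllist) and the overflow list (ovflist) and may be taken from the
 * poll callback, hence the irq-safe read/write lock variants.
 */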
/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)

#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)

#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
                                EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)

/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))

struct epoll_filefd {
        struct file *file;
        int fd;
} __packed;

/* Wait structure used by the poll hooks */
struct eppoll_entry {
        /* List header used to link this structure to the "struct epitem" */
        struct eppoll_entry *next;

        /* The "base" pointer is set to the container "struct epitem" */
        struct epitem *base;

        /*
         * Wait queue item that will be linked to the target file wait
         * queue head.
         */
        wait_queue_entry_t wait;

        /* The wait queue head that linked the "wait" wait queue item */
        wait_queue_head_t *whead;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 */
struct epitem {
        union {
                /* RB tree node links this structure to the eventpoll RB tree */
                struct rb_node rbn;
                /* Used to free the struct epitem */
                struct rcu_head rcu;
        };

        /* List header used to link this structure to the eventpoll ready list */
        struct list_head rdllink;

        /*
         * Works together with "struct eventpoll"->ovflist in keeping the
         * single linked chain of items.
         */
        struct epitem *next;

        /* The file descriptor information this item refers to */
        struct epoll_filefd ffd;

        /* List containing poll wait queues */
        struct eppoll_entry *pwqlist;

        /* The "container" of this item */
        struct eventpoll *ep;

        /* List header used to link this item to the "struct file" items list */
        struct hlist_node fllink;

        /* wakeup_source used when EPOLLWAKEUP is set */
        struct wakeup_source __rcu *ws;

        /* The structure that describes the interested events and the source fd */
        struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
        /*
         * This mutex is used to ensure that files are not removed
         * while epoll is using them. This is held during the event
         * collection loop, the file cleanup path, the epoll file exit
         * code and the ctl operations.
         */
        struct mutex mtx;

        /* Wait queue used by sys_epoll_wait() */
        wait_queue_head_t wq;

        /* Wait queue used by file->poll() */
        wait_queue_head_t poll_wait;

        /* List of ready file descriptors */
        struct list_head rdllist;

        /* Lock which protects rdllist and ovflist */
        rwlock_t lock;

        /* RB tree root used to store monitored fd structs */
        struct rb_root_cached rbr;

        /*
         * This is a single linked list that chains all the "struct epitem"
         * that became ready while transferring ready events to userspace
         * w/out holding ->lock.
         */
        struct epitem *ovflist;

        /* wakeup_source used when an event-transfer scan is running */
        struct wakeup_source *ws;

        /* The user that created the eventpoll descriptor */
        struct user_struct *user;

        struct file *file;

        /* used to optimize the loop detection check */
        u64 gen;
        struct hlist_head refs;

#ifdef CONFIG_NET_RX_BUSY_POLL
        /* used to track busy poll napi_id */
        unsigned int napi_id;
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /* tracks wakeup nesting level for lockdep validation */
        u8 nests;
#endif
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
        poll_table pt;
        struct epitem *epi;
};

/* Maximum number of epoll watches, configurable through sysctl */
static long max_user_watches __read_mostly;

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static DEFINE_MUTEX(epmutex);

static u64 loop_check_gen = 0;

/* Used to check for epoll file descriptor inclusion loops */
static struct eventpoll *inserting_into;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/*
 * List of files with newly added links, where we may need to limit the number
 * of emanating paths. Protected by the epmutex.
 */
struct epitems_head {
        struct hlist_head epitems;
        struct epitems_head *next;
};
static struct epitems_head *tfile_check_list = EP_UNACTIVE_PTR;

static struct kmem_cache *ephead_cache __read_mostly;

static inline void free_ephead(struct epitems_head *head)
{
        if (head)
                kmem_cache_free(ephead_cache, head);
}

static void list_file(struct file *file)
{
        struct epitems_head *head;

        head = container_of(file->f_ep, struct epitems_head, epitems);
        if (!head->next) {
                head->next = tfile_check_list;
                tfile_check_list = head;
        }
}

static void unlist_file(struct epitems_head *head)
{
        struct epitems_head *to_free = head;
        struct hlist_node *p = rcu_dereference(hlist_first_rcu(&head->epitems));

        if (p) {
                struct epitem *epi = container_of(p, struct epitem, fllink);

                spin_lock(&epi->ffd.file->f_lock);
                if (!hlist_empty(&head->epitems))
                        to_free = NULL;
                head->next = NULL;
                spin_unlock(&epi->ffd.file->f_lock);
        }
        free_ephead(to_free);
}

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long long_zero;
static long long_max = LONG_MAX;

/* Exposed as /proc/sys/fs/epoll/max_user_watches */
struct ctl_table epoll_table[] = {
        {
                .procname       = "max_user_watches",
                .data           = &max_user_watches,
                .maxlen         = sizeof(max_user_watches),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
                .extra1         = &long_zero,
                .extra2         = &long_max,
        },
        { }
};
#endif /* CONFIG_SYSCTL */

static const struct file_operations eventpoll_fops;

static inline int is_file_epoll(struct file *f)
{
        return f->f_op == &eventpoll_fops;
}

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
                              struct file *file, int fd)
{
        ffd->file = file;
        ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
                             struct epoll_filefd *p2)
{
        return (p1->file > p2->file ? +1:
                (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct epitem *epi)
{
        return !list_empty(&epi->rdllink);
}

static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
{
        return container_of(p, struct eppoll_entry, wait);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
{
        return container_of(p, struct eppoll_entry, wait)->base;
}

/*
 * ep_events_available - Checks if ready events might be available.
 * Returns a value different than zero if ready events are available.
 */
static inline int ep_events_available(struct eventpoll *ep)
{
        return !list_empty_careful(&ep->rdllist) ||
                READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static bool ep_busy_loop_end(void *p, unsigned long start_time)
{
        struct eventpoll *ep = p;

        return ep_events_available(ep) || busy_loop_timeout(start_time);
}

/*
 * Busy poll if globally on and supporting sockets found && no events,
 * busy loop will return if need_resched or ep_events_available.
 *
 * we must do our busy polling with irqs enabled
 */
static bool ep_busy_loop(struct eventpoll *ep, int nonblock)
{
        unsigned int napi_id = READ_ONCE(ep->napi_id);

        if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on()) {
                napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep, false,
                               BUSY_POLL_BUDGET);
                if (ep_events_available(ep))
                        return true;
                /*
                 * Busy poll timed out. Drop the NAPI ID for now; it will be
                 * set again when a socket with a valid NAPI ID is moved
                 * onto the ready list.
                 */
                ep->napi_id = 0;
                return false;
        }
        return false;
}

/*
 * Set epoll busy poll NAPI ID from sk.
 */
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
        struct eventpoll *ep;
        unsigned int napi_id;
        struct socket *sock;
        struct sock *sk;

        if (!net_busy_loop_on())
                return;

        sock = sock_from_file(epi->ffd.file);
        if (!sock)
                return;

        sk = sock->sk;
        if (!sk)
                return;

        napi_id = READ_ONCE(sk->sk_napi_id);
        ep = epi->ep;

        /*
         * Non-NAPI IDs can be rejected, and there is nothing to do if we
         * already have this ID.
         */
        if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
                return;

        /* record NAPI ID for use in next busy poll */
        ep->napi_id = napi_id;
}

#else

static inline bool ep_busy_loop(struct eventpoll *ep, int nonblock)
{
        return false;
}

static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
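
/*
 * ep_poll_safewake() wakes up ep->poll_wait, which is only populated when
 * one epoll file is being polled by another one (nested epoll). The lockdep
 * subclass (nesting level) is derived from the epoll instance that queued
 * us, so nested wakeups do not trigger false-positive lockdep reports.
 */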
static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
{
        struct eventpoll *ep_src;
        unsigned long flags;
        u8 nests = 0;

        /*
         * Pick the subclass for spin_lock_irqsave_nested() from the nesting
         * depth of the epoll instance that queued us (if any), so that
         * lockdep can tell the nesting levels apart.
         */
        if (epi) {
                if ((is_file_epoll(epi->ffd.file))) {
                        ep_src = epi->ffd.file->private_data;
                        nests = ep_src->nests;
                } else {
                        nests = 1;
                }
        }
        spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
        ep->nests = nests + 1;
        wake_up_locked_poll(&ep->poll_wait, EPOLLIN);
        ep->nests = 0;
        spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
}

#else

static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
{
        wake_up_poll(&ep->poll_wait, EPOLLIN);
}

#endif

static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
        wait_queue_head_t *whead;

        rcu_read_lock();
        /*
         * If it is cleared by POLLFREE, it should be rcu-safe.
         * If we read NULL we need a barrier paired with smp_store_release()
         * in ep_poll_callback(), otherwise we rely on whead->lock.
         */
        whead = smp_load_acquire(&pwq->whead);
        if (whead)
                remove_wait_queue(whead, &pwq->wait);
        rcu_read_unlock();
}
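
/*
 * This function unregisters poll callbacks from the associated file
 * descriptor, i.e. it removes the item's hooks from every wait queue it
 * was added to by ep_ptable_queue_proc().
 */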
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
        struct eppoll_entry **p = &epi->pwqlist;
        struct eppoll_entry *pwq;

        while ((pwq = *p) != NULL) {
                *p = pwq->next;
                ep_remove_wait_queue(pwq);
                kmem_cache_free(pwq_cache, pwq);
        }
}

/* call only when ep->mtx is held */
static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
{
        return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
}

/* call only when ep->mtx is held */
static inline void ep_pm_stay_awake(struct epitem *epi)
{
        struct wakeup_source *ws = ep_wakeup_source(epi);

        if (ws)
                __pm_stay_awake(ws);
}

static inline bool ep_has_wakeup_source(struct epitem *epi)
{
        return rcu_access_pointer(epi->ws) ? true : false;
}

/* call when ep->mtx cannot be held (ep_poll_callback) */
static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
{
        struct wakeup_source *ws;

        rcu_read_lock();
        ws = rcu_dereference(epi->ws);
        if (ws)
                __pm_stay_awake(ws);
        rcu_read_unlock();
}
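
/*
 * ep_start_scan()/ep_done_scan() bracket an event-transfer scan: the ready
 * list is moved onto a private "txlist" and ep->ovflist is armed, so that
 * items becoming ready in the meantime are parked there and spliced back
 * onto rdllist by ep_done_scan().
 */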
static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
{
        /*
         * Steal the ready list, and re-init the original one to the
         * empty list. Also, set ep->ovflist to NULL so that events
         * happening while looping w/out locks, are not lost. We cannot
         * have the poll callback to queue directly on ep->rdllist,
         * because we want the "sproc" callback to be able to do it
         * in a lockless way.
         */
        lockdep_assert_irqs_enabled();
        write_lock_irq(&ep->lock);
        list_splice_init(&ep->rdllist, txlist);
        WRITE_ONCE(ep->ovflist, NULL);
        write_unlock_irq(&ep->lock);
}

static void ep_done_scan(struct eventpoll *ep,
                         struct list_head *txlist)
{
        struct epitem *epi, *nepi;

        write_lock_irq(&ep->lock);
        /*
         * During the time we spent inside the "sproc" callback, some
         * other events might have been queued by the poll callback.
         * We re-insert them inside the main ready-list here.
         */
        for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
             nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
                /*
                 * We need to check if the item is already in the list.
                 * During the "sproc" callback execution time, items are
                 * queued into ->ovflist but the "txlist" might already
                 * contain them, and the list_splice() below takes care of them.
                 */
                if (!ep_is_linked(epi)) {
                        /*
                         * ->ovflist is LIFO, so we have to reverse it in order
                         * to keep in FIFO.
                         */
                        list_add(&epi->rdllink, &ep->rdllist);
                        ep_pm_stay_awake(epi);
                }
        }
        /*
         * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
         * releasing the lock, events will be queued in the normal way inside
         * ep->rdllist.
         */
        WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR);

        /*
         * Quickly re-inject items left on "txlist".
         */
        list_splice(txlist, &ep->rdllist);
        __pm_relax(ep->ws);
        write_unlock_irq(&ep->lock);
}

static void epi_rcu_free(struct rcu_head *head)
{
        struct epitem *epi = container_of(head, struct epitem, rcu);
        kmem_cache_free(epi_cache, epi);
}
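
/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates it.
 * Must be called with "mtx" held.
 */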
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
        struct file *file = epi->ffd.file;
        struct epitems_head *to_free;
        struct hlist_head *head;

        lockdep_assert_irqs_enabled();

        /*
         * Removes poll wait queue hooks.
         */
        ep_unregister_pollwait(ep, epi);

        /* Remove the current item from the list of epoll hooks */
        spin_lock(&file->f_lock);
        to_free = NULL;
        head = file->f_ep;
        if (head->first == &epi->fllink && !epi->fllink.next) {
                file->f_ep = NULL;
                if (!is_file_epoll(file)) {
                        struct epitems_head *v;
                        v = container_of(head, struct epitems_head, epitems);
                        if (!smp_load_acquire(&v->next))
                                to_free = v;
                }
        }
        hlist_del_rcu(&epi->fllink);
        spin_unlock(&file->f_lock);
        free_ephead(to_free);

        rb_erase_cached(&epi->rbn, &ep->rbr);

        write_lock_irq(&ep->lock);
        if (ep_is_linked(epi))
                list_del_init(&epi->rdllink);
        write_unlock_irq(&ep->lock);

        wakeup_source_unregister(ep_wakeup_source(epi));
        /*
         * At this point it is safe to free the eventpoll item. Use the union
         * field epi->rcu, since we are trying to minimize the size of
         * "struct epitem". The RCU read side, reverse_path_check_proc(), does
         * not make use of the rdllink or the rbn, which are only updated or
         * read while holding ep->mtx.
         */
        call_rcu(&epi->rcu, epi_rcu_free);

        atomic_long_dec(&ep->user->epoll_watches);

        return 0;
}

static void ep_free(struct eventpoll *ep)
{
        struct rb_node *rbp;
        struct epitem *epi;

        /* We need to release all tasks waiting for these file */
        if (waitqueue_active(&ep->poll_wait))
                ep_poll_safewake(ep, NULL);

        /*
         * We need to lock this because we could be hit by
         * eventpoll_release_file() while we're freeing the "struct eventpoll".
         * We do not need to hold "ep->mtx" here because the epoll file
         * is on the way to be removed and no one has references to it
         * anymore. The only hit might come from eventpoll_release_file(),
         * but holding "epmutex" is sufficient here.
         */
        mutex_lock(&epmutex);

        /*
         * Walks through the whole tree by unregistering poll callbacks.
         */
        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
                epi = rb_entry(rbp, struct epitem, rbn);

                ep_unregister_pollwait(ep, epi);
                cond_resched();
        }

        /*
         * Walks through the whole tree by freeing each "struct epitem". At this
         * point we are sure no poll callbacks will be lingering around, and also
         * by holding "epmutex" we can be sure that no file cleanup code will hit
         * us during this operation. So we can avoid the lock on "ep->lock".
         * We do not need to lock ep->mtx, either; we only do it to prevent
         * a lockdep warning.
         */
        mutex_lock(&ep->mtx);
        while ((rbp = rb_first_cached(&ep->rbr)) != NULL) {
                epi = rb_entry(rbp, struct epitem, rbn);
                ep_remove(ep, epi);
                cond_resched();
        }
        mutex_unlock(&ep->mtx);

        mutex_unlock(&epmutex);
        mutex_destroy(&ep->mtx);
        free_uid(ep->user);
        wakeup_source_unregister(ep->ws);
        kfree(ep);
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
        struct eventpoll *ep = file->private_data;

        if (ep)
                ep_free(ep);

        return 0;
}

static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, int depth);

static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int depth)
{
        struct eventpoll *ep = file->private_data;
        LIST_HEAD(txlist);
        struct epitem *epi, *tmp;
        poll_table pt;
        __poll_t res = 0;

        init_poll_funcptr(&pt, NULL);

        /* Insert inside our poll wait queue */
        poll_wait(file, &ep->poll_wait, wait);

        /*
         * Proceed to find out if wanted events are really available inside
         * the ready list.
         */
        mutex_lock_nested(&ep->mtx, depth);
        ep_start_scan(ep, &txlist);
        list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
                if (ep_item_poll(epi, &pt, depth + 1)) {
                        res = EPOLLIN | EPOLLRDNORM;
                        break;
                } else {
                        /*
                         * Item has been dropped into the ready list by the poll
                         * callback, but it's not actually ready, as far as
                         * caller requested events goes. We can remove it here.
                         */
                        __pm_relax(ep_wakeup_source(epi));
                        list_del_init(&epi->rdllink);
                }
        }
        ep_done_scan(ep, &txlist);
        mutex_unlock(&ep->mtx);
        return res;
}
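
/*
 * Differs from ep_eventpoll_poll() in that internal callers already hold
 * the ep->mtx, so we need to start from depth=1, such that
 * mutex_lock_nested() is correctly annotated.
 */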
static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
                             int depth)
{
        struct file *file = epi->ffd.file;
        __poll_t res;

        pt->_key = epi->event.events;
        if (!is_file_epoll(file))
                res = vfs_poll(file, pt);
        else
                res = __ep_eventpoll_poll(file, pt, depth);
        return res & epi->event.events;
}

static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
        return __ep_eventpoll_poll(file, wait, 0);
}

#ifdef CONFIG_PROC_FS
static void ep_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct eventpoll *ep = f->private_data;
        struct rb_node *rbp;

        mutex_lock(&ep->mtx);
        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
                struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
                struct inode *inode = file_inode(epi->ffd.file);

                seq_printf(m, "tfd: %8d events: %8x data: %16llx "
                           " pos:%lli ino:%lx sdev:%x\n",
                           epi->ffd.fd, epi->event.events,
                           (long long)epi->event.data,
                           (long long)epi->ffd.file->f_pos,
                           inode->i_ino, inode->i_sb->s_dev);
                if (seq_has_overflowed(m))
                        break;
        }
        mutex_unlock(&ep->mtx);
}
#endif

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = ep_show_fdinfo,
#endif
        .release        = ep_eventpoll_release,
        .poll           = ep_eventpoll_poll,
        .llseek         = noop_llseek,
};
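
/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to cleanup correctly files that are
 * closed without being removed from the eventpoll interface.
 */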
void eventpoll_release_file(struct file *file)
{
        struct eventpoll *ep;
        struct epitem *epi;
        struct hlist_node *next;

        /*
         * We don't take file->f_lock here: this is the file cleanup path,
         * so nobody can reach this file through epoll_ctl() anymore.
         * Serializing against ep_free() via "epmutex" is all we need, and
         * ep_remove() itself takes f_lock where required.
         */
        mutex_lock(&epmutex);
        if (unlikely(!file->f_ep)) {
                mutex_unlock(&epmutex);
                return;
        }
        hlist_for_each_entry_safe(epi, next, file->f_ep, fllink) {
                ep = epi->ep;
                mutex_lock_nested(&ep->mtx, 0);
                ep_remove(ep, epi);
                mutex_unlock(&ep->mtx);
        }
        mutex_unlock(&epmutex);
}

static int ep_alloc(struct eventpoll **pep)
{
        int error;
        struct user_struct *user;
        struct eventpoll *ep;

        user = get_current_user();
        error = -ENOMEM;
        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
        if (unlikely(!ep))
                goto free_uid;

        mutex_init(&ep->mtx);
        rwlock_init(&ep->lock);
        init_waitqueue_head(&ep->wq);
        init_waitqueue_head(&ep->poll_wait);
        INIT_LIST_HEAD(&ep->rdllist);
        ep->rbr = RB_ROOT_CACHED;
        ep->ovflist = EP_UNACTIVE_PTR;
        ep->user = user;

        *pep = ep;

        return 0;

free_uid:
        free_uid(user);
        return error;
}
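
/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */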
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
        int kcmp;
        struct rb_node *rbp;
        struct epitem *epi, *epir = NULL;
        struct epoll_filefd ffd;

        ep_set_ffd(&ffd, file, fd);
        for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
                epi = rb_entry(rbp, struct epitem, rbn);
                kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
                if (kcmp > 0)
                        rbp = rbp->rb_right;
                else if (kcmp < 0)
                        rbp = rbp->rb_left;
                else {
                        epir = epi;
                        break;
                }
        }

        return epir;
}

#ifdef CONFIG_KCMP
static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
{
        struct rb_node *rbp;
        struct epitem *epi;

        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
                epi = rb_entry(rbp, struct epitem, rbn);
                if (epi->ffd.fd == tfd) {
                        if (toff == 0)
                                return epi;
                        else
                                toff--;
                }
                cond_resched();
        }

        return NULL;
}

struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
                                     unsigned long toff)
{
        struct file *file_raw;
        struct eventpoll *ep;
        struct epitem *epi;

        if (!is_file_epoll(file))
                return ERR_PTR(-EINVAL);

        ep = file->private_data;

        mutex_lock(&ep->mtx);
        epi = ep_find_tfd(ep, tfd, toff);
        if (epi)
                file_raw = epi->ffd.file;
        else
                file_raw = ERR_PTR(-ENOENT);
        mutex_unlock(&ep->mtx);

        return file_raw;
}
#endif /* CONFIG_KCMP */
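
/*
 * Adds a new entry to the tail of the list in a lockless way, i.e.
 * multiple CPUs are allowed to call this function concurrently.
 *
 * Beware: it is necessary to prevent any other modifications of the
 * existing list until all changes are completed, in other words
 * concurrent list_add_tail_lockless() calls should be protected
 * with a read lock, where write lock acts as a barrier which
 * makes sure all list_add_tail_lockless() calls are fully
 * completed.
 *
 * Also an element can be locklessly added to the list only in one
 * direction, i.e. either to the tail or to the head, otherwise
 * concurrent access will corrupt the list.
 *
 * Return: %false if element has been already added to the list, %true
 * otherwise.
 */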
static inline bool list_add_tail_lockless(struct list_head *new,
                                          struct list_head *head)
{
        struct list_head *prev;

        /*
         * This is simple 'new->next = head' operation, but cmpxchg()
         * is used in order to detect that same element has been just
         * added to the list from another CPU: the winner observes
         * new->next == new.
         */
        if (cmpxchg(&new->next, new, head) != new)
                return false;

        /*
         * Initially 'new->next' points to 'new'; the element is considered
         * as not linked. Once this CPU has taken it (the cmpxchg above
         * succeeded), atomically exchange head->prev so that concurrent
         * adders each get a distinct predecessor.
         */
        prev = xchg(&head->prev, new);

        /*
         * It is safe to modify prev->next and new->prev, because the new
         * element is always added to the tail and new->next was already
         * set before the element became reachable through head->prev.
         */
        prev->next = new;
        new->prev = prev;

        return true;
}

/*
 * Chains a new epi entry onto ep->ovflist in a lockless way (LIFO push),
 * i.e. multiple CPUs are allowed to call this function concurrently.
 *
 * Returns %false if the epi element has been already chained, %true otherwise.
 */
static inline bool chain_epi_lockless(struct epitem *epi)
{
        struct eventpoll *ep = epi->ep;

        /* Fast preliminary check */
        if (epi->next != EP_UNACTIVE_PTR)
                return false;

        /* Check that the same epi has not been just chained from another CPU */
        if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
                return false;

        /* Atomically exchange the list head */
        epi->next = xchg(&ep->ovflist, epi);

        return true;
}
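
/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 *
 * This callback takes a read lock in order not to contend with concurrent
 * events from another file descriptor, thus all modifications to ->rdllist
 * or ->ovflist are lockless. The read lock is paired with the write lock
 * from ep_start/done_scan(), which stops all list modifications and
 * guarantees that the lists' state is seen correctly.
 *
 * Note that ep_poll_callback() can be called concurrently for the same @epi
 * from different CPUs if the poll table was inited with several wait queue
 * entries; that case is detected via the cmpxchg() in chain_epi_lockless()
 * and list_add_tail_lockless().
 */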
static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
        int pwake = 0;
        struct epitem *epi = ep_item_from_wait(wait);
        struct eventpoll *ep = epi->ep;
        __poll_t pollflags = key_to_poll(key);
        unsigned long flags;
        int ewake = 0;

        read_lock_irqsave(&ep->lock, flags);

        ep_set_busy_poll_napi_id(epi);

        /*
         * If the event mask does not contain any poll(2) event, we consider the
         * descriptor to be disabled. This condition is likely the effect of the
         * EPOLLONESHOT bit that disables the descriptor when an event is received,
         * until the next EPOLL_CTL_MOD will be issued.
         */
        if (!(epi->event.events & ~EP_PRIVATE_BITS))
                goto out_unlock;

        /*
         * Check the events coming with the callback. At this stage, not
         * every device reports the events in the "key" parameter of the
         * callback. We need to be able to handle both cases here, hence the
         * test for "key" != NULL before the event match test.
         */
        if (pollflags && !(pollflags & epi->event.events))
                goto out_unlock;

        /*
         * If we are transferring events to userspace, we can hold no locks
         * (because we're accessing user memory, and because of linux f_op->poll()
         * semantics). All the events that happen during that period of time are
         * chained in ep->ovflist and requeued later on.
         */
        if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
                if (chain_epi_lockless(epi))
                        ep_pm_stay_awake_rcu(epi);
        } else if (!ep_is_linked(epi)) {
                /* In the usual case, add event to ready list. */
                if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
                        ep_pm_stay_awake_rcu(epi);
        }

        /*
         * Wake up ( if active ) both the eventpoll wait list and the ->poll()
         * wait list.
         */
        if (waitqueue_active(&ep->wq)) {
                if ((epi->event.events & EPOLLEXCLUSIVE) &&
                                        !(pollflags & POLLFREE)) {
                        switch (pollflags & EPOLLINOUT_BITS) {
                        case EPOLLIN:
                                if (epi->event.events & EPOLLIN)
                                        ewake = 1;
                                break;
                        case EPOLLOUT:
                                if (epi->event.events & EPOLLOUT)
                                        ewake = 1;
                                break;
                        case 0:
                                ewake = 1;
                                break;
                        }
                }
                wake_up(&ep->wq);
        }
        if (waitqueue_active(&ep->poll_wait))
                pwake++;

out_unlock:
        read_unlock_irqrestore(&ep->lock, flags);

        /* We have to call this outside the lock */
        if (pwake)
                ep_poll_safewake(ep, epi);

        if (!(epi->event.events & EPOLLEXCLUSIVE))
                ewake = 1;

        if (pollflags & POLLFREE) {
                /*
                 * If we race with ep_remove_wait_queue() it can miss
                 * ->whead = NULL and do another remove_wait_queue() after
                 * us, so we can't use __remove_wait_queue().
                 */
                list_del_init(&wait->entry);
                /*
                 * ->whead != NULL protects us from the race with ep_free()
                 * or ep_remove(), ep_remove_wait_queue() takes whead->lock
                 * held by the caller. Once we nullify it, nothing protects
                 * ep/epi or even wait.
                 */
                smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
        }

        return ewake;
}

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
                                 poll_table *pt)
{
        struct ep_pqueue *epq = container_of(pt, struct ep_pqueue, pt);
        struct epitem *epi = epq->epi;
        struct eppoll_entry *pwq;

        if (unlikely(!epi))     /* an earlier allocation has failed */
                return;

        pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL);
        if (unlikely(!pwq)) {
                epq->epi = NULL;
                return;
        }

        init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
        pwq->whead = whead;
        pwq->base = epi;
        if (epi->event.events & EPOLLEXCLUSIVE)
                add_wait_queue_exclusive(whead, &pwq->wait);
        else
                add_wait_queue(whead, &pwq->wait);
        pwq->next = epi->pwqlist;
        epi->pwqlist = pwq;
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
        int kcmp;
        struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
        struct epitem *epic;
        bool leftmost = true;

        while (*p) {
                parent = *p;
                epic = rb_entry(parent, struct epitem, rbn);
                kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
                if (kcmp > 0) {
                        p = &parent->rb_right;
                        leftmost = false;
                } else
                        p = &parent->rb_left;
        }
        rb_link_node(&epi->rbn, parent, p);
        rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
}

#define PATH_ARR_SIZE 5
/*
 * These are the number of paths of length 1 to 5 that we are allowing to
 * emanate from a single file of interest. For example, we allow 1000 paths
 * of length 1 to emanate from each file of interest. This essentially
 * represents the potential wakeup paths, which need to be limited in order
 * to avoid massive uncontrolled wakeup storms. The common case should be
 * only 1 file of interest, with a wakeup path of length 1, so the numbers
 * below should be more than sufficient.
 */
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];

static int path_count_inc(int nests)
{
        /* nests == 0 (direct watches) are not limited */
        if (nests == 0)
                return 0;

        if (++path_count[nests] > path_limits[nests])
                return -1;
        return 0;
}

static void path_count_init(void)
{
        int i;

        for (i = 0; i < PATH_ARR_SIZE; i++)
                path_count[i] = 0;
}

static int reverse_path_check_proc(struct hlist_head *refs, int depth)
{
        int error = 0;
        struct epitem *epi;

        if (depth > EP_MAX_NESTS) /* too deep nesting */
                return -1;

        /* CTL_DEL can remove links here, but that can't increase our count */
        hlist_for_each_entry_rcu(epi, refs, fllink) {
                struct hlist_head *refs = &epi->ep->refs;
                if (hlist_empty(refs))
                        error = path_count_inc(depth);
                else
                        error = reverse_path_check_proc(refs, depth + 1);
                if (error != 0)
                        break;
        }
        return error;
}
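
/*
 * reverse_path_check - The tfile_check_list is a list of epitems_head whose
 *                      links are proposed to be newly added. We need to make
 *                      sure that those added links don't add too many paths,
 *                      such that we will spend all our time waking up
 *                      eventpoll objects.
 *
 * Return: %zero if the proposed links don't create too many paths,
 *         %-1 otherwise.
 */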
static int reverse_path_check(void)
{
        struct epitems_head *p;

        for (p = tfile_check_list; p != EP_UNACTIVE_PTR; p = p->next) {
                int error;
                path_count_init();
                rcu_read_lock();
                error = reverse_path_check_proc(&p->epitems, 0);
                rcu_read_unlock();
                if (error)
                        return error;
        }
        return 0;
}

static int ep_create_wakeup_source(struct epitem *epi)
{
        struct name_snapshot n;
        struct wakeup_source *ws;

        if (!epi->ep->ws) {
                epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
                if (!epi->ep->ws)
                        return -ENOMEM;
        }

        take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
        ws = wakeup_source_register(NULL, n.name.name);
        release_dentry_name_snapshot(&n);

        if (!ws)
                return -ENOMEM;
        rcu_assign_pointer(epi->ws, ws);

        return 0;
}

/* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
static noinline void ep_destroy_wakeup_source(struct epitem *epi)
{
        struct wakeup_source *ws = ep_wakeup_source(epi);

        RCU_INIT_POINTER(epi->ws, NULL);

        /*
         * wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
         * used internally by wakeup_source_remove, too (called by
         * wakeup_source_unregister), so we cannot use an rcu callback here.
         */
        synchronize_rcu();
        wakeup_source_unregister(ws);
}

static int attach_epitem(struct file *file, struct epitem *epi)
{
        struct epitems_head *to_free = NULL;
        struct hlist_head *head = NULL;
        struct eventpoll *ep = NULL;

        if (is_file_epoll(file))
                ep = file->private_data;

        if (ep) {
                head = &ep->refs;
        } else if (!READ_ONCE(file->f_ep)) {
allocate:
                to_free = kmem_cache_zalloc(ephead_cache, GFP_KERNEL);
                if (!to_free)
                        return -ENOMEM;
                head = &to_free->epitems;
        }
        spin_lock(&file->f_lock);
        if (!file->f_ep) {
                if (unlikely(!head)) {
                        spin_unlock(&file->f_lock);
                        goto allocate;
                }
                file->f_ep = head;
                to_free = NULL;
        }
        hlist_add_head_rcu(&epi->fllink, file->f_ep);
        spin_unlock(&file->f_lock);
        free_ephead(to_free);
        return 0;
}
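
/*
 * Must be called with "mtx" held.
 */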
static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
                     struct file *tfile, int fd, int full_check)
{
        int error, pwake = 0;
        __poll_t revents;
        long user_watches;
        struct epitem *epi;
        struct ep_pqueue epq;
        struct eventpoll *tep = NULL;

        if (is_file_epoll(tfile))
                tep = tfile->private_data;

        lockdep_assert_irqs_enabled();

        user_watches = atomic_long_read(&ep->user->epoll_watches);
        if (unlikely(user_watches >= max_user_watches))
                return -ENOSPC;
        if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL)))
                return -ENOMEM;

        /* Item initialization follows here ... */
        INIT_LIST_HEAD(&epi->rdllink);
        epi->ep = ep;
        ep_set_ffd(&epi->ffd, tfile, fd);
        epi->event = *event;
        epi->next = EP_UNACTIVE_PTR;

        if (tep)
                mutex_lock_nested(&tep->mtx, 1);
        /* Add the current item to the list of active epoll hooks for this file */
        if (unlikely(attach_epitem(tfile, epi) < 0)) {
                kmem_cache_free(epi_cache, epi);
                if (tep)
                        mutex_unlock(&tep->mtx);
                return -ENOMEM;
        }

        if (full_check && !tep)
                list_file(tfile);

        atomic_long_inc(&ep->user->epoll_watches);

        /*
         * Add the current item to the RB tree. All RB tree operations are
         * protected by "mtx", and ep_insert() is called with "mtx" held.
         */
        ep_rbtree_insert(ep, epi);
        if (tep)
                mutex_unlock(&tep->mtx);

        /* now check if we've created too many backpaths */
        if (unlikely(full_check && reverse_path_check())) {
                ep_remove(ep, epi);
                return -EINVAL;
        }

        if (epi->event.events & EPOLLWAKEUP) {
                error = ep_create_wakeup_source(epi);
                if (error) {
                        ep_remove(ep, epi);
                        return error;
                }
        }

        /* Initialize the poll table using the queue callback */
        epq.epi = epi;
        init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

        /*
         * Attach the item to the poll hooks and get current event bits.
         * We can safely use the file* here because its usage count has
         * been increased by the caller of this function. Note that after
         * this operation completes, the poll callback can start hitting
         * the new item.
         */
        revents = ep_item_poll(epi, &epq.pt, 1);

        /*
         * We have to check if something went wrong during the poll wait queue
         * install process. Namely an allocation for a wait queue failed due
         * to high memory pressure.
         */
        if (unlikely(!epq.epi)) {
                ep_remove(ep, epi);
                return -ENOMEM;
        }

        /* We have to drop the new item inside our item list to keep track of it */
        write_lock_irq(&ep->lock);

        /* record NAPI ID of new item if present */
        ep_set_busy_poll_napi_id(epi);

        /* If the file is already "ready" we drop it inside the ready list */
        if (revents && !ep_is_linked(epi)) {
                list_add_tail(&epi->rdllink, &ep->rdllist);
                ep_pm_stay_awake(epi);

                /* Notify waiting tasks that events are available */
                if (waitqueue_active(&ep->wq))
                        wake_up(&ep->wq);
                if (waitqueue_active(&ep->poll_wait))
                        pwake++;
        }

        write_unlock_irq(&ep->lock);

        /* We have to call this outside the lock */
        if (pwake)
                ep_poll_safewake(ep, NULL);

        return 0;
}

/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi,
                     const struct epoll_event *event)
{
        int pwake = 0;
        poll_table pt;

        lockdep_assert_irqs_enabled();

        init_poll_funcptr(&pt, NULL);

        /*
         * Set the new event interest mask before calling f_op->poll();
         * otherwise we might miss an event that happens between the
         * f_op->poll() call and the new event set registering.
         */
        epi->event.events = event->events;
        epi->event.data = event->data;
        if (epi->event.events & EPOLLWAKEUP) {
                if (!ep_has_wakeup_source(epi))
                        ep_create_wakeup_source(epi);
        } else if (ep_has_wakeup_source(epi)) {
                ep_destroy_wakeup_source(epi);
        }

        /*
         * The following barrier has two effects:
         *
         * 1) Flush epi changes above to other CPUs. This ensures we do
         *    not miss events from ep_poll_callback if an event occurs
         *    immediately after we call f_op->poll(). We need this because
         *    we did not take ep->lock while changing epi above (but
         *    ep_poll_callback does take ep->lock).
         *
         * 2) We also need to ensure we do not miss _past_ events when
         *    calling f_op->poll(). This barrier also pairs with the
         *    barrier in wq_has_sleeper (see comments for wq_has_sleeper).
         *
         * This barrier will now guarantee ep_poll_callback or f_op->poll
         * (or both) will notice the readiness of an item.
         */
        smp_mb();

        /*
         * Get current event bits. We can safely use the file* here because
         * its usage count has been increased by the caller of this function.
         * If the item is "hot" and it is not registered inside the ready
         * list, push it inside.
         */
        if (ep_item_poll(epi, &pt, 1)) {
                write_lock_irq(&ep->lock);
                if (!ep_is_linked(epi)) {
                        list_add_tail(&epi->rdllink, &ep->rdllist);
                        ep_pm_stay_awake(epi);

                        /* Notify waiting tasks that events are available */
                        if (waitqueue_active(&ep->wq))
                                wake_up(&ep->wq);
                        if (waitqueue_active(&ep->poll_wait))
                                pwake++;
                }
                write_unlock_irq(&ep->lock);
        }

        /* We have to call this outside the lock */
        if (pwake)
                ep_poll_safewake(ep, NULL);

        return 0;
}

static int ep_send_events(struct eventpoll *ep,
                          struct epoll_event __user *events, int maxevents)
{
        struct epitem *epi, *tmp;
        LIST_HEAD(txlist);
        poll_table pt;
        int res = 0;

        /*
         * Always short-circuit for fatal signals to allow threads to make a
         * timely exit without the chance of finding more events available and
         * fetching repeatedly.
         */
        if (fatal_signal_pending(current))
                return -EINTR;

        init_poll_funcptr(&pt, NULL);

        mutex_lock(&ep->mtx);
        ep_start_scan(ep, &txlist);

        /*
         * We can loop without lock because we are passed a task private list.
         * Items cannot vanish during the loop because we are holding "mtx".
         */
        list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
                struct wakeup_source *ws;
                __poll_t revents;

                if (res >= maxevents)
                        break;

                /*
                 * Activate ep->ws before deactivating epi->ws to prevent
                 * triggering auto-suspend here (in case we reactivate epi->ws
                 * below).
                 *
                 * This could be rearranged to delay the deactivation of epi->ws
                 * instead, but then epi->ws would temporarily be out of sync
                 * with ep_is_linked().
                 */
                ws = ep_wakeup_source(epi);
                if (ws) {
                        if (ws->active)
                                __pm_stay_awake(ep->ws);
                        __pm_relax(ws);
                }

                list_del_init(&epi->rdllink);

                /*
                 * If the event mask intersects the caller-requested one,
                 * deliver the event to userspace. Again, we are holding "mtx",
                 * so no operations coming from userspace can change the item.
                 */
                revents = ep_item_poll(epi, &pt, 1);
                if (!revents)
                        continue;

                if (__put_user(revents, &events->events) ||
                    __put_user(epi->event.data, &events->data)) {
                        list_add(&epi->rdllink, &txlist);
                        ep_pm_stay_awake(epi);
                        if (!res)
                                res = -EFAULT;
                        break;
                }
                res++;
                events++;
                if (epi->event.events & EPOLLONESHOT)
                        epi->event.events &= EP_PRIVATE_BITS;
                else if (!(epi->event.events & EPOLLET)) {
                        /*
                         * If this file has been added with Level
                         * Trigger mode, we need to insert it back inside
                         * the ready list, so that the next call to
                         * epoll_wait() will check again the events
                         * availability. At this point, no one can insert
                         * into ep->rdllist besides us. The epoll_ctl()
                         * callers are locked out by
                         * ep_send_events() holding "mtx" and the
                         * poll callback will queue them in ep->ovflist.
                         */
                        list_add_tail(&epi->rdllink, &ep->rdllist);
                        ep_pm_stay_awake(epi);
                }
        }
        ep_done_scan(ep, &txlist);
        mutex_unlock(&ep->mtx);

        return res;
}

static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)
{
        struct timespec64 now;

        if (ms < 0)
                return NULL;

        if (!ms) {
                to->tv_sec = 0;
                to->tv_nsec = 0;
                return to;
        }

        to->tv_sec = ms / MSEC_PER_SEC;
        to->tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC);

        ktime_get_ts64(&now);
        *to = timespec64_add_safe(now, *to);
        return to;
}
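
/**
 * ep_poll - Retrieves ready events, and delivers them to the caller-supplied
 *           event buffer.
 *
 * @ep: Pointer to the eventpoll context.
 * @events: Pointer to the userspace buffer where the ready events should be
 *          stored.
 * @maxevents: Size (in terms of number of events) of the caller event buffer.
 * @timeout: Maximum timeout for the ready events fetch operation, in
 *           timespec. If the timeout is zero, the function will not block,
 *           while if the @timeout ptr is NULL, the function will block
 *           until at least one event has been retrieved (or an error
 *           occurred).
 *
 * Return: the number of ready events which have been fetched, or an
 *         error code, in case of error.
 */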
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
                   int maxevents, struct timespec64 *timeout)
{
        int res, eavail, timed_out = 0;
        u64 slack = 0;
        wait_queue_entry_t wait;
        ktime_t expires, *to = NULL;

        lockdep_assert_irqs_enabled();

        if (timeout && (timeout->tv_sec | timeout->tv_nsec)) {
                slack = select_estimate_accuracy(timeout);
                to = &expires;
                *to = timespec64_to_ktime(*timeout);
        } else if (timeout) {
                /*
                 * Avoid the unnecessary trip to the wait queue loop, if the
                 * caller specified a non blocking operation.
                 */
                timed_out = 1;
        }

        /*
         * This call is racy: we may or may not see events that are being
         * added to the ready list under the lock (e.g., in IRQ callbacks).
         * For cases with a non-zero timeout, this thread will check the
         * ready list under lock and will add to the wait queue. For cases
         * with a zero timeout, the user by definition should not care and
         * will have to recheck again.
         */
        eavail = ep_events_available(ep);

        while (1) {
                if (eavail) {
                        /*
                         * Try to transfer events to user space. In case we get
                         * 0 events and there's still timeout left over, we go
                         * trying again in search of more luck.
                         */
                        res = ep_send_events(ep, events, maxevents);
                        if (res)
                                return res;
                }

                if (timed_out)
                        return 0;

                eavail = ep_busy_loop(ep, timed_out);
                if (eavail)
                        continue;

                if (signal_pending(current))
                        return -EINTR;

                /*
                 * Internally init_wait() uses autoremove_wake_function(),
                 * thus wait entry is removed from the wait queue on each
                 * wakeup. Why is it important? In case of several waiters
                 * each new wakeup will hit the next waiter, giving it the
                 * chance to harvest new event. Otherwise wakeup can be
                 * lost. This is also good performance-wise, because on
                 * normal wakeup path no need to call __remove_wait_queue()
                 * explicitly, thus ep->lock is not taken, which halts the
                 * event delivery.
                 */
                init_wait(&wait);

                write_lock_irq(&ep->lock);
                /*
                 * Barrierless variant, waitqueue_active() is called under
                 * the same lock on wakeup ep_poll_callback() side, so it
                 * is safe to avoid an explicit barrier.
                 */
                __set_current_state(TASK_INTERRUPTIBLE);

                /*
                 * Do the final check under the lock. ep_start/done_scan()
                 * plays with two lists (->rdllist and ->ovflist) and there
                 * is always a race when both lists are empty for short
                 * period of time although events are pending, so lock is
                 * important.
                 */
                eavail = ep_events_available(ep);
                if (!eavail)
                        __add_wait_queue_exclusive(&ep->wq, &wait);

                write_unlock_irq(&ep->lock);

                if (!eavail)
                        timed_out = !schedule_hrtimeout_range(to, slack,
                                                              HRTIMER_MODE_ABS);
                __set_current_state(TASK_RUNNING);

                /*
                 * We were woken up, thus go and try to harvest some events.
                 * If timed out and still on the wait queue, recheck eavail
                 * carefully under lock, below.
                 */
                eavail = 1;

                if (!list_empty_careful(&wait.entry)) {
                        write_lock_irq(&ep->lock);
                        /*
                         * If the thread timed out and is not on the wait queue,
                         * it means that the thread was woken up after its
                         * timeout expired before it could reacquire the lock.
                         * Thus, when wait.entry is empty, it needs to harvest
                         * events.
                         */
                        if (timed_out)
                                eavail = list_empty(&wait.entry);
                        __remove_wait_queue(&ep->wq, &wait);
                        write_unlock_irq(&ep->lock);
                }
        }
}
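
/**
 * ep_loop_check_proc - verify that adding an epoll file inside another epoll
 *                      structure does not violate the constraints, in terms
 *                      of closed loops, or too deep chains (which can result
 *                      in excessive stack usage).
 *
 * @ep: the &struct eventpoll to be currently checked.
 * @depth: current depth of the path being checked.
 *
 * Return: %zero if adding the epoll file inside the current epoll structure
 *         @ep does not violate the constraints, or %-1 otherwise.
 */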
static int ep_loop_check_proc(struct eventpoll *ep, int depth)
{
        int error = 0;
        struct rb_node *rbp;
        struct epitem *epi;

        mutex_lock_nested(&ep->mtx, depth + 1);
        ep->gen = loop_check_gen;
        for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
                epi = rb_entry(rbp, struct epitem, rbn);
                if (unlikely(is_file_epoll(epi->ffd.file))) {
                        struct eventpoll *ep_tovisit;
                        ep_tovisit = epi->ffd.file->private_data;
                        if (ep_tovisit->gen == loop_check_gen)
                                continue;
                        if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
                                error = -1;
                        else
                                error = ep_loop_check_proc(ep_tovisit, depth + 1);
                        if (error != 0)
                                break;
                } else {
                        /*
                         * If we've reached a file that is not associated with
                         * an ep, then we need to check if the newly added
                         * links are going to add too many wakeup paths. We do
                         * this by adding it to the tfile_check_list, if it's
                         * not already there, and calling reverse_path_check()
                         * during ep_insert().
                         */
                        list_file(epi->ffd.file);
                }
        }
        mutex_unlock(&ep->mtx);

        return error;
}

/**
 * ep_loop_check - Performs a check to verify that adding an epoll file (@to)
 *                 into another epoll file (represented by @ep) does not
 *                 create closed loops or too deep chains.
 *
 * Return: %zero if adding the epoll @to inside the epoll @ep does not violate
 *         the constraints, or %-1 otherwise.
 */
static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
{
        inserting_into = ep;
        return ep_loop_check_proc(to, 0);
}

static void clear_tfile_check_list(void)
{
        rcu_read_lock();
        while (tfile_check_list != EP_UNACTIVE_PTR) {
                struct epitems_head *head = tfile_check_list;
                tfile_check_list = head->next;
                unlist_file(head);
        }
        rcu_read_unlock();
}

/*
 * Open an eventpoll file descriptor.
 */
static int do_epoll_create(int flags)
{
        int error, fd;
        struct eventpoll *ep = NULL;
        struct file *file;

        /* Check the EPOLL_* constant for consistency.  */
        BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);

        if (flags & ~EPOLL_CLOEXEC)
                return -EINVAL;
        /*
         * Create the internal data structure ("struct eventpoll").
         */
        error = ep_alloc(&ep);
        if (error < 0)
                return error;
        /*
         * Creates all the items needed to setup an eventpoll file. That is,
         * a file structure and a free file descriptor.
         */
        fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
        if (fd < 0) {
                error = fd;
                goto out_free_ep;
        }
        file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
                                  O_RDWR | (flags & O_CLOEXEC));
        if (IS_ERR(file)) {
                error = PTR_ERR(file);
                goto out_free_fd;
        }
        ep->file = file;
        fd_install(fd, file);
        return fd;

out_free_fd:
        put_unused_fd(fd);
out_free_ep:
        ep_free(ep);
        return error;
}

SYSCALL_DEFINE1(epoll_create1, int, flags)
{
        return do_epoll_create(flags);
}

SYSCALL_DEFINE1(epoll_create, int, size)
{
        if (size <= 0)
                return -EINVAL;

        return do_epoll_create(0);
}
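
/*
 * For reference, a minimal userspace sequence exercising the syscalls
 * implemented in this file (illustrative only, not part of the kernel;
 * "sockfd" stands for any pollable file descriptor):
 *
 *	int epfd = epoll_create1(EPOLL_CLOEXEC);
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = sockfd };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
 *	struct epoll_event out[64];
 *	int n = epoll_wait(epfd, out, 64, -1);	-- a timeout of -1 blocks indefinitely
 */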

static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
                                   bool nonblock)
{
        if (!nonblock) {
                mutex_lock_nested(mutex, depth);
                return 0;
        }
        if (mutex_trylock(mutex))
                return 0;
        return -EAGAIN;
}

int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
                 bool nonblock)
{
        int error;
        int full_check = 0;
        struct fd f, tf;
        struct eventpoll *ep;
        struct epitem *epi;
        struct eventpoll *tep = NULL;

        error = -EBADF;
        f = fdget(epfd);
        if (!f.file)
                goto error_return;

        /* Get the "struct file *" for the target file */
        tf = fdget(fd);
        if (!tf.file)
                goto error_fput;

        /* The target file descriptor must support poll */
        error = -EPERM;
        if (!file_can_poll(tf.file))
                goto error_tgt_fput;

        /* Check if EPOLLWAKEUP is allowed */
        if (ep_op_has_event(op))
                ep_take_care_of_epollwakeup(epds);

        /*
         * We have to check that the file structure underneath the file descriptor
         * the user passed to us _is_ an eventpoll file. And also we do not permit
         * adding an epoll file descriptor inside itself.
         */
        error = -EINVAL;
        if (f.file == tf.file || !is_file_epoll(f.file))
                goto error_tgt_fput;

        /*
         * epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
         * so EPOLLEXCLUSIVE is not allowed for a EPOLL_CTL_MOD operation.
         * Also, we do not currently support nested exclusive wakeups.
         */
        if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
                if (op == EPOLL_CTL_MOD)
                        goto error_tgt_fput;
                if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
                                (epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
                        goto error_tgt_fput;
        }

        /*
         * At this point it is safe to assume that the "private_data" contains
         * our own data structure.
         */
        ep = f.file->private_data;

        /*
         * When we insert an epoll file descriptor inside another epoll file
         * descriptor, there is the chance of creating closed loops, which are
         * better handled here than in more critical paths. While we are
         * checking for loops we also determine the list of files reachable
         * and hang them on the tfile_check_list, so we can check that we
         * haven't created too many possible wakeup paths.
         *
         * We do not need to take the global "epmutex" on EPOLL_CTL_ADD when
         * the epoll file descriptor is attaching directly to a wakeup source,
         * unless the epoll file descriptor is nested. The purpose of taking
         * "epmutex" on add is to prevent complex topologies such as loops and
         * deep wakeup paths from forming in parallel through multiple
         * EPOLL_CTL_ADD operations.
         */
        error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
        if (error)
                goto error_tgt_fput;
        if (op == EPOLL_CTL_ADD) {
                if (READ_ONCE(f.file->f_ep) || ep->gen == loop_check_gen ||
                    is_file_epoll(tf.file)) {
                        mutex_unlock(&ep->mtx);
                        error = epoll_mutex_lock(&epmutex, 0, nonblock);
                        if (error)
                                goto error_tgt_fput;
                        loop_check_gen++;
                        full_check = 1;
                        if (is_file_epoll(tf.file)) {
                                tep = tf.file->private_data;
                                error = -ELOOP;
                                if (ep_loop_check(ep, tep) != 0)
                                        goto error_tgt_fput;
                        }
                        error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
                        if (error)
                                goto error_tgt_fput;
                }
        }

        /*
         * Try to lookup the file inside our RB tree. Since we grabbed "mtx"
         * above, we can be sure to be able to use the item looked up by
         * ep_find() till we release the mutex.
         */
        epi = ep_find(ep, tf.file, fd);

        error = -EINVAL;
        switch (op) {
        case EPOLL_CTL_ADD:
                if (!epi) {
                        epds->events |= EPOLLERR | EPOLLHUP;
                        error = ep_insert(ep, epds, tf.file, fd, full_check);
                } else
                        error = -EEXIST;
                break;
        case EPOLL_CTL_DEL:
                if (epi)
                        error = ep_remove(ep, epi);
                else
                        error = -ENOENT;
                break;
        case EPOLL_CTL_MOD:
                if (epi) {
                        if (!(epi->event.events & EPOLLEXCLUSIVE)) {
                                epds->events |= EPOLLERR | EPOLLHUP;
                                error = ep_modify(ep, epi, epds);
                        }
                } else
                        error = -ENOENT;
                break;
        }
        mutex_unlock(&ep->mtx);

error_tgt_fput:
        if (full_check) {
                clear_tfile_check_list();
                loop_check_gen++;
                mutex_unlock(&epmutex);
        }

        fdput(tf);
error_fput:
        fdput(f);
error_return:

        return error;
}

/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set.
 */
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                struct epoll_event __user *, event)
{
        struct epoll_event epds;

        if (ep_op_has_event(op) &&
            copy_from_user(&epds, event, sizeof(struct epoll_event)))
                return -EFAULT;

        return do_epoll_ctl(epfd, op, fd, &epds, false);
}

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */
static int do_epoll_wait(int epfd, struct epoll_event __user *events,
                         int maxevents, struct timespec64 *to)
{
        int error;
        struct fd f;
        struct eventpoll *ep;

        /* The maximum number of events must be greater than zero */
        if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
                return -EINVAL;

        /* Verify that the area passed by the user is writeable */
        if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
                return -EFAULT;

        /* Get the "struct file *" for the eventpoll file */
        f = fdget(epfd);
        if (!f.file)
                return -EBADF;

        /*
         * We have to check that the file structure underneath the fd
         * the user passed to us _is_ an eventpoll file.
         */
        error = -EINVAL;
        if (!is_file_epoll(f.file))
                goto error_fput;

        /*
         * At this point it is safe to assume that the "private_data" contains
         * our own data structure.
         */
        ep = f.file->private_data;

        /* Time to fish for events ... */
        error = ep_poll(ep, events, maxevents, to);

error_fput:
        fdput(f);
        return error;
}

SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
                int, maxevents, int, timeout)
{
        struct timespec64 to;

        return do_epoll_wait(epfd, events, maxevents,
                             ep_timeout_to_timespec(&to, timeout));
}

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */
static int do_epoll_pwait(int epfd, struct epoll_event __user *events,
                          int maxevents, struct timespec64 *to,
                          const sigset_t __user *sigmask, size_t sigsetsize)
{
        int error;

        /*
         * If the caller wants a certain signal mask to be set during the wait,
         * we apply it here.
         */
        error = set_user_sigmask(sigmask, sigsetsize);
        if (error)
                return error;

        error = do_epoll_wait(epfd, events, maxevents, to);

        restore_saved_sigmask_unless(error == -EINTR);

        return error;
}

SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
                int, maxevents, int, timeout, const sigset_t __user *, sigmask,
                size_t, sigsetsize)
{
        struct timespec64 to;

        return do_epoll_pwait(epfd, events, maxevents,
                              ep_timeout_to_timespec(&to, timeout),
                              sigmask, sigsetsize);
}

SYSCALL_DEFINE6(epoll_pwait2, int, epfd, struct epoll_event __user *, events,
                int, maxevents, const struct __kernel_timespec __user *, timeout,
                const sigset_t __user *, sigmask, size_t, sigsetsize)
{
        struct timespec64 ts, *to = NULL;

        if (timeout) {
                if (get_timespec64(&ts, timeout))
                        return -EFAULT;
                to = &ts;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        return do_epoll_pwait(epfd, events, maxevents, to,
                              sigmask, sigsetsize);
}

#ifdef CONFIG_COMPAT
static int do_compat_epoll_pwait(int epfd, struct epoll_event __user *events,
                                 int maxevents, struct timespec64 *timeout,
                                 const compat_sigset_t __user *sigmask,
                                 compat_size_t sigsetsize)
{
        long err;

        /*
         * If the caller wants a certain signal mask to be set during the wait,
         * we apply it here.
         */
        err = set_compat_user_sigmask(sigmask, sigsetsize);
        if (err)
                return err;

        err = do_epoll_wait(epfd, events, maxevents, timeout);

        restore_saved_sigmask_unless(err == -EINTR);

        return err;
}

COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
                       struct epoll_event __user *, events,
                       int, maxevents, int, timeout,
                       const compat_sigset_t __user *, sigmask,
                       compat_size_t, sigsetsize)
{
        struct timespec64 to;

        return do_compat_epoll_pwait(epfd, events, maxevents,
                                     ep_timeout_to_timespec(&to, timeout),
                                     sigmask, sigsetsize);
}

COMPAT_SYSCALL_DEFINE6(epoll_pwait2, int, epfd,
                       struct epoll_event __user *, events,
                       int, maxevents,
                       const struct __kernel_timespec __user *, timeout,
                       const compat_sigset_t __user *, sigmask,
                       compat_size_t, sigsetsize)
{
        struct timespec64 ts, *to = NULL;

        if (timeout) {
                if (get_timespec64(&ts, timeout))
                        return -EFAULT;
                to = &ts;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        return do_compat_epoll_pwait(epfd, events, maxevents, to,
                                     sigmask, sigsetsize);
}

#endif /* CONFIG_COMPAT */

static int __init eventpoll_init(void)
{
        struct sysinfo si;

        si_meminfo(&si);
        /*
         * Allows top 4% of lomem to be allocated for epoll watches (per user).
         */
        max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
                EP_ITEM_COST;
        BUG_ON(max_user_watches < 0);

        /*
         * We can have many thousands of epitems, so prevent this from
         * using an extra cache line on 64-bit (and smaller) CPUs
         */
        BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);

        /* Allocates slab cache used to allocate "struct epitem" items */
        epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);

        /* Allocates slab cache used to allocate "struct eppoll_entry" */
        pwq_cache = kmem_cache_create("eventpoll_pwq",
                sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);

        ephead_cache = kmem_cache_create("ep_head",
                sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);

        return 0;
}
fs_initcall(eventpoll_init);