1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/spinlock.h>
27#include <linux/idr.h>
28#include <linux/slab.h>
29#include <linux/fs.h>
30#include <linux/sched.h>
31#include <linux/init.h>
32#include <linux/list.h>
33#include <linux/writeback.h>
34#include <linux/inotify.h>
35#include <linux/fsnotify_backend.h>
36
/* Global move-cookie counter shared by all inotify instances; only ever
 * advanced via atomic_inc_return() in inotify_get_cookie(). */
static atomic_t inotify_cookie;
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
/*
 * struct inotify_handle - one inotify instance, as created by inotify_init().
 * All fields are protected by ->mutex except ->count, which is atomic.
 */
struct inotify_handle {
	struct idr		idr;		/* idr mapping wd -> watch */
	struct mutex		mutex;		/* protects this handle */
	struct list_head	watches;	/* list of watches on this handle */
	atomic_t		count;		/* reference count */
	u32			last_wd;	/* the last wd allocated */
	const struct inotify_operations *in_ops; /* caller's event callbacks */
};
86
/* Take a reference on an inotify handle. */
static inline void get_inotify_handle(struct inotify_handle *ih)
{
	atomic_inc(&ih->count);
}
91
/*
 * Drop a reference on an inotify handle; the final put destroys the idr
 * and frees the handle itself.
 */
static inline void put_inotify_handle(struct inotify_handle *ih)
{
	if (atomic_dec_and_test(&ih->count)) {
		idr_destroy(&ih->idr);
		kfree(ih);
	}
}
99
100
101
102
103
/**
 * get_inotify_watch - grab a reference to an inotify_watch
 * @watch: watch to grab
 */
void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}
EXPORT_SYMBOL_GPL(get_inotify_watch);
109
/**
 * pin_inotify_watch - grab a reference on a watch and pin its superblock
 * @watch: watch to pin
 *
 * Returns 1 on success: ->s_active of the watch's superblock has been
 * bumped and a watch reference taken.  Undo with unpin_inotify_watch().
 * Returns 0 when the superblock is no longer active (s_count < S_BIAS),
 * i.e. umount is already under way.
 * NOTE(review): the s_count >= S_BIAS test is the VFS "sb still active"
 * convention of this kernel generation — confirm against sb refcounting.
 */
int pin_inotify_watch(struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;
	spin_lock(&sb_lock);
	if (sb->s_count >= S_BIAS) {
		atomic_inc(&sb->s_active);
		spin_unlock(&sb_lock);
		atomic_inc(&watch->count);
		return 1;
	}
	spin_unlock(&sb_lock);
	return 0;
}
123
124
125
126
127
128
129
/**
 * put_inotify_watch - decrements the ref count on a given watch.  cleans up
 * watch references if the count reaches zero.  The watch memory itself is
 * released by the caller via the ->destroy_watch() op; the handle reference
 * taken in inotify_add_watch()/inotify_clone_watch() is dropped here too.
 * @watch: watch to release
 */
void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		struct inotify_handle *ih = watch->ih;

		/* release the inode reference taken when the watch was added */
		iput(watch->inode);
		ih->in_ops->destroy_watch(watch);
		put_inotify_handle(ih);
	}
}
EXPORT_SYMBOL_GPL(put_inotify_watch);
141
/*
 * unpin_inotify_watch - undo pin_inotify_watch(): drop the watch reference
 * and the superblock ->s_active reference (via deactivate_super()).
 * ->i_sb must be read before the put, which may destroy the watch.
 */
void unpin_inotify_watch(struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;
	put_inotify_watch(watch);
	deactivate_super(sb);
}
148
149
150
151
152
153
154static int inotify_handle_get_wd(struct inotify_handle *ih,
155 struct inotify_watch *watch)
156{
157 int ret;
158
159 do {
160 if (unlikely(!idr_pre_get(&ih->idr, GFP_NOFS)))
161 return -ENOSPC;
162 ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
163 } while (ret == -EAGAIN);
164
165 if (likely(!ret))
166 ih->last_wd = watch->wd;
167
168 return ret;
169}
170
171
172
173
174
/* Returns true iff at least one watch is attached to @inode. */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}
179
180
181
182
183
/*
 * set_dentry_child_flags - set or clear DCACHE_INOTIFY_PARENT_WATCHED on
 * every cached child dentry of every alias of @inode, so that the fast-path
 * check in inotify_dentry_parent_queue_event() stays accurate.
 * Walks under dcache_lock; each child's flags are updated under its d_lock.
 */
static void set_dentry_child_flags(struct inode *inode, int watched)
{
	struct dentry *alias;

	spin_lock(&dcache_lock);
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct dentry *child;

		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
			/* negative dentries have no inode to report on */
			if (!child->d_inode)
				continue;

			spin_lock(&child->d_lock);
			if (watched)
				child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
			else
				child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED;
			spin_unlock(&child->d_lock);
		}
	}
	spin_unlock(&dcache_lock);
}
206
207
208
209
210
211
212
213static struct inotify_watch *inode_find_handle(struct inode *inode,
214 struct inotify_handle *ih)
215{
216 struct inotify_watch *watch;
217
218 list_for_each_entry(watch, &inode->inotify_watches, i_list) {
219 if (watch->ih == ih)
220 return watch;
221 }
222
223 return NULL;
224}
225
226
227
228
229
230
/*
 * remove_watch_no_event - remove a watch without sending an IN_IGNORED
 * event: unlink it from the inode's and handle's lists and drop its idr
 * slot.  Does NOT drop any watch reference — that is the caller's job.
 * Callers in this file hold both inode->inotify_mutex and ih->mutex.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_handle *ih)
{
	list_del(&watch->i_list);
	list_del(&watch->h_list);

	/* last watch gone: clear the parent-watched hint on child dentries */
	if (!inotify_inode_watched(watch->inode))
		set_dentry_child_flags(watch->inode, 0);

	idr_remove(&ih->idr, watch->wd);
}
242
243
244
245
246
247
248
249
250
251
/**
 * inotify_remove_watch_locked - remove a watch and send IN_IGNORED for it.
 * @ih: inotify handle associated with watch
 * @watch: watch to remove
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex (as all callers
 * in this file do), and must hold a reference keeping @watch alive across
 * the handle_event() callback.
 */
void inotify_remove_watch_locked(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	remove_watch_no_event(watch, ih);
	ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
259
260
261
262
263
264
/*
 * inotify_d_instantiate - a dentry is being attached to @inode; mark it
 * DCACHE_INOTIFY_PARENT_WATCHED if its parent directory is watched, so the
 * parent-event fast path sees it.  Negative instantiation (!inode) needs
 * no flag.
 */
void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
{
	struct dentry *parent;

	if (!inode)
		return;

	spin_lock(&entry->d_lock);
	parent = entry->d_parent;
	if (parent->d_inode && inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	spin_unlock(&entry->d_lock);
}
278
279
280
281
/*
 * inotify_d_move - a dentry has moved; refresh its parent-watched flag from
 * its (new) parent.  No d_lock is taken here — presumably the caller holds
 * the dcache locks required for d_move; confirm against the d_move() path.
 */
void inotify_d_move(struct dentry *entry)
{
	struct dentry *parent;

	parent = entry->d_parent;
	if (inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	else
		entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
}
292
293
294
295
296
297
298
299
300
/**
 * inotify_inode_queue_event - queue an event to all watches on an inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 * @n_inode: inode associated with name, if any
 *
 * Delivers the event to every watch whose mask matches.  IN_ONESHOT
 * watches are unlinked (without IN_IGNORED) before the event is delivered.
 * Lock order: inode->inotify_mutex, then each watch's ih->mutex.
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name, struct inode *n_inode)
{
	struct inotify_watch *watch, *next;

	/* unlocked fast path: nothing watching, nothing to do */
	if (!inotify_inode_watched(inode))
		return;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_handle *ih= watch->ih;
			mutex_lock(&ih->mutex);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, ih);
			ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
						 name, n_inode);
			mutex_unlock(&ih->mutex);
		}
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
325
326
327
328
329
330
331
332
/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 *
 * Fast path: bail out unless the dentry carries the PARENT_WATCHED hint.
 * Otherwise re-check the parent inode under d_lock, pin the parent with
 * dget() so it survives the (blocking) event delivery, and queue against
 * the parent's inode.
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
		return;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name,
					  dentry->d_inode);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
356
357
358
359
/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing
 * events (e.g. pairing IN_MOVED_FROM with IN_MOVED_TO).
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
365
366
367
368
369
370
371
372
373
/**
 * inotify_unmount_inodes - an sb is unmounting; send IN_UNMOUNT followed by
 * IN_IGNORED for every watch on every inode in @list, removing the watches.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * NOTE(review): the spin_unlock/spin_lock of inode_lock below implies the
 * caller holds inode_lock on entry and we may block while it is dropped —
 * confirm against the umount caller.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
		 * I_WILL_FREE or I_NEW; skipping them is fine since by that
		 * point such an inode cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
			continue;

		/*
		 * If i_count is zero the inode has no watches, and an
		 * __iget/iput pair here would needlessly evict it from the
		 * icache while the sb is going down — so skip it.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;

		/* in case watch removal drops the last reference to it */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;

		/* pin next_i too, so dropping references can't free it
		 * while we sleep below */
		if ((&next_i->i_sb_list != list) &&
		    atomic_read(&next_i->i_count) &&
		    !(next_i->i_state & (I_CLEAR | I_FREEING |
					 I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * Safe to drop inode_lock: we hold references on both inode
		 * and next_i, and no new inodes appear on an unmounting sb.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch: deliver IN_UNMOUNT, then remove it
		 * (which also delivers IN_IGNORED) */
		mutex_lock(&inode->inotify_mutex);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_handle *ih= watch->ih;
			get_inotify_watch(watch);
			mutex_lock(&ih->mutex);
			ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
						 NULL, NULL);
			inotify_remove_watch_locked(ih, watch);
			mutex_unlock(&ih->mutex);
			put_inotify_watch(watch);
		}
		mutex_unlock(&inode->inotify_mutex);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
447
448
449
450
451
/**
 * inotify_inode_is_dead - an inode is about to be removed; remove every
 * watch on it, delivering IN_IGNORED for each.
 * @inode: the inode that is dying
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_handle *ih = watch->ih;
		mutex_lock(&ih->mutex);
		inotify_remove_watch_locked(ih, watch);
		mutex_unlock(&ih->mutex);
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
466
467
468
469
470
471
472
473struct inotify_handle *inotify_init(const struct inotify_operations *ops)
474{
475 struct inotify_handle *ih;
476
477 ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
478 if (unlikely(!ih))
479 return ERR_PTR(-ENOMEM);
480
481 idr_init(&ih->idr);
482 INIT_LIST_HEAD(&ih->watches);
483 mutex_init(&ih->mutex);
484 ih->last_wd = 0;
485 ih->in_ops = ops;
486 atomic_set(&ih->count, 0);
487 get_inotify_handle(ih);
488
489 return ih;
490}
491EXPORT_SYMBOL_GPL(inotify_init);
492
493
494
495
496
497void inotify_init_watch(struct inotify_watch *watch)
498{
499 INIT_LIST_HEAD(&watch->h_list);
500 INIT_LIST_HEAD(&watch->i_list);
501 atomic_set(&watch->count, 0);
502 get_inotify_watch(watch);
503}
504EXPORT_SYMBOL_GPL(inotify_init_watch);
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
/*
 * pin_to_kill - pin a watch's superblock so the watch can be killed safely
 * against a concurrent umount.  Called with ih->mutex held; ALWAYS drops
 * ih->mutex before returning.
 *
 * Returns:
 *   1 - sb was active: bumped ->s_active and took a watch reference.
 *       Caller undoes via unpin_and_kill(watch, 1) -> deactivate_super().
 *   2 - sb was mid-umount: took a passive ->s_count reference, slept on
 *       ->s_umount, and the watch is still alive (same idr slot, same sb).
 *       Caller undoes via unpin_and_kill(watch, 2) -> drop_super().
 *   0 - the watch is already dead: fs shut down (!sb->s_root) or the watch
 *       was removed while we slept.  Nothing pinned, nothing to undo.
 *
 * NOTE(review): the s_count >= S_BIAS test mirrors pin_inotify_watch();
 * this is the pre-2.6.35 sb refcounting convention — confirm if porting.
 */
static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;
	s32 wd = watch->wd;

	spin_lock(&sb_lock);
	if (sb->s_count >= S_BIAS) {
		atomic_inc(&sb->s_active);
		spin_unlock(&sb_lock);
		get_inotify_watch(watch);
		mutex_unlock(&ih->mutex);
		return 1;
	}
	sb->s_count++;
	spin_unlock(&sb_lock);
	mutex_unlock(&ih->mutex);	/* can't take ->s_umount under it */
	down_read(&sb->s_umount);
	if (likely(!sb->s_root)) {
		/* fs is already shut down; the watch is dead */
		drop_super(sb);
		return 0;
	}
	/* umount did not finish; re-validate the watch under ih->mutex */
	mutex_lock(&ih->mutex);
	if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) {
		/* somebody else removed (and possibly reused) our watch */
		mutex_unlock(&ih->mutex);
		drop_super(sb);
		return 0;
	}

	get_inotify_watch(watch);
	mutex_unlock(&ih->mutex);
	return 2;
}
598
599static void unpin_and_kill(struct inotify_watch *watch, int how)
600{
601 struct super_block *sb = watch->inode->i_sb;
602 put_inotify_watch(watch);
603 switch (how) {
604 case 1:
605 deactivate_super(sb);
606 break;
607 case 2:
608 drop_super(sb);
609 }
610}
611
612
613
614
615
/**
 * inotify_destroy - destroy an inotify handle and all of its watches
 * @ih: inotify handle
 *
 * Repeatedly takes the first watch off ih->watches, pins it (and its sb)
 * with pin_to_kill(), then removes it under the proper inode->inotify_mutex
 * -> ih->mutex lock order.  pin_to_kill() drops ih->mutex, so after
 * reacquiring it we must re-check via idr_find() that the watch was not
 * removed concurrently.  Finally drops the handle reference taken in
 * inotify_init().
 */
void inotify_destroy(struct inotify_handle *ih)
{
	/*
	 * Destroy all of the watches for this handle.  The list may change
	 * every time we drop ih->mutex, so restart from the head each pass.
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct super_block *sb;
		struct inode *inode;
		int how;

		mutex_lock(&ih->mutex);
		watches = &ih->watches;
		if (list_empty(watches)) {
			mutex_unlock(&ih->mutex);
			break;
		}
		watch = list_first_entry(watches, struct inotify_watch, h_list);
		sb = watch->inode->i_sb;
		how = pin_to_kill(ih, watch);	/* drops ih->mutex */
		if (!how)
			continue;	/* watch already dead; try the next one */

		inode = watch->inode;
		mutex_lock(&inode->inotify_mutex);
		mutex_lock(&ih->mutex);

		/* make sure we didn't race with another list removal */
		if (likely(idr_find(&ih->idr, watch->wd))) {
			remove_watch_no_event(watch, ih);
			put_inotify_watch(watch);
		}

		mutex_unlock(&ih->mutex);
		mutex_unlock(&inode->inotify_mutex);
		unpin_and_kill(watch, how);
	}

	/* free this handle: the put matching the get in inotify_init() */
	put_inotify_handle(ih);
}
EXPORT_SYMBOL_GPL(inotify_destroy);
664
665
666
667
668
669
670
671
672
673s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
674 struct inotify_watch **watchp)
675{
676 struct inotify_watch *old;
677 int ret = -ENOENT;
678
679 mutex_lock(&inode->inotify_mutex);
680 mutex_lock(&ih->mutex);
681
682 old = inode_find_handle(inode, ih);
683 if (unlikely(old)) {
684 get_inotify_watch(old);
685 *watchp = old;
686 ret = old->wd;
687 }
688
689 mutex_unlock(&ih->mutex);
690 mutex_unlock(&inode->inotify_mutex);
691
692 return ret;
693}
694EXPORT_SYMBOL_GPL(inotify_find_watch);
695
696
697
698
699
700
701
702
703
/**
 * inotify_find_update_watch - find and update the mask of an existing watch
 * @ih: inotify handle
 * @inode: inode's watch to update
 * @mask: mask of events to watch; with IN_MASK_ADD the new bits are OR'd
 *        into the existing mask instead of replacing it
 *
 * Returns the watch's wd on success, -EINVAL if no event bits remain after
 * masking, -ENOENT if this handle has no watch on the inode.
 */
s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
			      u32 mask)
{
	struct inotify_watch *old;
	int mask_add = 0;
	int ret;

	if (mask & IN_MASK_ADD)
		mask_add = 1;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/*
	 * Handle the case of re-adding a watch on an (inode,ih) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_handle(inode, ih);
	if (unlikely(!old)) {
		ret = -ENOENT;
		goto out;
	}

	if (mask_add)
		old->mask |= mask;
	else
		old->mask = mask;
	ret = old->wd;
out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_update_watch);
743
744
745
746
747
748
749
750
751
752
753
754
/**
 * inotify_add_watch - add a watch to an inotify instance
 * @ih: inotify handle
 * @watch: caller-allocated watch to add (initialized via inotify_init_watch())
 * @inode: inode to watch
 * @mask: mask of events to watch
 *
 * Caller must pin the inode in question (e.g. via nameidata).  On success
 * the watch holds references on the handle and the inode, is linked into
 * both lists, and its wd is returned.  Returns -EINVAL if no valid event
 * bits were given, or the error from wd allocation.
 */
s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
		      struct inode *inode, u32 mask)
{
	int ret = 0;
	int newly_watched;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;
	watch->mask = mask;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* allocate a watch descriptor before anything else */
	ret = inotify_handle_get_wd(ih, watch);
	if (unlikely(ret))
		goto out;
	ret = watch->wd;

	/* the watch holds a reference on the handle until destroyed */
	get_inotify_handle(ih);
	watch->ih = ih;

	/*
	 * Save a reference to the inode and bump its ref count to make it
	 * official.  igrab()'s result is not checked — presumably the
	 * caller's pin guarantees the inode is live here; confirm at
	 * call sites.
	 */
	watch->inode = igrab(inode);

	/* Add the watch to the handle's and the inode's list */
	newly_watched = !inotify_inode_watched(inode);
	list_add(&watch->h_list, &ih->watches);
	list_add(&watch->i_list, &inode->inotify_watches);

	/*
	 * Set child flags after adding the watch, so there is no race
	 * windows where newly instantiated children could miss their
	 * parent's watched flag.
	 */
	if (newly_watched)
		set_dentry_child_flags(inode, 1);

out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_add_watch);
804
805
806
807
808
809
810
811
812
/**
 * inotify_clone_watch - put a new watch next to an existing one
 * @old: the watch being cloned (supplies mask, handle and inode)
 * @new: caller-allocated watch to install
 *
 * Only ih->mutex is taken here; presumably the caller already holds
 * old->inode->inotify_mutex (required for the list_add on the inode's
 * watch list) — confirm at call sites.  Returns new->wd on success or a
 * negative error from wd allocation.
 */
s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new)
{
	struct inotify_handle *ih = old->ih;
	int ret = 0;

	new->mask = old->mask;
	new->ih = ih;

	mutex_lock(&ih->mutex);

	/* allocate a watch descriptor for the clone */
	ret = inotify_handle_get_wd(ih, new);
	if (unlikely(ret))
		goto out;
	ret = new->wd;

	/* the clone holds its own references on the handle and inode */
	get_inotify_handle(ih);

	new->inode = igrab(old->inode);

	list_add(&new->h_list, &ih->watches);
	list_add(&new->i_list, &old->inode->inotify_watches);
out:
	mutex_unlock(&ih->mutex);
	return ret;
}
839
/*
 * inotify_evict_watch - remove a watch, delivering IN_IGNORED.  Takes an
 * extra watch reference first — presumably consumed later by the caller's
 * event-consumption path; confirm against the in_ops implementation.
 */
void inotify_evict_watch(struct inotify_watch *watch)
{
	get_inotify_watch(watch);
	mutex_lock(&watch->ih->mutex);
	inotify_remove_watch_locked(watch->ih, watch);
	mutex_unlock(&watch->ih->mutex);
}
847
848
849
850
851
852
853
854
/**
 * inotify_rm_wd - remove a watch from this inotify instance by descriptor
 * @ih: inotify handle
 * @wd: watch descriptor to remove
 *
 * Returns -EINVAL if @wd is not a live descriptor; 0 otherwise — including
 * when pin_to_kill() finds the watch already dead (how == 0), which counts
 * as a successful removal.  pin_to_kill() drops ih->mutex, so the idr slot
 * is re-checked after the locks are retaken in the proper order
 * (inode->inotify_mutex before ih->mutex).
 */
int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
	struct inotify_watch *watch;
	struct super_block *sb;
	struct inode *inode;
	int how;

	mutex_lock(&ih->mutex);
	watch = idr_find(&ih->idr, wd);
	if (unlikely(!watch)) {
		mutex_unlock(&ih->mutex);
		return -EINVAL;
	}
	sb = watch->inode->i_sb;
	how = pin_to_kill(ih, watch);	/* drops ih->mutex */
	if (!how)
		return 0;

	inode = watch->inode;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* make sure that we did not race */
	if (likely(idr_find(&ih->idr, wd) == watch))
		inotify_remove_watch_locked(ih, watch);

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	unpin_and_kill(watch, how);

	return 0;
}
EXPORT_SYMBOL_GPL(inotify_rm_wd);
889
890
891
892
893
894
895
896
/**
 * inotify_rm_watch - remove a watch from an inotify instance
 * @ih: inotify handle
 * @watch: watch to remove
 *
 * Thin wrapper: delegates to inotify_rm_wd() using the watch's descriptor.
 */
int inotify_rm_watch(struct inotify_handle *ih,
		     struct inotify_watch *watch)
{
	return inotify_rm_wd(ih, watch->wd);
}
EXPORT_SYMBOL_GPL(inotify_rm_watch);
903
904
905
906
/*
 * inotify_setup - module boot-time initialization.  The BUILD_BUG_ONs
 * pin the IN_* userspace event bits to the FS_* fsnotify backend bits so
 * masks can be passed through without translation; also resets the global
 * move-cookie counter.
 */
static int __init inotify_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);

	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	atomic_set(&inotify_cookie, 0);

	return 0;
}

module_init(inotify_setup);
934