/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * fsnotify mark locking, lifetime and refcounting.
 *
 * LOCKING:
 * There are 3 locks involved with fsnotify marks and they MUST be taken
 * in the correct order as follows:
 *
 * group->mark_mutex
 * mark->lock
 * inode->i_lock
 *
 * group->mark_mutex protects the marks_list anchored inside a given group and
 * each mark is hooked via the g_list.  It also protects the group's private
 * data (i.e. group limits).
 *
 * mark->lock protects the mark's attributes, like its masks and flags, as
 * well as the reference to the group and to the inode/vfsmount that the mark
 * is attached to.
 *
 * inode->i_lock protects the i_fsnotify_marks list anchored inside a given
 * inode.
 *
 * LIFETIME:
 * A mark survives from the time it is attached to an object until its refcnt
 * hits zero.  Marks get detached because the watched object goes away
 * (unlink, eviction, unmount), because the owning group is being torn down,
 * or because userspace explicitly asked for removal.  The actual freeing is
 * deferred to a workqueue which first waits for an SRCU grace period so that
 * concurrent readers of the mark lists are not disturbed.
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#define FSNOTIFY_REAPER_DELAY	(1)	/* 1 jiffy */

struct srcu_struct fsnotify_mark_srcu;
static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);

static void fsnotify_mark_destroy(struct work_struct *work);
static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy);

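/* Take a reference on a mark; pairs with fsnotify_put_mark(). */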
void fsnotify_get_mark(struct fsnotify_mark *mark)
{
	atomic_inc(&mark->refcnt);
}

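/*
 * Drop a reference on a mark.  The final put releases the mark's group
 * reference and frees the mark via its owner's free_mark() callback.
 */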
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
	if (atomic_dec_and_test(&mark->refcnt)) {
		if (mark->group)
			fsnotify_put_group(mark->group);
		mark->free_mark(mark);
	}
}

/* Calculate mask of events for a list of marks */
u32 fsnotify_recalc_mask(struct hlist_head *head)
{
	u32 new_mask = 0;
	struct fsnotify_mark *mark;

	hlist_for_each_entry(mark, head, obj_list)
		new_mask |= mark->mask;
	return new_mask;
}

/*
 * Remove mark from inode / vfsmount list, group list, drop inode reference
 * if we got one.
 *
 * Must be called with group->mark_mutex held.
 */
void fsnotify_detach_mark(struct fsnotify_mark *mark)
{
	struct inode *inode = NULL;
	struct fsnotify_group *group = mark->group;

	BUG_ON(!mutex_is_locked(&group->mark_mutex));

	spin_lock(&mark->lock);

	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&mark->lock);
		return;
	}

	mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED;

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
		inode = mark->inode;
		fsnotify_destroy_inode_mark(mark);
	} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
		fsnotify_destroy_vfsmount_mark(mark);
	else
		BUG();
	/*
	 * Note that we didn't update flags telling whether inode cares about
	 * what's happening with children. We update these flags from
	 * __fsnotify_parent() lazily when next event happens on one of our
	 * children.
	 */

	list_del_init(&mark->g_list);

	spin_unlock(&mark->lock);

	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
		iput(inode);

	atomic_dec(&group->num_marks);
}

/*
 * Free fsnotify mark.  The freeing is actually happening from a workqueue
 * which first waits for SRCU period end.  Caller must have a reference to
 * the mark or be protected by fsnotify_mark_srcu.
 */
void fsnotify_free_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;

	spin_lock(&mark->lock);
	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
		spin_unlock(&mark->lock);
		return;
	}
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	spin_unlock(&mark->lock);

	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	queue_delayed_work(system_unbound_wq, &reaper_work,
			   FSNOTIFY_REAPER_DELAY);

	/*
	 * Some groups like to know that marks are being freed.  This is a
	 * callback to the group function to let it know that this mark
	 * is being freed.
	 */
	if (group->ops->freeing_mark)
		group->ops->freeing_mark(mark, group);
}

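/*
 * Detach a mark under the group's mark_mutex and queue it for freeing:
 * a convenience wrapper combining fsnotify_detach_mark() and
 * fsnotify_free_mark().
 */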
void fsnotify_destroy_mark(struct fsnotify_mark *mark,
			   struct fsnotify_group *group)
{
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
	fsnotify_detach_mark(mark);
	mutex_unlock(&group->mark_mutex);
	fsnotify_free_mark(mark);
}

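/*
 * Destroy all marks on the given object list (of an inode or vfsmount);
 * 'lock' is the spinlock protecting that list.
 */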
void fsnotify_destroy_marks(struct hlist_head *head, spinlock_t *lock)
{
	struct fsnotify_mark *mark;

	while (1) {
		/*
		 * We have to be careful since we can race with e.g.
		 * fsnotify_clear_marks_by_group() and once we drop 'lock',
		 * mark can get removed from the obj_list and destroyed. But
		 * we are holding mark reference so mark cannot be freed and
		 * calling fsnotify_destroy_mark() more than once is fine.
		 */
		spin_lock(lock);
		if (hlist_empty(head)) {
			spin_unlock(lock);
			break;
		}
		mark = hlist_entry(head->first, struct fsnotify_mark, obj_list);
		/*
		 * We don't update i_fsnotify_mask / mnt_fsnotify_mask here
		 * since inode / mount is going away anyway. So just remove
		 * mark from the list.
		 */
		hlist_del_init_rcu(&mark->obj_list);
		fsnotify_get_mark(mark);
		spin_unlock(lock);
		fsnotify_destroy_mark(mark, mark->group);
		fsnotify_put_mark(mark);
	}
}

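/* Update the event mask of a mark; caller must hold mark->lock. */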
void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->mask = mask;

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE)
		fsnotify_set_inode_mark_mask_locked(mark, mask);
}

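/* Update the ignored-event mask of a mark; caller must hold mark->lock. */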
void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->ignored_mask = mask;
}

/*
 * Sorting function for lists of fsnotify marks.
 *
 * Fanotify supports different notification classes (reflected as priority of
 * notification group). Events shall be passed to notification groups in
 * decreasing priority order. To achieve this marks in notification lists for
 * inodes and vfsmounts are sorted so that priorities of corresponding groups
 * are descending.
 *
 * Furthermore correct handling of the ignore mask requires processing inode
 * and vfsmount marks of each group together. Using the group address as
 * further sort criterion provides a unique sorting order and thus we can
 * merge inode and vfsmount lists of marks in linear time and find groups
 * present in both lists.
 *
 * A return value of 1 signifies that b has priority over a.
 * A return value of 0 signifies that the two marks have to be handled together.
 * A return value of -1 signifies that a has priority over b.
 */
int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
{
	if (a == b)
		return 0;
	if (!a)
		return 1;
	if (!b)
		return -1;
	if (a->priority < b->priority)
		return 1;
	if (a->priority > b->priority)
		return -1;
	if (a < b)
		return 1;
	return -1;
}

/* Add mark into proper place in given list of marks */
int fsnotify_add_mark_list(struct hlist_head *head, struct fsnotify_mark *mark,
			   int allow_dups)
{
	struct fsnotify_mark *lmark, *last = NULL;
	int cmp;

	/* is mark the first mark? */
	if (hlist_empty(head)) {
		hlist_add_head_rcu(&mark->obj_list, head);
		return 0;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, head, obj_list) {
		last = lmark;

		if ((lmark->group == mark->group) && !allow_dups)
			return -EEXIST;

		cmp = fsnotify_compare_groups(lmark->group, mark->group);
		if (cmp >= 0) {
			hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list);
			return 0;
		}
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
	return 0;
}

/*
 * Attach an initialized mark to a given group and fs object.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group.
 */
int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
			     struct fsnotify_group *group, struct inode *inode,
			     struct vfsmount *mnt, int allow_dups)
{
	int ret = 0;

	BUG_ON(inode && mnt);
	BUG_ON(!inode && !mnt);
	BUG_ON(!mutex_is_locked(&group->mark_mutex));

	/*
	 * LOCKING ORDER!!!!
	 * group->mark_mutex
	 * mark->lock
	 * inode->i_lock
	 */
	spin_lock(&mark->lock);
	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED;

	fsnotify_get_group(group);
	mark->group = group;
	list_add(&mark->g_list, &group->marks_list);
	atomic_inc(&group->num_marks);
	fsnotify_get_mark(mark);

	if (inode) {
		ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
		if (ret)
			goto err;
	} else if (mnt) {
		ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
		if (ret)
			goto err;
	} else {
		BUG();
	}

	/* this will pin the object if appropriate */
	fsnotify_set_mark_mask_locked(mark, mark->mask);
	spin_unlock(&mark->lock);

	if (inode)
		__fsnotify_update_child_dentry_flags(inode);

	return ret;
err:
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	list_del_init(&mark->g_list);
	fsnotify_put_group(group);
	mark->group = NULL;
	atomic_dec(&group->num_marks);

	spin_unlock(&mark->lock);

	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	queue_delayed_work(system_unbound_wq, &reaper_work,
			   FSNOTIFY_REAPER_DELAY);

	return ret;
}

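/* Like fsnotify_add_mark_locked(), but takes the group's mark_mutex itself. */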
int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
		      struct inode *inode, struct vfsmount *mnt, int allow_dups)
{
	int ret;

	mutex_lock(&group->mark_mutex);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups);
	mutex_unlock(&group->mark_mutex);
	return ret;
}

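/*
 * Typical backend usage of the API above, as an illustrative sketch only;
 * "my_free_mark" and the error handling are hypothetical, not part of this
 * file (compare the inotify and fanotify backends for real callers):
 *
 *	static void my_free_mark(struct fsnotify_mark *mark)
 *	{
 *		kfree(mark);
 *	}
 *
 *	mark = kzalloc(sizeof(*mark), GFP_KERNEL);
 *	if (!mark)
 *		return -ENOMEM;
 *	fsnotify_init_mark(mark, my_free_mark);
 *	mark->mask = FS_MODIFY | FS_CLOSE_WRITE;
 *	ret = fsnotify_add_mark(mark, group, inode, NULL, 0);
 *	if (ret)
 *		fsnotify_put_mark(mark);	(drops the initial reference)
 */
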
/*
 * Given a list of marks, find the mark associated with given group. If found
 * take a reference to that mark and return it, else return NULL.
 */
struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
					 struct fsnotify_group *group)
{
	struct fsnotify_mark *mark;

	hlist_for_each_entry(mark, head, obj_list) {
		if (mark->group == group) {
			fsnotify_get_mark(mark);
			return mark;
		}
	}
	return NULL;
}

/*
 * Clear any marks in a group in which mark->flags & flags is true.
 */
void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
					 unsigned int flags)
{
	struct fsnotify_mark *lmark, *mark;
	LIST_HEAD(to_free);

	/*
	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
	 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
	 * to_free list so we have to use mark_mutex even when accessing that
	 * list. And freeing mark requires us to drop mark_mutex. So we can
	 * reliably free only the first mark in the list. That's why we first
	 * move marks to free to to_free list in one go and then free marks in
	 * to_free list one by one.
	 */
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
		if (mark->flags & flags)
			list_move(&mark->g_list, &to_free);
	}
	mutex_unlock(&group->mark_mutex);

	while (1) {
		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
		if (list_empty(&to_free)) {
			mutex_unlock(&group->mark_mutex);
			break;
		}
		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
		fsnotify_get_mark(mark);
		fsnotify_detach_mark(mark);
		mutex_unlock(&group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
	}
}

/*
 * Given a group, destroy all of the marks associated with that group.
 */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
}

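/*
 * Copy the watched object, group, mask and destructor from one mark to
 * another; caller must hold old->lock.
 */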
void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
{
	assert_spin_locked(&old->lock);
	new->inode = old->inode;
	new->mnt = old->mnt;
	if (old->group)
		fsnotify_get_group(old->group);
	new->group = old->group;
	new->mask = old->mask;
	new->free_mark = old->free_mark;
}

/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
			void (*free_mark)(struct fsnotify_mark *mark))
{
	memset(mark, 0, sizeof(*mark));
	spin_lock_init(&mark->lock);
	atomic_set(&mark->refcnt, 1);
	mark->free_mark = free_mark;
}

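/*
 * The reaper work: splice the global destroy_list onto a private list, wait
 * for an SRCU grace period so no reader can still see the marks, and only
 * then drop the list's reference on each mark.
 */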
static void fsnotify_mark_destroy(struct work_struct *work)
{
	struct fsnotify_mark *mark, *next;
	struct list_head private_destroy_list;

	spin_lock(&destroy_lock);
	/* exchange the list head */
	list_replace_init(&destroy_list, &private_destroy_list);
	spin_unlock(&destroy_lock);

	synchronize_srcu(&fsnotify_mark_srcu);

	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
		list_del_init(&mark->g_list);
		fsnotify_put_mark(mark);
	}
}