1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79#include <linux/fs.h>
80#include <linux/init.h>
81#include <linux/kernel.h>
82#include <linux/kthread.h>
83#include <linux/module.h>
84#include <linux/mutex.h>
85#include <linux/slab.h>
86#include <linux/spinlock.h>
87#include <linux/srcu.h>
88
89#include <linux/atomic.h>
90
91#include <linux/fsnotify_backend.h>
92#include "fsnotify.h"
93
/* SRCU protecting readers that walk mark lists against concurrent freeing. */
struct srcu_struct fsnotify_mark_srcu;
/* Protects destroy_list below. */
static DEFINE_SPINLOCK(destroy_lock);
/* Marks queued here wait for an SRCU grace period before being freed. */
static LIST_HEAD(destroy_list);
/* Wakes the destruction kthread when destroy_list becomes non-empty. */
static DECLARE_WAIT_QUEUE_HEAD(destroy_waitq);
98
/* Take a reference on @mark; paired with fsnotify_put_mark(). */
void fsnotify_get_mark(struct fsnotify_mark *mark)
{
	atomic_inc(&mark->refcnt);
}
103
/*
 * Drop a reference on @mark.  On the final put, release the group
 * reference held by the mark (if any) and free the mark through its
 * free_mark() callback.
 */
void fsnotify_put_mark(struct fsnotify_mark *mark)
{
	if (atomic_dec_and_test(&mark->refcnt)) {
		if (mark->group)
			fsnotify_put_group(mark->group);
		mark->free_mark(mark);
	}
}
112
113
114u32 fsnotify_recalc_mask(struct hlist_head *head)
115{
116 u32 new_mask = 0;
117 struct fsnotify_mark *mark;
118
119 hlist_for_each_entry(mark, head, obj_list)
120 new_mask |= mark->mask;
121 return new_mask;
122}
123
124
125
126
127
128
/*
 * Detach @mark from the object (inode or vfsmount) it is attached to and
 * from @group's mark list, then queue it on destroy_list so it is freed
 * only after an SRCU grace period.
 *
 * Caller must hold group->mark_mutex.  NOTE: this function temporarily
 * DROPS and re-takes group->mark_mutex around the ->freeing_mark()
 * callback, so callers iterating group->marks_list must not assume a
 * cached "next" pointer stays valid across this call.
 */
void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
				  struct fsnotify_group *group)
{
	struct inode *inode = NULL;

	BUG_ON(!mutex_is_locked(&group->mark_mutex));

	spin_lock(&mark->lock);

	/* Somebody else already detached this mark; nothing to do. */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
		spin_unlock(&mark->lock);
		return;
	}

	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;

	/* Remove the mark from the per-object list of its backing type. */
	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
		inode = mark->inode;
		fsnotify_destroy_inode_mark(mark);
	} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
		fsnotify_destroy_vfsmount_mark(mark);
	else
		BUG();

	/* Unhook from the group's list of marks. */
	list_del_init(&mark->g_list);

	spin_unlock(&mark->lock);

	/* Drop the inode reference taken when the object was pinned. */
	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
		iput(inode);

	mutex_unlock(&group->mark_mutex);

	/*
	 * Hand the mark to the destruction thread; g_list is reused here,
	 * which is safe because we just removed it from the group list.
	 */
	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	wake_up(&destroy_waitq);

	/* Notify the group (without mark_mutex held) that the mark is dying. */
	if (group->ops->freeing_mark)
		group->ops->freeing_mark(mark, group);

	atomic_dec(&group->num_marks);

	/* Re-take the mutex dropped above, as our caller expects it held. */
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
}
197
/*
 * Convenience wrapper around fsnotify_destroy_mark_locked() that takes and
 * releases group->mark_mutex itself.
 */
void fsnotify_destroy_mark(struct fsnotify_mark *mark,
			   struct fsnotify_group *group)
{
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
	fsnotify_destroy_mark_locked(mark, group);
	mutex_unlock(&group->mark_mutex);
}
205
206
207
208
209
/*
 * Destroy every mark on @to_free (linked via mark->free_list).
 *
 * The group is pinned (under mark->lock) before the mark is destroyed so
 * that the group pointer stays valid for the fsnotify_destroy_mark()
 * call, then both references are dropped.
 */
void fsnotify_destroy_marks(struct list_head *to_free)
{
	struct fsnotify_mark *mark, *lmark;
	struct fsnotify_group *group;

	list_for_each_entry_safe(mark, lmark, to_free, free_list) {
		/* Read mark->group under mark->lock and take a reference. */
		spin_lock(&mark->lock);
		fsnotify_get_group(mark->group);
		group = mark->group;
		spin_unlock(&mark->lock);

		fsnotify_destroy_mark(mark, group);
		fsnotify_put_mark(mark);
		fsnotify_put_group(group);
	}
}
226
/*
 * Set the event mask of @mark to @mask; for inode marks, also propagate
 * the change to the backing inode.  Caller must hold mark->lock.
 */
void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->mask = mask;

	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE)
		fsnotify_set_inode_mark_mask_locked(mark, mask);
}
236
/* Set the ignored-event mask of @mark.  Caller must hold mark->lock. */
void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
{
	assert_spin_locked(&mark->lock);

	mark->ignored_mask = mask;
}
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
264{
265 if (a == b)
266 return 0;
267 if (!a)
268 return 1;
269 if (!b)
270 return -1;
271 if (a->priority < b->priority)
272 return 1;
273 if (a->priority > b->priority)
274 return -1;
275 if (a < b)
276 return 1;
277 return -1;
278}
279
280
281int fsnotify_add_mark_list(struct hlist_head *head, struct fsnotify_mark *mark,
282 int allow_dups)
283{
284 struct fsnotify_mark *lmark, *last = NULL;
285 int cmp;
286
287
288 if (hlist_empty(head)) {
289 hlist_add_head_rcu(&mark->obj_list, head);
290 return 0;
291 }
292
293
294 hlist_for_each_entry(lmark, head, obj_list) {
295 last = lmark;
296
297 if ((lmark->group == mark->group) && !allow_dups)
298 return -EEXIST;
299
300 cmp = fsnotify_compare_groups(lmark->group, mark->group);
301 if (cmp >= 0) {
302 hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list);
303 return 0;
304 }
305 }
306
307 BUG_ON(last == NULL);
308
309 hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
310 return 0;
311}
312
313
314
315
316
317
/*
 * Attach @mark to @group and to exactly one of @inode or @mnt.
 *
 * Caller must hold group->mark_mutex.  On success the mark holds a group
 * reference and an extra mark reference, and the object's combined mask
 * is recomputed.  On failure the group-side setup is unwound and the mark
 * is queued on destroy_list for SRCU-delayed freeing.
 *
 * Returns 0 on success or a negative errno (e.g. -EEXIST) on failure.
 */
int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
			     struct fsnotify_group *group, struct inode *inode,
			     struct vfsmount *mnt, int allow_dups)
{
	int ret = 0;

	/* Exactly one of inode / mnt must be supplied. */
	BUG_ON(inode && mnt);
	BUG_ON(!inode && !mnt);
	BUG_ON(!mutex_is_locked(&group->mark_mutex));

	/*
	 * Lock order: group->mark_mutex (held by caller), then mark->lock.
	 * mark->lock is held across both the group-side and object-side
	 * setup so the mark is never visible half-initialized.
	 */
	spin_lock(&mark->lock);
	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE;

	/* Group-side setup: pin the group and add to its mark list. */
	fsnotify_get_group(group);
	mark->group = group;
	list_add(&mark->g_list, &group->marks_list);
	atomic_inc(&group->num_marks);
	fsnotify_get_mark(mark);

	/* Object-side setup: hook the mark onto the inode or vfsmount. */
	if (inode) {
		ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
		if (ret)
			goto err;
	} else if (mnt) {
		ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
		if (ret)
			goto err;
	} else {
		BUG();
	}

	/* Propagate the mark's mask to the object's combined mask. */
	fsnotify_set_mark_mask_locked(mark, mark->mask);
	spin_unlock(&mark->lock);

	if (inode)
		__fsnotify_update_child_dentry_flags(inode);

	return ret;
err:
	/* Unwind the group-side setup done above. */
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	list_del_init(&mark->g_list);
	fsnotify_put_group(group);
	mark->group = NULL;
	atomic_dec(&group->num_marks);

	spin_unlock(&mark->lock);

	/* Queue for SRCU-delayed freeing; g_list is reused for destroy_list. */
	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	wake_up(&destroy_waitq);

	return ret;
}
379
380int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
381 struct inode *inode, struct vfsmount *mnt, int allow_dups)
382{
383 int ret;
384 mutex_lock(&group->mark_mutex);
385 ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups);
386 mutex_unlock(&group->mark_mutex);
387 return ret;
388}
389
390
391
392
393
394struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
395 struct fsnotify_group *group)
396{
397 struct fsnotify_mark *mark;
398
399 hlist_for_each_entry(mark, head, obj_list) {
400 if (mark->group == group) {
401 fsnotify_get_mark(mark);
402 return mark;
403 }
404 }
405 return NULL;
406}
407
408
409
410
411void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
412 unsigned int flags)
413{
414 struct fsnotify_mark *lmark, *mark;
415
416 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
417 list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
418 if (mark->flags & flags) {
419 fsnotify_get_mark(mark);
420 fsnotify_destroy_mark_locked(mark, group);
421 fsnotify_put_mark(mark);
422 }
423 }
424 mutex_unlock(&group->mark_mutex);
425}
426
427
428
429
/* Destroy every mark in @group, regardless of which flags it carries. */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
}
434
/*
 * Copy the object pointers, group, mask and destructor from @old into
 * @new, taking a group reference for the copy.  Caller must hold
 * old->lock.
 *
 * NOTE(review): flags and ignored_mask are intentionally not copied here
 * — callers appear responsible for those; confirm against call sites.
 */
void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
{
	assert_spin_locked(&old->lock);
	new->inode = old->inode;
	new->mnt = old->mnt;
	if (old->group)
		fsnotify_get_group(old->group);
	new->group = old->group;
	new->mask = old->mask;
	new->free_mark = old->free_mark;
}
446
447
448
449
/*
 * Initialize a caller-allocated mark: zero it, set up its spinlock, start
 * the refcount at 1 (owned by the caller), and record the destructor used
 * by fsnotify_put_mark() on the final put.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
			void (*free_mark)(struct fsnotify_mark *mark))
{
	memset(mark, 0, sizeof(*mark));
	spin_lock_init(&mark->lock);
	atomic_set(&mark->refcnt, 1);
	mark->free_mark = free_mark;
}
458
/*
 * Kthread body that reaps destroyed marks.
 *
 * Repeatedly: snatch the whole destroy_list under destroy_lock, wait for
 * an SRCU grace period so no reader still holds a pointer to any of the
 * marks, then drop the list's reference on each mark (which typically
 * frees it).  Sleeps until more marks are queued.
 */
static int fsnotify_mark_destroy(void *ignored)
{
	struct fsnotify_mark *mark, *next;
	struct list_head private_destroy_list;

	for (;;) {
		/* Take ownership of the current batch in one shot. */
		spin_lock(&destroy_lock);

		list_replace_init(&destroy_list, &private_destroy_list);
		spin_unlock(&destroy_lock);

		/* Ensure all SRCU readers are done with these marks. */
		synchronize_srcu(&fsnotify_mark_srcu);

		list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
			list_del_init(&mark->g_list);
			fsnotify_put_mark(mark);
		}

		/* Sleep until fsnotify_destroy_mark_locked() queues more work. */
		wait_event_interruptible(destroy_waitq, !list_empty(&destroy_list));
	}

	return 0;
}
482
483static int __init fsnotify_mark_init(void)
484{
485 struct task_struct *thread;
486
487 thread = kthread_run(fsnotify_mark_destroy, NULL,
488 "fsnotify_mark");
489 if (IS_ERR(thread))
490 panic("unable to start fsnotify mark destruction thread.");
491
492 return 0;
493}
494device_initcall(fsnotify_mark_init);
495