/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/inotify.h>
#include <linux/syscalls.h>
#include <linux/magic.h>

#include <asm/ioctls.h>

static struct kmem_cache *watch_cachep __read_mostly;
static struct kmem_cache *event_cachep __read_mostly;

static struct vfsmount *inotify_mnt __read_mostly;

/* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances __read_mostly;
int inotify_max_user_watches __read_mostly;
int inotify_max_queued_events __read_mostly;

/*
 * Lock ordering:
 *
 * dev->up_mutex (ensures we don't re-add the same watch)
 *	inode->inotify_mutex (protects inode's watch list)
 *		ih->mutex (protects inotify_handle's watch list)
 *			dev->ev_mutex (protects device's event queue)
 */

/*
 * Lifetimes of the main data structures:
 *
 * inotify_device: lifetime is managed by reference count, from
 * sys_inotify_init() until release.  Additional references can bump the
 * count via get_inotify_dev() and drop the count via put_inotify_dev().
 *
 * inotify_user_watch: lifetime is from create_watch() to the final
 * put_inotify_watch(), which frees it via free_inotify_user_watch().
 *
 * inotify_kernel_event: lifetime begins in kernel_event() and ends when
 * the event is removed from the queue by remove_kevent().
 */

/*
 * struct inotify_device - represents an inotify instance
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct mutex		ev_mutex;	/* protects event queue */
	struct mutex		up_mutex;	/* synchronizes watch updates */
	struct list_head	events;		/* list of queued events */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this instance */
	struct inotify_handle	*ih;		/* inotify handle */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
};

/*
 * struct inotify_kernel_event - an inotify event, originating from a watch
 * and queued for user-space.  A list of these is attached to each instance
 * of the device.  In read(), this list is walked and all events that can
 * fit in the buffer are returned.
 *
 * Protected by dev->ev_mutex of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};

/*
 * struct inotify_user_watch - our version of an inotify_watch, we add
 * a reference to the associated inotify_device.
 */
struct inotify_user_watch {
	struct inotify_device	*dev;	/* associated device */
	struct inotify_watch	wdata;	/* inotify watch data */
};

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
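
/*
 * These limits surface as /proc/sys/fs/inotify/ and can be tuned at run
 * time; an illustrative shell session (values shown are the defaults set
 * in inotify_user_setup() below):
 *
 *	# cat /proc/sys/fs/inotify/max_user_watches
 *	8192
 *	# echo 16384 > /proc/sys/fs/inotify/max_user_watches
 */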

static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		kfree(dev);
	}
}

/*
 * free_inotify_user_watch - cleans up the watch and its references
 */
static void free_inotify_user_watch(struct inotify_watch *w)
{
	struct inotify_user_watch *watch;
	struct inotify_device *dev;

	watch = container_of(w, struct inotify_user_watch, wdata);
	dev = watch->dev;

	atomic_dec(&dev->user->inotify_watches);
	put_inotify_dev(dev);
	kmem_cache_free(watch_cachep, watch);
}

/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event *kernel_event(s32 wd, u32 mask, u32 cookie,
						 const char *name)
{
	struct inotify_kernel_event *kevent;

	kevent = kmem_cache_alloc(event_cachep, GFP_NOFS);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);

	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure
		 * is small and the common case is a small filename, we just
		 * round up to the next multiple of the structure's sizeof.
		 * This is simple and safe for all architectures.
		 */
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}

		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		if (rem)
			memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}
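
/*
 * Worked example of the padding above, assuming the common 16-byte
 * struct inotify_event (four 32-bit fields before the name):
 *
 *	name = "foo":		len = 4,  rem = 12		-> event.len = 16
 *	name of 15 chars:	len = 16, rem = 0		-> event.len = 16
 *	name of 20 chars:	len = 21, rem = 16 - (21 % 16)	-> event.len = 32
 *
 * event.len (the padded name only, excluding the fixed header) thus always
 * comes out a multiple of sizeof(struct inotify_event).
 */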

/*
 * inotify_dev_get_event - return the next event in the queue
 *
 * Caller must hold dev->ev_mutex and ensure the queue is non-empty;
 * list_entry() on an empty list yields a bogus pointer, not NULL.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_queue_event - event handler registered with core inotify, adds
 * a new event to the given device
 *
 * Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
				    u32 cookie, const char *name,
				    struct inode *ignored)
{
	struct inotify_user_watch *watch;
	struct inotify_device *dev;
	struct inotify_kernel_event *kevent, *last;

	watch = container_of(w, struct inotify_user_watch, wdata);
	dev = watch->dev;

	mutex_lock(&dev->ev_mutex);

	/*
	 * We can safely put the watch as we don't reference it while
	 * generating the event.  Note that IN_ONESHOT is a watch flag, not
	 * an event flag, so it must be tested against the watch's mask, not
	 * the event's.
	 */
	if (mask & IN_IGNORED || w->mask & IN_ONESHOT)
		put_inotify_watch(w); /* final put */

	/*
	 * Coalescing: drop this event if it is a dupe of the most recently
	 * queued event.  Compare against the tail of the queue, and only if
	 * the queue is non-empty: an empty list has nothing to coalesce
	 * with, and list_entry() on it would not return NULL.
	 */
	if (!list_empty(&dev->events)) {
		last = list_entry(dev->events.prev,
				  struct inotify_kernel_event, list);
		if (last->event.mask == mask && last->event.wd == wd &&
		    last->event.cookie == cookie) {
			const char *lastname = last->name;

			if (!name && !lastname)
				goto out;
			if (name && lastname && !strcmp(lastname, name))
				goto out;
		}
	}

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		goto out;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(wd, mask, cookie, name);

	if (unlikely(!kevent))
		goto out;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);

out:
	mutex_unlock(&dev->ev_mutex);
}

/*
 * remove_kevent - cleans up and ultimately frees the given kevent
 *
 * Caller must hold dev->ev_mutex.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;

	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}

/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->ev_mutex.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;
		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
	}
}

/*
 * find_inode - resolve a user-given path to a specific inode and return a nd
 */
static int find_inode(const char __user *dirname, struct nameidata *nd,
		      unsigned flags)
{
	int error;

	error = __user_walk(dirname, flags, nd);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = vfs_permission(nd, MAY_READ);
	if (error)
		path_release(nd);
	return error;
}

/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->up_mutex.
 */
static int create_watch(struct inotify_device *dev, struct inode *inode,
			u32 mask)
{
	struct inotify_user_watch *watch;
	int ret;

	if (atomic_read(&dev->user->inotify_watches) >=
			inotify_max_user_watches)
		return -ENOSPC;

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return -ENOMEM;

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	atomic_inc(&dev->user->inotify_watches);

	inotify_init_watch(&watch->wdata);
	ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
	if (ret < 0)
		free_inotify_user_watch(&watch->wdata);

	return ret;
}

/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	mutex_lock(&dev->ev_mutex);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&dev->ev_mutex);

	return ret;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof(struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	/* wait until there is at least one event to return */
	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&dev->ev_mutex);
		events = !list_empty(&dev->events);
		mutex_unlock(&dev->ev_mutex);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	/* copy out as many whole events as fit in the user buffer */
	mutex_lock(&dev->ev_mutex);
	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count) {
			if (ret == 0 && count > 0) {
				/*
				 * could not get a single event because we
				 * didn't have enough buffer space.
				 */
				ret = -EINVAL;
			}
			break;
		}

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name,
					 kevent->event.len)) {
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		remove_kevent(dev, kevent);
	}
	mutex_unlock(&dev->ev_mutex);

	return ret;
}
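
/*
 * Illustrative layout of the byte stream read() produces, assuming the
 * common 16-byte fixed header (wd, mask, cookie, len):
 *
 *	| wd mask cookie len | name (len bytes, NUL-padded) | wd mask ... |
 *
 * Records are variable-length; user-space must advance by
 * sizeof(struct inotify_event) + event->len.  The loop above never splits
 * a record across reads: an event that does not fit is left queued.
 */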

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	inotify_destroy(dev->ih);

	/* destroy all of the events on this device */
	mutex_lock(&dev->ev_mutex);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	mutex_unlock(&dev->ev_mutex);

	/* free this device: the put matching the get in sys_inotify_init() */
	put_inotify_dev(dev);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

static const struct inotify_operations inotify_user_ops = {
	.handle_event	= inotify_dev_queue_event,
	.destroy_watch	= free_inotify_user_watch,
};

asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct inotify_handle *ih;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_uid(current->user);
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	ih = inotify_init(&inotify_user_ops);
	if (unlikely(IS_ERR(ih))) {
		ret = PTR_ERR(ih);
		goto out_free_dev;
	}
	dev->ih = ih;

	filp->f_op = &inotify_fops;
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;
	filp->private_data = dev;

	INIT_LIST_HEAD(&dev->events);
	init_waitqueue_head(&dev->wq);
	mutex_init(&dev->ev_mutex);
	mutex_init(&dev->up_mutex);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	atomic_set(&dev->count, 0);

	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_dev:
	kfree(dev);
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}

asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
	struct inode *inode;
	struct inotify_device *dev;
	struct nameidata nd;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = find_inode(path, &nd, flags);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to nd; dev by fget on fd */
	inode = nd.dentry->d_inode;
	dev = filp->private_data;

	mutex_lock(&dev->up_mutex);
	ret = inotify_find_update_watch(dev->ih, inode, mask);
	if (ret == -ENOENT)
		ret = create_watch(dev, inode, mask);
	mutex_unlock(&dev->up_mutex);

	path_release(&nd);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	dev = filp->private_data;

	/* we free our watch data when we get IN_IGNORED */
	ret = inotify_rm_wd(dev->ih, wd);

out:
	fput_light(filp, fput_needed);
	return ret;
}

static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			     INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot
 * return error because we have compiled-in VFS hooks.  So an (unlikely)
 * failure here is a panic.
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_user_watch),
					 0, SLAB_PANIC, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL);

	return 0;
}

module_init(inotify_user_setup);
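
/*
 * Illustrative user-space consumer of the three syscalls above (a sketch
 * using the glibc wrappers; error handling omitted for brevity):
 *
 *	int fd = inotify_init();
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (p < buf + len) {
 *		struct inotify_event *ev = (struct inotify_event *) p;
 *		printf("wd=%d mask=%x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *		p += sizeof(struct inotify_event) + ev->len;
 *	}
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */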