/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/fsnotify.h>

#include "kernfs-internal.h"

/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file.  kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);

struct kernfs_open_node {
	atomic_t		refcnt;
	atomic_t		event;
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
};

/*
 * kernfs_notify() may be called from any context and bounces notifications
 * through a work item.  To minimize space overhead in kernfs_node, the
 * pending queue is implemented as a singly linked list of kernfs_nodes.
 * The list is terminated with the self pointer so that whether a
 * kernfs_node is on the list or not can be determined by testing the next
 * pointer for NULL.
 */
#define KERNFS_NOTIFY_EOL		((void *)&kernfs_notify_list)

static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;

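/*
 * kernfs_of - fetch the kernfs_open_file backing @file
 *
 * Every kernfs file is opened through seq_file: file->private_data points
 * to the seq_file whose ->private in turn points to the kernfs_open_file.
 */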
static struct kernfs_open_file *kernfs_of(struct file *file)
{
	return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		lockdep_assert_held(kn);
	return kn->attr.ops;
}

/*
 * The seq_file machinery below backs files whose kernfs_ops implement
 * ->seq_show(); everything else is served by kernfs_file_direct_read().
 *
 * kernfs_seq_start() grabs @of->mutex and an active reference on the
 * kernfs_node; both are dropped again via kernfs_seq_stop().  If the node
 * has been deactivated, start fails with ERR_PTR(-ENODEV) and the
 * active-ref half of the teardown, kernfs_seq_stop_active(), must be
 * skipped; kernfs_seq_stop() checks for exactly that sentinel value.
 */
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_stop)
		ops->seq_stop(sf, v);
	kernfs_put_active(of->kn);
}

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure
	 * that the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);	/* stop() still unlocks @of->mutex */

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		void *next = ops->seq_start(sf, ppos);

		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open().  Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		void *next = ops->seq_next(sf, v, ppos);

		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open(), always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	if (v != ERR_PTR(-ENODEV))
		kernfs_seq_stop_active(sf, v);
	mutex_unlock(&of->mutex);
}

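/*
 * Record the notification event count before calling ->seq_show() so that
 * kernfs_fop_poll() can tell whether the file changed after this read.
 */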
static int kernfs_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	of->event = atomic_read(&of->kn->attr.open->event);

	return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};

/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file.  Implement simplistic custom buffering for
 * bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
				       char __user *user_buf, size_t count,
				       loff_t *ppos)
{
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	buf = of->prealloc_buf;
	if (!buf)
		buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure
	 * that the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	of->event = atomic_read(&of->kn->attr.open->event);
	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, *ppos);
	else
		len = -EINVAL;

	if (len < 0)
		goto out_unlock;

	if (copy_to_user(user_buf, buf, len)) {
		len = -EFAULT;
		goto out_unlock;
	}

	*ppos += len;

 out_unlock:
	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);
 out_free:
	if (buf != of->prealloc_buf)
		kfree(buf);
	return len;
}

/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: destination buffer in userland
 * @count: number of bytes to read
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);

	if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
		return seq_read(file, user_buf, count, ppos);
	else
		return kernfs_file_direct_read(of, user_buf, count, ppos);
}

/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them.  We expect the entire buffer to come on
 * the first write.  Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
 * back.
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	ssize_t len;	/* must be signed so error codes survive the ops call */
	char *buf;

	if (of->atomic_write_len) {
		len = count;
		if (len > of->atomic_write_len)
			return -E2BIG;
	} else {
		len = min_t(size_t, count, PAGE_SIZE);
	}

	buf = of->prealloc_buf;
	if (!buf)
		buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure
	 * that the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	if (copy_from_user(buf, user_buf, len)) {
		len = -EFAULT;
		goto out_unlock;
	}
	buf[len] = '\0';	/* guarantee string termination */

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, *ppos);
	else
		len = -EINVAL;

	if (len > 0)
		*ppos += len;

out_unlock:
	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);
out_free:
	if (buf != of->prealloc_buf)
		kfree(buf);
	return len;
}

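/*
 * kernfs wraps the implementer's vm_operations_struct (saved in
 * of->vm_ops by kernfs_fop_mmap()) with the kernfs_vma_*() helpers below.
 * Each wrapper pins an active reference on the kernfs_node around the
 * callback so that mappings can outlive the node's removal; once the node
 * is deactivated, the wrappers fail (e.g. with VM_FAULT_SIGBUS) instead
 * of calling into ops that may be going away.
 */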
static void kernfs_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);

	if (!of->vm_ops)
		return;

	if (!kernfs_get_active(of->kn))
		return;

	if (of->vm_ops->open)
		of->vm_ops->open(vma);

	kernfs_put_active(of->kn);
}

static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vma, vmf);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = 0;
	if (of->vm_ops->page_mkwrite)
		ret = of->vm_ops->page_mkwrite(vma, vmf);
	else
		file_update_time(file);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return -EINVAL;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = -EINVAL;
	if (of->vm_ops->access)
		ret = of->vm_ops->access(vma, addr, buf, len, write);

	kernfs_put_active(of->kn);
	return ret;
}

#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
				 struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = 0;
	if (of->vm_ops->set_policy)
		ret = of->vm_ops->set_policy(vma, new);

	kernfs_put_active(of->kn);
	return ret;
}

static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
					       unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	struct mempolicy *pol;

	if (!of->vm_ops)
		return vma->vm_policy;

	if (!kernfs_get_active(of->kn))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (of->vm_ops->get_policy)
		pol = of->vm_ops->get_policy(vma, addr);

	kernfs_put_active(of->kn);
	return pol;
}

#endif

static const struct vm_operations_struct kernfs_vm_ops = {
	.open		= kernfs_vma_open,
	.fault		= kernfs_vma_fault,
	.page_mkwrite	= kernfs_vma_page_mkwrite,
	.access		= kernfs_vma_access,
#ifdef CONFIG_NUMA
	.set_policy	= kernfs_vma_set_policy,
	.get_policy	= kernfs_vma_get_policy,
#endif
};
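
/*
 * Note the deliberate absence of a ->close() callback above: kernfs has
 * no way to wrap close reliably, so kernfs_fop_mmap() below rejects any
 * implementation whose vm_ops provide one.
 */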

static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	/*
	 * mmap path and of->mutex are prone to triggering spurious lockdep
	 * warnings and we don't want to add even more complexity.  Bail if
	 * the file isn't mmap enabled.
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	if (rc)
		goto out_put;

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close.
	 * So error if someone is trying to use close.
	 */
	rc = -EINVAL;
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	of->mmapped = 1;
	of->vm_ops = vma->vm_ops;
	vma->vm_ops = &kernfs_vm_ops;
out_put:
	kernfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);

	return rc;
}

/**
 *	kernfs_get_open_node - get or create kernfs_open_node
 *	@kn: target kernfs_node
 *	@of: kernfs_open_file for this instance of open
 *
 *	If @kn->attr.open exists, increment its reference count; otherwise,
 *	create one.  @of is chained to the files list.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	struct kernfs_open_node *on, *new_on = NULL;

 retry:
	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irq(&kernfs_open_node_lock);

	/* install @new_on if one was allocated on the previous iteration */
	if (!kn->attr.open && new_on) {
		kn->attr.open = new_on;
		new_on = NULL;
	}

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->refcnt);
		list_add_tail(&of->list, &on->files);
	}

	spin_unlock_irq(&kernfs_open_node_lock);
	mutex_unlock(&kernfs_open_file_mutex);

	if (on) {
		kfree(new_on);
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
	if (!new_on)
		return -ENOMEM;

	atomic_set(&new_on->refcnt, 0);
	atomic_set(&new_on->event, 1);
	init_waitqueue_head(&new_on->poll);
	INIT_LIST_HEAD(&new_on->files);
	goto retry;
}

/**
 *	kernfs_put_open_node - put kernfs_open_node
 *	@kn: target kernfs_node
 *	@of: associated kernfs_open_file
 *
 *	Put @kn->attr.open and unlink @of from the files list.  If
 *	reference count reaches zero, disassociate and free it.
 *
 *	LOCKING:
 *	None.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
				 struct kernfs_open_file *of)
{
	struct kernfs_open_node *on = kn->attr.open;
	unsigned long flags;

	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (of)
		list_del(&of->list);

	if (atomic_dec_and_test(&on->refcnt))
		kn->attr.open = NULL;
	else
		on = NULL;

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
	mutex_unlock(&kernfs_open_file_mutex);

	kfree(on);
}

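/*
 * kernfs_fop_open - kernfs vfs open callback
 *
 * Checks access against what the ops actually implement, allocates and
 * initializes the kernfs_open_file, instantiates the seq_file, and
 * attaches the open file to the node's kernfs_open_node.
 */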
static int kernfs_fop_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
	struct kernfs_root *root = kernfs_root(kn);
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	bool has_read, has_write, has_mmap;
	int error = -EACCES;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;

	/* see the flag definition for details */
	if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
		if ((file->f_mode & FMODE_WRITE) &&
		    (!(inode->i_mode & S_IWUGO) || !has_write))
			goto err_out;

		if ((file->f_mode & FMODE_READ) &&
		    (!(inode->i_mode & S_IRUGO) || !has_read))
			goto err_out;
	}

	/* allocate a kernfs_open_file for the file */
	error = -ENOMEM;
	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
	if (!of)
		goto err_out;

	/*
	 * The following is done to give a different lockdep key to
	 * @of->mutex for files which implement mmap.  This is a rather
	 * crude way to avoid false positive lockdep warning around
	 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
	 * which mm->mmap_sem nests, while holding @of->mutex.  As each
	 * open file has a separate mutex, it's okay as long as those don't
	 * happen on the same file.  At this point, we can't easily give
	 * each file a separate locking class.  Let's differentiate on
	 * whether the file has mmap or not for now.
	 *
	 * Both paths of the branch look the same.  They're supposed to
	 * look that way and give @of->mutex different static lockdep keys.
	 */
	if (has_mmap)
		mutex_init(&of->mutex);
	else
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;

	/*
	 * The write path needs atomic_write_len outside the active
	 * reference.  Cache it in the open_file.  See kernfs_fop_write()
	 * for details.
	 */
	of->atomic_write_len = ops->atomic_write_len;

	error = -EINVAL;
	/*
	 * ->seq_show is incompatible with ->prealloc,
	 * as seq_read does its own allocation.
	 * ->read must be used instead.
	 */
	if (ops->prealloc && ops->seq_show)
		goto err_free;
	if (ops->prealloc) {
		int len = of->atomic_write_len ?: PAGE_SIZE;
		of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
		error = -ENOMEM;
		if (!of->prealloc_buf)
			goto err_free;
	}

	/*
	 * Always instantiate seq_file even if read access doesn't use
	 * seq_file or is not requested.  This unifies private data access
	 * and readable regular files are the vast majority anyway.
	 */
	if (ops->seq_show)
		error = seq_open(file, &kernfs_seq_ops);
	else
		error = seq_open(file, NULL);
	if (error)
		goto err_free;

	((struct seq_file *)file->private_data)->private = of;

	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;

	/* make sure we have an open node struct */
	error = kernfs_get_open_node(kn, of);
	if (error)
		goto err_close;

	/* open succeeded, put active reference */
	kernfs_put_active(kn);
	return 0;

err_close:
	seq_release(inode, file);
err_free:
	kfree(of->prealloc_buf);
	kfree(of);
err_out:
	kernfs_put_active(kn);
	return error;
}

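/*
 * Tear down in reverse order of kernfs_fop_open(): detach from the
 * kernfs_open_node, release the seq_file and free the buffers.
 */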
static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_file *of = kernfs_of(filp);

	kernfs_put_open_node(kn, of);
	seq_release(inode, filp);
	kfree(of->prealloc_buf);
	kfree(of);

	return 0;
}

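/*
 * Called while an mmap-capable node is being removed.  Zap every existing
 * mapping of the file so that subsequent faults go through
 * kernfs_vma_fault() and fail with SIGBUS once the node is deactivated.
 */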
void kernfs_unmap_bin_file(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;

	if (!(kn->flags & KERNFS_HAS_MMAP))
		return;

	spin_lock_irq(&kernfs_open_node_lock);
	on = kn->attr.open;
	if (on)
		atomic_inc(&on->refcnt);
	spin_unlock_irq(&kernfs_open_node_lock);
	if (!on)
		return;

	mutex_lock(&kernfs_open_file_mutex);
	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);
		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
	}
	mutex_unlock(&kernfs_open_file_mutex);

	kernfs_put_open_node(kn, NULL);
}

/*
 * Kernfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * proper support is there in the underlying code), poll will return
 * POLLERR|POLLPRI, and select will return the fd whether it is
 * waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * The per-node event counter, which kernfs_notify() increments, is
 * what lets kernfs_fop_poll() detect that the content changed after
 * this open file last read it.
 */
static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
	struct kernfs_open_file *of = kernfs_of(filp);
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_node *on = kn->attr.open;

	if (!kernfs_get_active(kn))
		goto trigger;

	poll_wait(filp, &on->poll, wait);

	kernfs_put_active(kn);

	if (of->event != atomic_read(&on->event))
		goto trigger;

	return DEFAULT_POLLMASK;

 trigger:
	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}

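/*
 * Drain the notify list built up by kernfs_notify(): for each queued
 * node, wake pollers and generate an FS_MODIFY fsnotify event on every
 * superblock showing the node.
 */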
static void kernfs_notify_workfn(struct work_struct *work)
{
	struct kernfs_node *kn;
	struct kernfs_open_node *on;
	struct kernfs_super_info *info;
repeat:
	/* pop one off the notify_list */
	spin_lock_irq(&kernfs_notify_lock);
	kn = kernfs_notify_list;
	if (kn == KERNFS_NOTIFY_EOL) {
		spin_unlock_irq(&kernfs_notify_lock);
		return;
	}
	kernfs_notify_list = kn->attr.notify_next;
	kn->attr.notify_next = NULL;
	spin_unlock_irq(&kernfs_notify_lock);

	/* kick poll */
	spin_lock_irq(&kernfs_open_node_lock);

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->event);
		wake_up_interruptible(&on->poll);
	}

	spin_unlock_irq(&kernfs_open_node_lock);

	/* kick fsnotify */
	mutex_lock(&kernfs_mutex);

	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
		struct inode *inode;
		struct dentry *dentry;

		inode = ilookup(info->sb, kn->ino);
		if (!inode)
			continue;

		dentry = d_find_any_alias(inode);
		if (dentry) {
			fsnotify_parent(NULL, dentry, FS_MODIFY);
			fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
				 NULL, 0);
			dput(dentry);
		}

		iput(inode);
	}

	mutex_unlock(&kernfs_mutex);
	kernfs_put(kn);
	goto repeat;
}

/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.  May be called from any
 * context.
 */
void kernfs_notify(struct kernfs_node *kn)
{
	static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
	unsigned long flags;

	if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
		return;

	spin_lock_irqsave(&kernfs_notify_lock, flags);
	if (!kn->attr.notify_next) {
		kernfs_get(kn);
		kn->attr.notify_next = kernfs_notify_list;
		kernfs_notify_list = kn;
		schedule_work(&kernfs_notify_work);
	}
	spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);

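/* vfs file_operations used for all kernfs regular files */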
const struct file_operations kernfs_file_fops = {
	.read		= kernfs_fop_read,
	.write		= kernfs_fop_write,
	.llseek		= generic_file_llseek,
	.mmap		= kernfs_fop_mmap,
	.open		= kernfs_fop_open,
	.release	= kernfs_fop_release,
	.poll		= kernfs_fop_poll,
};

/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
					 const char *name,
					 umode_t mode, loff_t size,
					 const struct kernfs_ops *ops,
					 void *priv, const void *ns,
					 struct lock_class_key *key)
{
	struct kernfs_node *kn;
	unsigned flags;
	int rc;

	flags = KERNFS_FILE;

	kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (key) {
		lockdep_init_map(&kn->dep_map, "s_active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	}
#endif

	/*
	 * kn->attr.ops is accessible only while holding active ref.  We
	 * need to know whether some ops are implemented outside active
	 * ref.  Cache their existence in flags.
	 */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;

	rc = kernfs_add_one(kn);
	if (rc) {
		kernfs_put(kn);
		return ERR_PTR(rc);
	}
	return kn;
}
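
/*
 * Usage sketch (illustrative only, not part of kernfs): a caller creates
 * a seq_show-backed file roughly as follows.  The ops table and names
 * below are hypothetical.
 *
 *	static int foo_seq_show(struct seq_file *sf, void *v)
 *	{
 *		struct kernfs_open_file *of = sf->private;
 *
 *		seq_printf(sf, "%s\n", (char *)of->kn->priv);
 *		return 0;
 *	}
 *
 *	static const struct kernfs_ops foo_ops = {
 *		.seq_show	= foo_seq_show,
 *	};
 *
 *	kn = __kernfs_create_file(parent, "foo", 0444, PAGE_SIZE,
 *				  &foo_ops, data, NULL, NULL);
 *	if (IS_ERR(kn))
 *		return PTR_ERR(kn);
 */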