/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/fsnotify.h>

#include "kernfs-internal.h"

/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file.  kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);

struct kernfs_open_node {
	atomic_t		refcnt;
	atomic_t		event;
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
};

/*
 * kernfs_notify() may be called from any context and bounces notifications
 * through a work item.  To minimize space overhead in kernfs_node, the
 * pending queue is implemented as a singly linked list of kernfs_nodes.
 * The list is terminated with the self pointer so that whether a
 * kernfs_node is on the list or not can be determined by testing the next
 * pointer for NULL.
 */
#define KERNFS_NOTIFY_EOL		((void *)&kernfs_notify_list)

static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;

static struct kernfs_open_file *kernfs_of(struct file *file)
{
	return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		lockdep_assert_held(kn);
	return kn->attr.ops;
}

/*
 * As kernfs_seq_stop() is also called after kernfs_seq_start() or
 * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
 * a seq_file iteration which is fully initialized with an active reference
 * or an aborted kernfs_seq_start() due to get_active failure.  The
 * position pointer is the only context for each seq_file iteration and
 * thus the stop condition should be encoded in it.  As the return value is
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * choice to indicate get_active failure.
 *
 * Unfortunately, this is complicated by the optional custom seq_file
 * operations, which may also return ERR_PTR(-ENODEV).  kernfs_seq_stop()
 * can't tell whether ERR_PTR(-ENODEV) came from get_active failure or from
 * the custom seq_file operations, and thus can't decide from that value
 * alone whether put_active should be performed.
 *
 * This is worked around by factoring out the custom seq_stop() and
 * put_active part into kernfs_seq_stop_active(), skipping it from
 * kernfs_seq_stop() on ERR_PTR(-ENODEV) while invoking it directly after
 * custom seq_start/next() when they return ERR_PTR(-ENODEV).  This ensures
 * that put_active is skipped only after get_active failure.
 */
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_stop)
		ops->seq_stop(sf, v);
	kernfs_put_active(of->kn);
}

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure
	 * that the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		void *next = ops->seq_start(sf, ppos);

		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open().  Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		void *next = ops->seq_next(sf, v, ppos);

		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open(): always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	if (v != ERR_PTR(-ENODEV))
		kernfs_seq_stop_active(sf, v);
	mutex_unlock(&of->mutex);
}

static int kernfs_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	of->event = atomic_read(&of->kn->attr.open->event);

	return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};
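
/*
 * Example (illustrative sketch; the foo_* names are hypothetical): what a
 * seq_show-based user of this machinery looks like.  kernfs_seq_show()
 * above forwards to ->seq_show with the seq_file whose ->private is the
 * kernfs_open_file set up in kernfs_fop_open().
 *
 *	static int foo_seq_show(struct seq_file *sf, void *v)
 *	{
 *		struct kernfs_open_file *of = sf->private;
 *
 *		seq_printf(sf, "%d\n", *(int *)of->kn->priv);
 *		return 0;
 *	}
 *
 *	static const struct kernfs_ops foo_ops = {
 *		.seq_show	= foo_seq_show,
 *	};
 */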

/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in the read(2) call should be passed to the read callback,
 * making it difficult to use seq_file.  The following is a simplified
 * version of the binary file read path.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
				       char __user *user_buf, size_t count,
				       loff_t *ppos)
{
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure
	 * that the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	of->event = atomic_read(&of->kn->attr.open->event);
	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len < 0)
		goto out_free;

	if (copy_to_user(user_buf, buf, len)) {
		len = -EFAULT;
		goto out_free;
	}

	*ppos += len;

 out_free:
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}
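
/*
 * Example (illustrative sketch; foo_read and struct foo_blob are
 * hypothetical): a ->read callback as invoked from
 * kernfs_file_direct_read() above.  @buf is a kernel buffer of at most
 * PAGE_SIZE (or the preallocated buffer); the callback copies out data
 * starting at @off and returns the number of bytes produced, 0 at EOF.
 *
 *	static ssize_t foo_read(struct kernfs_open_file *of, char *buf,
 *				size_t bytes, loff_t off)
 *	{
 *		struct foo_blob *blob = of->kn->priv;	// hypothetical
 *
 *		if (off >= blob->size)
 *			return 0;	// EOF
 *		bytes = min_t(size_t, bytes, blob->size - off);
 *		memcpy(buf, blob->data + off, bytes);
 *		return bytes;
 *	}
 */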

/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: userland buffer to read into
 * @count: number of bytes to read
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);

	if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
		return seq_read(file, user_buf, count, ppos);
	else
		return kernfs_file_direct_read(of, user_buf, count, ppos);
}

/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them.  We expect the entire buffer to come on
 * the first write.  Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
 * back.
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	size_t len;
	char *buf;

	if (of->atomic_write_len) {
		len = count;
		if (len > of->atomic_write_len)
			return -E2BIG;
	} else {
		len = min_t(size_t, count, PAGE_SIZE);
	}

	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, user_buf, len)) {
		len = -EFAULT;
		goto out_free;
	}
	buf[len] = '\0';	/* guarantee string termination */

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure
	 * that the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len > 0)
		*ppos += len;

out_free:
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}
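
/*
 * Example (illustrative sketch; foo_write is hypothetical): a ->write
 * callback as invoked from kernfs_fop_write() above.  @buf arrives
 * NUL-terminated, so string parsers can be used directly.  Returning
 * @bytes consumes the whole buffer, which matches the no-partial-write
 * convention described above.
 *
 *	static ssize_t foo_write(struct kernfs_open_file *of, char *buf,
 *				 size_t bytes, loff_t off)
 *	{
 *		int val, ret;
 *
 *		ret = kstrtoint(buf, 0, &val);
 *		if (ret < 0)
 *			return ret;
 *		*(int *)of->kn->priv = val;
 *		return bytes;
 *	}
 */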

static void kernfs_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);

	if (!of->vm_ops)
		return;

	if (!kernfs_get_active(of->kn))
		return;

	if (of->vm_ops->open)
		of->vm_ops->open(vma);

	kernfs_put_active(of->kn);
}

static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vma, vmf);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = 0;
	if (of->vm_ops->page_mkwrite)
		ret = of->vm_ops->page_mkwrite(vma, vmf);
	else
		file_update_time(file);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return -EINVAL;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = -EINVAL;
	if (of->vm_ops->access)
		ret = of->vm_ops->access(vma, addr, buf, len, write);

	kernfs_put_active(of->kn);
	return ret;
}

#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
				 struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = 0;
	if (of->vm_ops->set_policy)
		ret = of->vm_ops->set_policy(vma, new);

	kernfs_put_active(of->kn);
	return ret;
}

static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
					       unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	struct mempolicy *pol;

	if (!of->vm_ops)
		return vma->vm_policy;

	if (!kernfs_get_active(of->kn))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (of->vm_ops->get_policy)
		pol = of->vm_ops->get_policy(vma, addr);

	kernfs_put_active(of->kn);
	return pol;
}

#endif

static const struct vm_operations_struct kernfs_vm_ops = {
	.open		= kernfs_vma_open,
	.fault		= kernfs_vma_fault,
	.page_mkwrite	= kernfs_vma_page_mkwrite,
	.access		= kernfs_vma_access,
#ifdef CONFIG_NUMA
	.set_policy	= kernfs_vma_set_policy,
	.get_policy	= kernfs_vma_get_policy,
#endif
};

static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	/*
	 * mmap path and of->mutex are prone to triggering spurious lockdep
	 * warnings and we don't want to add spurious locking dependency
	 * between the two.  Check whether mmap is actually implemented
	 * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
	 * comment in kernfs_fop_open() for more details.
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	if (rc)
		goto out_put;

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_fops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close.
	 * So error if someone is trying to use close.
	 */
	rc = -EINVAL;
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	of->mmapped = 1;
	of->vm_ops = vma->vm_ops;
	vma->vm_ops = &kernfs_vm_ops;
out_put:
	kernfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);

	return rc;
}
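
/*
 * Example (illustrative sketch; foo_mmap and foo_phys_addr are
 * hypothetical): a ->mmap callback as invoked from kernfs_fop_mmap()
 * above.  The callback only has to set the mapping up; any vm_ops it
 * installs are saved in of->vm_ops and wrapped by kernfs_vm_ops so that
 * each wrapper can take an active reference before forwarding.
 *
 *	static int foo_mmap(struct kernfs_open_file *of,
 *			    struct vm_area_struct *vma)
 *	{
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       foo_phys_addr >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */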

/**
 * kernfs_get_open_node - get or create kernfs_open_node
 * @kn: target kernfs_node
 * @of: kernfs_open_file for this instance of open
 *
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * create one.  @of is chained to the files list.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	struct kernfs_open_node *on, *new_on = NULL;

 retry:
	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irq(&kernfs_open_node_lock);

	if (!kn->attr.open && new_on) {
		kn->attr.open = new_on;
		new_on = NULL;
	}

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->refcnt);
		list_add_tail(&of->list, &on->files);
	}

	spin_unlock_irq(&kernfs_open_node_lock);
	mutex_unlock(&kernfs_open_file_mutex);

	if (on) {
		kfree(new_on);
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
	if (!new_on)
		return -ENOMEM;

	atomic_set(&new_on->refcnt, 0);
	atomic_set(&new_on->event, 1);
	init_waitqueue_head(&new_on->poll);
	INIT_LIST_HEAD(&new_on->files);
	goto retry;
}

/**
 * kernfs_put_open_node - put kernfs_open_node
 * @kn: target kernfs_node
 * @of: associated kernfs_open_file
 *
 * Put @kn->attr.open and unlink @of from the files list.  If
 * reference count reaches zero, disassociate and free it.
 *
 * LOCKING:
 * None.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
				 struct kernfs_open_file *of)
{
	struct kernfs_open_node *on = kn->attr.open;
	unsigned long flags;

	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (of)
		list_del(&of->list);

	if (atomic_dec_and_test(&on->refcnt))
		kn->attr.open = NULL;
	else
		on = NULL;

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
	mutex_unlock(&kernfs_open_file_mutex);

	kfree(on);
}

static int kernfs_fop_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
	struct kernfs_root *root = kernfs_root(kn);
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	bool has_read, has_write, has_mmap;
	int error = -EACCES;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;

	/* see the flag definition for details */
	if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
		if ((file->f_mode & FMODE_WRITE) &&
		    (!(inode->i_mode & S_IWUGO) || !has_write))
			goto err_out;

		if ((file->f_mode & FMODE_READ) &&
		    (!(inode->i_mode & S_IRUGO) || !has_read))
			goto err_out;
	}

	/* allocate a kernfs_open_file for the file */
	error = -ENOMEM;
	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
	if (!of)
		goto err_out;

	/*
	 * mutex_init() hands out a distinct lockdep class at each call
	 * site, so initializing @of->mutex in two branches gives files
	 * which implement mmap a different lockdep key.  This is a rather
	 * crude way to avoid false positive lockdep warnings: mmap nests
	 * @of->mutex under mm->mmap_sem, while some read paths end up
	 * nesting mm->mmap_sem under locks taken while holding @of->mutex.
	 * As each open file has a separate mutex, this is okay as long as
	 * both don't happen at the same time for the same file.
	 */
	if (has_mmap)
		mutex_init(&of->mutex);
	else
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;

	/*
	 * The write path needs to access atomic_write_len outside an
	 * active reference; cache it in the open file.  See
	 * kernfs_fop_write() for details.
	 */
	of->atomic_write_len = ops->atomic_write_len;

	error = -EINVAL;
	/*
	 * ->seq_show is incompatible with ->prealloc,
	 * as seq_read does its own allocation.
	 * ->read must be used instead.
	 */
	if (ops->prealloc && ops->seq_show)
		goto err_free;
	if (ops->prealloc) {
		int len = of->atomic_write_len ?: PAGE_SIZE;
		of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
		error = -ENOMEM;
		if (!of->prealloc_buf)
			goto err_free;
		mutex_init(&of->prealloc_mutex);
	}

	/*
	 * Always instantiate seq_file even if read access doesn't use
	 * seq_file or is not requested.  This unifies private data access
	 * and readable regular files are the vast majority anyway.
	 */
	if (ops->seq_show)
		error = seq_open(file, &kernfs_seq_ops);
	else
		error = seq_open(file, NULL);
	if (error)
		goto err_free;

	((struct seq_file *)file->private_data)->private = of;

	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;

	/* make sure we have open node struct */
	error = kernfs_get_open_node(kn, of);
	if (error)
		goto err_close;

	/* open succeeded, put active references */
	kernfs_put_active(kn);
	return 0;

err_close:
	seq_release(inode, file);
err_free:
	kfree(of->prealloc_buf);
	kfree(of);
err_out:
	kernfs_put_active(kn);
	return error;
}

static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_file *of = kernfs_of(filp);

	kernfs_put_open_node(kn, of);
	seq_release(inode, filp);
	kfree(of->prealloc_buf);
	kfree(of);

	return 0;
}

void kernfs_unmap_bin_file(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;

	if (!(kn->flags & KERNFS_HAS_MMAP))
		return;

	spin_lock_irq(&kernfs_open_node_lock);
	on = kn->attr.open;
	if (on)
		atomic_inc(&on->refcnt);
	spin_unlock_irq(&kernfs_open_node_lock);
	if (!on)
		return;

	mutex_lock(&kernfs_open_file_mutex);
	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);
		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
	}
	mutex_unlock(&kernfs_open_file_mutex);

	kernfs_put_open_node(kn, NULL);
}

/*
 * Kernfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (neither 'poll' nor 'select' return
 * an appropriate error code).  When in doubt, set a suitable timeout.
 */
static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
	struct kernfs_open_file *of = kernfs_of(filp);
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_node *on = kn->attr.open;

	if (!kernfs_get_active(kn))
		goto trigger;

	poll_wait(filp, &on->poll, wait);

	kernfs_put_active(kn);

	if (of->event != atomic_read(&on->event))
		goto trigger;

	return DEFAULT_POLLMASK;

 trigger:
	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}
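
/*
 * Example (illustrative sketch; the path is hypothetical): the matching
 * userspace side of the protocol described above, waiting for a
 * kernfs-backed attribute to change.
 *
 *	char buf[4096];
 *	struct pollfd pfd;
 *	int fd = open("/sys/devices/foo/bar", O_RDONLY);
 *
 *	pfd.fd = fd;
 *	pfd.events = POLLERR | POLLPRI;
 *	read(fd, buf, sizeof(buf));	// consume the current value
 *	poll(&pfd, 1, -1);		// blocks until kernfs_notify()
 *	lseek(fd, 0, SEEK_SET);		// rewind ...
 *	read(fd, buf, sizeof(buf));	// ... and read the new value
 */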

static void kernfs_notify_workfn(struct work_struct *work)
{
	struct kernfs_node *kn;
	struct kernfs_open_node *on;
	struct kernfs_super_info *info;
repeat:
	/* pop one off the notify_list */
	spin_lock_irq(&kernfs_notify_lock);
	kn = kernfs_notify_list;
	if (kn == KERNFS_NOTIFY_EOL) {
		spin_unlock_irq(&kernfs_notify_lock);
		return;
	}
	kernfs_notify_list = kn->attr.notify_next;
	kn->attr.notify_next = NULL;
	spin_unlock_irq(&kernfs_notify_lock);

	/* kick poll */
	spin_lock_irq(&kernfs_open_node_lock);

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->event);
		wake_up_interruptible(&on->poll);
	}

	spin_unlock_irq(&kernfs_open_node_lock);

	/* kick fsnotify */
	mutex_lock(&kernfs_mutex);

	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
		struct kernfs_node *parent;
		struct inode *inode;

		/*
		 * We want fsnotify_modify() on @kn but as the
		 * modifications aren't originating from userland don't
		 * have the matching @file available.  Look up the inode
		 * and trigger fsnotify modify on it directly.
		 */
		inode = ilookup(info->sb, kn->ino);
		if (!inode)
			continue;

		parent = kernfs_get_parent(kn);
		if (parent) {
			struct inode *p_inode;

			p_inode = ilookup(info->sb, parent->ino);
			if (p_inode) {
				fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
					 inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
				iput(p_inode);
			}

			kernfs_put(parent);
		}

		fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
			 kn->name, 0);
		iput(inode);
	}

	mutex_unlock(&kernfs_mutex);
	kernfs_put(kn);
	goto repeat;
}

/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.  May be called from any
 * context.
 */
void kernfs_notify(struct kernfs_node *kn)
{
	static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
	unsigned long flags;

	if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
		return;

	spin_lock_irqsave(&kernfs_notify_lock, flags);
	if (!kn->attr.notify_next) {
		kernfs_get(kn);
		kn->attr.notify_next = kernfs_notify_list;
		kernfs_notify_list = kn;
		schedule_work(&kernfs_notify_work);
	}
	spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);
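
/*
 * Example (illustrative sketch; foo_value and foo_kn are hypothetical): a
 * file's owner typically pairs an update with a notification so pollers
 * wake up, where foo_kn is the kernfs_node returned at creation time.
 *
 *	foo_value = new_value;
 *	kernfs_notify(foo_kn);
 */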

const struct file_operations kernfs_file_fops = {
	.read		= kernfs_fop_read,
	.write		= kernfs_fop_write,
	.llseek		= generic_file_llseek,
	.mmap		= kernfs_fop_mmap,
	.open		= kernfs_fop_open,
	.release	= kernfs_fop_release,
	.poll		= kernfs_fop_poll,
	.fsync		= noop_fsync,
};

/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
					 const char *name,
					 umode_t mode, loff_t size,
					 const struct kernfs_ops *ops,
					 void *priv, const void *ns,
					 struct lock_class_key *key)
{
	struct kernfs_node *kn;
	unsigned flags;
	int rc;

	flags = KERNFS_FILE;

	kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (key) {
		lockdep_init_map(&kn->dep_map, "s_active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	}
#endif

	/*
	 * kn->attr.ops is accessible only while holding an active ref.  We
	 * need to know whether some ops are implemented outside active
	 * ref, so cache their existence in flags.
	 */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;

	rc = kernfs_add_one(kn);
	if (rc) {
		kernfs_put(kn);
		return ERR_PTR(rc);
	}
	return kn;
}
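
/*
 * Example (illustrative sketch; parent, foo_ops and foo_value are
 * hypothetical, with foo_ops as in the earlier sketches): creating a file
 * directly.  Callers normally go through the kernfs_create_file*()
 * wrappers in include/linux/kernfs.h, which fill in @ns and @key.
 *
 *	struct kernfs_node *kn;
 *
 *	kn = __kernfs_create_file(parent, "foo", 0644, PAGE_SIZE,
 *				  &foo_ops, &foo_value, NULL, NULL);
 *	if (IS_ERR(kn))
 *		return PTR_ERR(kn);
 */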