// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - /proc/filesystems
 *                                   - reading/writing/invalidating the super-block
 *                                   - umount system call
 *                                   - mount system call
 *
 *  GFP_KERNEL ok here
 *
 *  Added kdev_t to struct super_block; NR_SUPER is now dynamic
 *
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unmount deadlock because the shrink path
 * holds the superblock lock for the whole scan.  That is why trylock_super()
 * is used below: if the lock cannot be taken, the scan is simply skipped.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!trylock_super(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	up_read(&sb->s_umount);
	return freed;
}
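
/*
 * Worked example of the proportional split above (illustrative numbers, not
 * from the source): with sc->nr_to_scan = 128, dentries = 600, inodes = 300
 * and fs_objects = 100, total_objects = 1001 and mult_frac() yields
 * 128*600/1001 = 76 dentries, 128*300/1001 = 38 inodes and 128*100/1001 = 12
 * fs objects to scan, so each cache is shrunk in proportion to its size.
 */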

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * We don't call trylock_super() here as it is a scalability bottleneck,
	 * so we're exposed to partial setup state.  The shrinker rwsem does not
	 * protect filesystem operations backing list_lru_shrink_count() or
	 * s_op->nr_cached_objects().  Counts can change between
	 * super_cache_count and super_cache_scan, so we really don't need locks
	 * here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it.  trylock_super() uses a SB_BORN check to
	 * avoid this situation, so do the same here.  The memory barrier is
	 * matched with the one in vfs_get_tree() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	up_write(&s->s_umount);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	free_prealloced_shrinker(&s->s_shrink);
	/* no delays needed at this point */
	destroy_super_work(&s->destroy_work);
}

/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *	@user_ns: User namespace for the super_block
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to the new superblock or %NULL if allocation had
 *	failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one.  Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	init_waitqueue_head(&s->s_writers.wait_unfrozen);
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;
	s->cleancache_poolid = CLEANCACHE_NO_POOL;

	s->s_shrink.seeks = DEFAULT_SEEKS;
	s->s_shrink.scan_objects = super_cache_scan;
	s->s_shrink.count_objects = super_cache_count;
	s->s_shrink.batch = 1024;
	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
	if (prealloc_shrinker(&s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(!list_empty(&s->s_mounts));
		security_sb_free(s);
		put_user_ns(s->s_user_ns);
		kfree(s->s_subtype);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there's no
 *	references left.
 */
static void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}

/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there is no other active references left.  In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_invalidate_fs(s);
		unregister_shrinker(&s->s_shrink);
		fs->kill_sb(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock.  Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 *	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 in case of
 *	success, 0 if we had failed (superblock contents was already dead or
 *	dying when grab_super() had been called).  Note that this is only
 *	called for superblocks not in rundown mode (== ones still on ->fs_supers
 *	list; `rundown' == deactivation has happened, ->kill_sb() hasn't).
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}

/*
 *	trylock_super - try to grab ->s_umount shared
 *	@sb: reference we are trying to grab
 *
 *	Try to prevent fs shutdown.  This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	filesystem is not shut down while we are working on it.  It returns
 *	false if we cannot acquire s_umount or if we lose the race and
 *	filesystem already got into shutdown, and returns true with the
 *	s_umount lock held in read mode in case of success.  On successful
 *	return, the caller must drop the s_umount lock when done.
 *
 *	Note that unlike get_super() et.al. this one does *not* bump ->s_count.
 *	The reason why it's safe is that we are OK with doing trylock instead
 *	of down_read().  There's a couple of places that are OK with that, but
 *	it's very much not a general-purpose interface.
 */
bool trylock_super(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!hlist_unhashed(&sb->s_instances) &&
		    sb->s_root && (sb->s_flags & SB_BORN))
			return true;
		up_read(&sb->s_umount);
	}

	return false;
}

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount();

		/* evict all inodes with zero refcount */
		evict_inodes(sb);
		/* only nonzero refcount inodes can have marks */
		fsnotify_unmount_inodes(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
	if (sb->s_bdi != &noop_backing_dev_info) {
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc:	Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Find or create a superblock using the parameters stored in the filesystem
 * context and the two callback functions.
 *
 * If an extant superblock is matched, then that will be returned with an
 * elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be set and
 * the set() callback will be invoked), the superblock will be published and it
 * will be returned in a partially constructed state with SB_BORN and SB_ACTIVE
 * as yet unset.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strlcpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	register_shrinker_prepared(&s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);
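
/*
 * Example (illustrative sketch, not part of this file): how a filesystem's
 * ->get_tree() might drive sget_fc() directly.  Every examplefs_* name is
 * hypothetical; real callers usually go through the get_tree_*() helpers
 * further down instead.  Kept under #if 0 so it is never compiled.
 */
#if 0
static int examplefs_test_super(struct super_block *sb, struct fs_context *fc)
{
	/* Share a superblock when it is keyed by the same private data. */
	return sb->s_fs_info == fc->s_fs_info;
}

static int examplefs_set_super(struct super_block *sb, struct fs_context *fc)
{
	/* sget_fc() already moved fc->s_fs_info into sb->s_fs_info. */
	return 0;
}

static int examplefs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, examplefs_test_super, examplefs_set_super);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	if (!sb->s_root) {
		/* New, partially constructed sb: s_umount is held for write. */
		err = examplefs_fill_super(sb, fc);	/* hypothetical helper */
		if (err) {
			deactivate_locked_super(sb);
			return err;
		}
		sb->s_flags |= SB_ACTIVE;
	}
	fc->root = dget(sb->s_root);
	return 0;
}
#endif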

/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@flags:	mount flags
 *	@data:	argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

	/* We don't yet pass the user namespace of the parent
	 * mount through to here so always use &init_user_ns
	 * until that changes.
	 */
	if (flags & SB_SUBMOUNT)
		user_ns = &init_user_ns;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker_prepared(&s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	up_write(&sb->s_umount);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

static void __iterate_supers(void (*f)(struct super_block *))
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		f(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & SB_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
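
/*
 * Example (illustrative, not part of this file): a typical iterate_supers()
 * callback in the style of the sync path in fs/sync.c.  The callback runs
 * with s_umount held shared, so the superblock cannot be shut down under it.
 */
#if 0
static void sync_one_sb(struct super_block *sb, void *arg)
{
	if (!sb_rdonly(sb))
		sync_filesystem(sb);
}

/* ...somewhere in a sync path: iterate_supers(sync_one_sb, NULL); */
#endif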

/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & SB_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

static struct super_block *__get_super(struct block_device *bdev, bool excl)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			if (!excl)
				down_read(&sb->s_umount);
			else
				down_write(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & SB_BORN))
				return sb;
			if (!excl)
				up_read(&sb->s_umount);
			else
				up_write(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 *	get_super - get the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
	return __get_super(bdev, false);
}
EXPORT_SYMBOL(get_super);

static struct super_block *__get_super_thawed(struct block_device *bdev,
					      bool excl)
{
	while (1) {
		struct super_block *s = __get_super(bdev, excl);
		if (!s || s->s_writers.frozen == SB_UNFROZEN)
			return s;
		if (!excl)
			up_read(&s->s_umount);
		else
			up_write(&s->s_umount);
		wait_event(s->s_writers.wait_unfrozen,
			   s->s_writers.frozen == SB_UNFROZEN);
		put_super(s);
	}
}

/**
 *	get_super_thawed - get thawed superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device. The superblock is returned once it is thawed
 *	(or immediately if it was not frozen). %NULL is returned if no match
 *	is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
	return __get_super_thawed(bdev, false);
}
EXPORT_SYMBOL(get_super_thawed);

/**
 *	get_super_exclusive_thawed - get thawed superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device. The superblock is returned once it is thawed
 *	(or immediately if it was not frozen), and it is returned with the
 *	s_umount semaphore held for writing. %NULL is returned if no match
 *	is found.
 */
struct super_block *get_super_exclusive_thawed(struct block_device *bdev)
{
	return __get_super_thawed(bdev, true);
}
EXPORT_SYMBOL(get_super_exclusive_thawed);

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				goto restart;
			up_write(&sb->s_umount);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & SB_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif

		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			up_write(&sb->s_umount);
			group_pin_kill(&sb->s_pins);
			down_write(&sb->s_umount);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb->s_readonly_remount = 1;
			smp_wmb();
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	/* Needs to be ordered wrt mnt_is_readonly() */
	smp_wmb();
	sb->s_readonly_remount = 0;

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc).  Also file data modifications go via their own
	 * mappings.  So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb->s_readonly_remount = 0;
	return retval;
}

static void do_emergency_remount_callback(struct super_block *sb)
{
	down_write(&sb->s_umount);
	if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
	    !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb)
{
	down_write(&sb->s_umount);
	if (sb->s_root && sb->s_flags & SB_BORN) {
		emergency_thaw_bdev(sb);
		thaw_super_locked(sb);
	} else {
		up_write(&sb->s_umount);
	}
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't use one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

/**
 * vfs_get_super - Get a superblock with a search key set in s_fs_info.
 * @fc: The filesystem context holding the parameters
 * @keying: How to distinguish superblocks
 * @fill_super: Helper to initialise a new superblock
 *
 * Search for a superblock and create a new one if not found.  The search
 * criterion is controlled by @keying.  If the search fails, a new superblock
 * is created and @fill_super() is called to initialise it.
 *
 * There are three keying strategies:
 *
 * (1) vfs_get_single_super - Only one superblock of this type may exist on
 *     the system.  This is typically used for special system filesystems.
 *
 * (2) vfs_get_keyed_super - Multiple superblocks may exist, but they must
 *     each have a distinct key (with the key being in s_fs_info).  Searching
 *     for the same key again will turn up the superblock for that key.
 *
 * (3) vfs_get_independent_super - Multiple superblocks may exist and are
 *     unkeyed.  Each call will get a new superblock.
 *
 * A permissions check is made by sget_fc() unless we're getting a superblock
 * for a kernel-internal mount or a submount.
 */
int vfs_get_super(struct fs_context *fc,
		  enum vfs_get_super_keying keying,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	int (*test)(struct super_block *, struct fs_context *);
	struct super_block *sb;
	int err;

	switch (keying) {
	case vfs_get_single_super:
	case vfs_get_single_reconf_super:
		test = test_single_super;
		break;
	case vfs_get_keyed_super:
		test = test_keyed_super;
		break;
	case vfs_get_independent_super:
		test = NULL;
		break;
	default:
		BUG();
	}

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
		fc->root = dget(sb->s_root);
	} else {
		fc->root = dget(sb->s_root);
		if (keying == vfs_get_single_reconf_super) {
			err = reconfigure_super(fc);
			if (err < 0) {
				dput(fc->root);
				fc->root = NULL;
				goto error;
			}
		}
	}

	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}
EXPORT_SYMBOL(vfs_get_super);

int get_tree_nodev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, vfs_get_independent_super, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, vfs_get_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_single_reconf(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, vfs_get_single_reconf_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single_reconf);

int get_tree_keyed(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc),
		void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, vfs_get_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
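
/*
 * Example (illustrative, not part of this file): get_tree_keyed() suits
 * filesystems whose superblock is keyed by a namespace or similar object
 * (mqueue keys on the ipc namespace, for instance).  The examplefs_* names
 * and the namespace lookup here are hypothetical.
 */
#if 0
static int examplefs_get_tree(struct fs_context *fc)
{
	struct examplefs_namespace *ns = examplefs_current_ns();	/* hypothetical */

	/* The key lands in fc->s_fs_info and is matched by test_keyed_super(). */
	return get_tree_keyed(fc, examplefs_fill_super, ns);
}
#endif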

#ifdef CONFIG_BLOCK

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	s->s_bdi = bdi_get(s->s_bdev->bd_bdi);

	return 0;
}

static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
{
	return s->s_bdev == fc->sget_key;
}

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		int (*fill_super)(struct super_block *,
				  struct fs_context *))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(fc->sb_flags & SB_RDONLY))
		mode |= FMODE_WRITE;

	if (!fc->source)
		return invalf(fc, "No source specified");

	bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
	if (IS_ERR(bdev)) {
		errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev);
	}

	/* Once the superblock is inserted into the list by sget_fc(), s_umount
	 * will protect the lockfs code from trying to start a snapshot while
	 * we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		blkdev_put(bdev, mode);
		return -EBUSY;
	}

	fc->sb_flags |= SB_NOSEC;
	fc->sget_key = bdev;
	s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s)) {
		blkdev_put(bdev, mode);
		return PTR_ERR(s);
	}

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", bdev);
			deactivate_locked_super(s);
			blkdev_put(bdev, mode);
			return -EBUSY;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		s->s_mode = mode;
		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}

		s->s_flags |= SB_ACTIVE;
		bdev->bd_super = s;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL(get_tree_bdev);
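
/*
 * Example (illustrative, not part of this file): a block-device filesystem's
 * fs_context_operations ->get_tree simply delegates to get_tree_bdev() with
 * its fill_super helper, the way XFS does.  examplefs_* names are made up.
 */
#if 0
static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
	.get_tree	= examplefs_get_tree,
};
#endif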

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & SB_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
		 bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & SB_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		s->s_mode = mode;
		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= SB_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
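
/*
 * Example (illustrative, not part of this file): the legacy ->mount method of
 * a block filesystem is usually a one-line wrapper around mount_bdev(), in
 * the style of ext2_mount().  examplefs_* names are hypothetical.
 */
#if 0
static struct dentry *examplefs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, examplefs_fill_super);
}
#endif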

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
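
/*
 * Example (illustrative, not part of this file): a minimal in-memory
 * filesystem type wiring mount_nodev() to a fill_super helper and using
 * kill_litter_super() for teardown, roughly the ramfs pattern.  All
 * examplefs_* names are hypothetical.
 */
#if 0
static struct dentry *examplefs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, examplefs_fill_super);
}

static struct file_system_type examplefs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= examplefs_mount,
	.kill_sb	= kill_litter_super,	/* drops litter, then kill_anon_super() */
};
#endif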

static int reconfigure_single(struct super_block *s,
			      int flags, void *data)
{
	struct fs_context *fc;
	int ret;

	/* The caller really needs to be passing fc down into mount_single(),
	 * then a chunk of this can be removed.  Better yet, reconfiguration
	 * shouldn't happen, but rather the second mount should be rejected if
	 * the parameters are not compatible.
	 */
	fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	ret = parse_monolithic_mount_data(fc, data);
	if (ret < 0)
		goto out;

	ret = reconfigure_super(fc);
out:
	put_fs_context(fc);
	return ret;
}

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (!error)
			s->s_flags |= SB_ACTIVE;
	} else {
		error = reconfigure_single(s, flags, data);
	}
	if (unlikely(error)) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
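
/*
 * Example (illustrative, not part of this file): single-instance filesystems
 * such as debugfs expose one superblock system-wide; their legacy ->mount is
 * a thin wrapper around mount_single().  examplefs_* names are hypothetical.
 */
#if 0
static struct dentry *examplefs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	/* A second mount returns the existing tree, reconfigured with @data. */
	return mount_single(fs_type, flags, data, examplefs_fill_super);
}
#endif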

/**
 * vfs_get_tree - Get the mountable root
 * @fc: The superblock configuration context.
 *
 * The filesystem is invoked to get or create a superblock which can then
 * later be used for mounting.  The filesystem places a pointer to the root to
 * be used for mounting in @fc->root.
 */
int vfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int error;

	if (fc->root)
		return -EBUSY;

	/* Get the mountable root in fc->root, with a ref on the root and a ref
	 * on the superblock.
	 */
	error = fc->ops->get_tree(fc);
	if (error < 0)
		return error;

	if (!fc->root) {
		pr_err("Filesystem %s get_tree() didn't set fc->root\n",
		       fc->fs_type->name);
		/* We don't know what the locking state of the superblock is -
		 * if there is a superblock.
		 */
		BUG();
	}

	sb = fc->root->d_sb;
	WARN_ON(!sb->s_bdi);

	/*
	 * Write barrier is for super_cache_count().  We place it before setting
	 * SB_BORN as the data dependency between the two functions is the
	 * superblock structure contents that we just set up, not the SB_BORN
	 * flag.
	 */
	smp_wmb();
	sb->s_flags |= SB_BORN;

	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases.  Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);

	return 0;
}
EXPORT_SYMBOL(vfs_get_tree);

/*
 * Setup private BDI for given superblock.  It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);

/*
 * Setup private BDI for given superblock.  It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);
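
/*
 * Example (illustrative, not part of this file): a fill_super helper giving
 * its superblock a private BDI before building the root inode, as network
 * and other non-block filesystems do.  examplefs_* names are hypothetical.
 */
#if 0
static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	int err;

	err = super_setup_bdi(sb);	/* freed again by generic_shutdown_super() */
	if (err)
		return err;

	/* ...allocate the root inode and sb->s_root here... */
	return 0;
}
#endif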

/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
	percpu_up_read(sb->s_writers.rw_sem + level-1);
}
EXPORT_SYMBOL(__sb_end_write);

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
	bool force_trylock = false;
	int ret = 1;

#ifdef CONFIG_LOCKDEP
	/*
	 * We want lockdep to tell us about possible deadlocks with freezing
	 * but it's a bit tricky to properly instrument it.  Getting a freeze
	 * protection works as getting a read lock but there are subtle
	 * problems.  XFS for example gets freeze protection on internal level
	 * twice in some cases, which is OK only because we already hold a
	 * freeze protection also on higher level.  Due to these cases we have
	 * to use wait == F (trylock mode) which must not fail.
	 */
	if (wait) {
		int i;

		for (i = 0; i < level - 1; i++)
			if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) {
				force_trylock = true;
				break;
			}
	}
#endif
	if (wait && !force_trylock)
		percpu_down_read(sb->s_writers.rw_sem + level-1);
	else
		ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);

	WARN_ON(force_trylock && !ret);
	return ret;
}
EXPORT_SYMBOL(__sb_start_write);
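
/*
 * Example (illustrative, not part of this file): write paths take freeze
 * protection through the sb_start_write()/sb_end_write() wrappers (defined in
 * include/linux/fs.h around __sb_start_write()/__sb_end_write()), which is
 * what freeze_super() waits out at the SB_FREEZE_WRITE level.  The function
 * below is a hypothetical sketch, not a real caller.
 */
#if 0
static ssize_t examplefs_write_begin(struct file *file)
{
	struct super_block *sb = file_inode(file)->i_sb;

	sb_start_write(sb);	/* blocks while the fs is frozen at SB_FREEZE_WRITE */
	/* ...modify the filesystem... */
	sb_end_write(sb);
	return 0;
}
#endif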

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will
 * return -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed.  We wait
 * for all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues.  Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc.  After
 * waiting for all running page faults we sync the filesystem, which will
 * clean all dirty pages and inodes (no new dirty pages or inodes can be
 * created when sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen.  Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim).  This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard.  After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing.  Then we transition to SB_FREEZE_COMPLETE state.  This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_UNFROZEN) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (!(sb->s_flags & SB_BORN)) {
		up_write(&sb->s_umount);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		up_write(&sb->s_umount);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	up_write(&sb->s_umount);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	down_write(&sb->s_umount);

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	sync_filesystem(sb);

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS:Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb);
			wake_up(&sb->s_writers.wait_unfrozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	lockdep_sb_freeze_release(sb);
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
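
/*
 * Example (illustrative, not part of this file): freeze_super()/thaw_super()
 * are the workers behind the FIFREEZE/FITHAW ioctls (see ioctl_fsfreeze() in
 * fs/ioctl.c).  A snapshot-style caller brackets its work with the pair; the
 * function below is a hypothetical sketch.
 */
#if 0
static int examplefs_take_snapshot(struct super_block *sb)
{
	int err;

	err = freeze_super(sb);		/* what FIFREEZE does */
	if (err)
		return err;

	/* ...capture a consistent image: no writes can enter the fs now... */

	return thaw_super(sb);		/* what FITHAW does */
}
#endif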

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
static int thaw_super_locked(struct super_block *sb)
{
	int error;

	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb_rdonly(sb)) {
		sb->s_writers.frozen = SB_UNFROZEN;
		goto out;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS:Filesystem thaw failed\n");
			lockdep_sb_freeze_release(sb);
			up_write(&sb->s_umount);
			return error;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	sb_freeze_unlock(sb);
out:
	wake_up(&sb->s_writers.wait_unfrozen);
	deactivate_locked_super(sb);
	return 0;
}

int thaw_super(struct super_block *sb)
{
	down_write(&sb->s_umount);
	return thaw_super_locked(sb);
}
EXPORT_SYMBOL(thaw_super);