1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/module.h>
14#include <linux/buffer_head.h>
15#include <linux/statfs.h>
16#include <linux/parser.h>
17#include <linux/seq_file.h>
18#include "internal.h"
19#include "xattr.h"
20
21#define CREATE_TRACE_POINTS
22#include <trace/events/erofs.h>
23
24static struct kmem_cache *erofs_inode_cachep __read_mostly;
25
26static void init_once(void *ptr)
27{
28 struct erofs_vnode *vi = ptr;
29
30 inode_init_once(&vi->vfs_inode);
31}
32
33static int __init erofs_init_inode_cache(void)
34{
35 erofs_inode_cachep = kmem_cache_create("erofs_inode",
36 sizeof(struct erofs_vnode), 0,
37 SLAB_RECLAIM_ACCOUNT,
38 init_once);
39
40 return erofs_inode_cachep ? 0 : -ENOMEM;
41}
42
/* destroy the inode slab cache (kmem_cache_destroy tolerates NULL) */
static void erofs_exit_inode_cache(void)
{
	kmem_cache_destroy(erofs_inode_cachep);
}
47
48static struct inode *alloc_inode(struct super_block *sb)
49{
50 struct erofs_vnode *vi =
51 kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
52
53 if (!vi)
54 return NULL;
55
56
57 memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode));
58 return &vi->vfs_inode;
59}
60
61static void free_inode(struct inode *inode)
62{
63 struct erofs_vnode *vi = EROFS_V(inode);
64
65
66 if (is_inode_fast_symlink(inode))
67 kfree(inode->i_link);
68
69 kfree(vi->xattr_shared_xattrs);
70
71 kmem_cache_free(erofs_inode_cachep, vi);
72}
73
74static bool check_layout_compatibility(struct super_block *sb,
75 struct erofs_super_block *layout)
76{
77 const unsigned int requirements = le32_to_cpu(layout->requirements);
78
79 EROFS_SB(sb)->requirements = requirements;
80
81
82 if (requirements & (~EROFS_ALL_REQUIREMENTS)) {
83 errln("unidentified requirements %x, please upgrade kernel version",
84 requirements & ~EROFS_ALL_REQUIREMENTS);
85 return false;
86 }
87 return true;
88}
89
/*
 * Read and validate the on-disk superblock from block 0 and populate
 * the in-memory erofs_sb_info.
 *
 * Returns 0 on success, -EIO if the block cannot be read, or -EINVAL
 * when the magic, block size or feature requirements don't check out.
 */
static int superblock_read(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct buffer_head *bh;
	struct erofs_super_block *layout;
	unsigned int blkszbits;
	int ret;

	bh = sb_bread(sb, 0);

	if (!bh) {
		errln("cannot read erofs superblock");
		return -EIO;
	}

	sbi = EROFS_SB(sb);
	/* the on-disk superblock sits EROFS_SUPER_OFFSET bytes into block 0 */
	layout = (struct erofs_super_block *)((u8 *)bh->b_data
		 + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) {
		errln("cannot find valid erofs superblock");
		goto out;
	}

	blkszbits = layout->blkszbits;
	/* only the compiled-in block size is supported */
	if (unlikely(blkszbits != LOG_BLOCK_SIZE)) {
		errln("blksize %u isn't supported on this platform",
		      1 << blkszbits);
		goto out;
	}

	if (!check_layout_compatibility(sb, layout))
		goto out;

	/* all on-disk fields are little-endian */
	sbi->blocks = le32_to_cpu(layout->blocks);
	sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr);
#endif
	sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1;
#ifdef CONFIG_EROFS_FS_ZIP
	/* the cluster size is currently tied to the block size */
	sbi->clusterbits = blkszbits;

	/*
	 * NOTE(review): an oversized cluster only logs an error here and the
	 * mount proceeds — confirm this is intentional and not a missing
	 * "goto out".
	 */
	if (1 << (sbi->clusterbits - PAGE_SHIFT) > Z_EROFS_CLUSTER_MAX_PAGES)
		errln("clusterbits %u is not supported on this kernel",
		      sbi->clusterbits);
#endif

	sbi->root_nid = le16_to_cpu(layout->root_nid);
	sbi->inos = le64_to_cpu(layout->inos);

	sbi->build_time = le64_to_cpu(layout->build_time);
	sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec);

	memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid));
	memcpy(sbi->volume_name, layout->volume_name,
	       sizeof(layout->volume_name));

	ret = 0;
out:
	brelse(bh);
	return ret;
}
156
#ifdef CONFIG_EROFS_FAULT_INJECTION
/* human-readable names for the injectable fault types */
const char *erofs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC] = "kmalloc",
	[FAULT_READ_IO] = "read IO error",
};

/*
 * (Re)program fault-injection state: a non-zero rate arms every fault
 * type at that rate; zero clears all state.  The FAULT_INJECTION mount
 * option bit is set in both cases.
 */
static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
				     unsigned int rate)
{
	struct erofs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct erofs_fault_info));
	}

	set_opt(sbi, FAULT_INJECTION);
}

/* parse "fault_injection=%u"; a malformed integer yields -EINVAL */
static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
				  substring_t *args)
{
	int rate = 0;

	if (args->from && match_int(args, &rate))
		return -EINVAL;

	__erofs_build_fault_attr(sbi, rate);
	return 0;
}

static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
{
	return sbi->fault_info.inject_rate;
}
#else
/* stubs used when CONFIG_EROFS_FAULT_INJECTION is disabled */
static void __erofs_build_fault_attr(struct erofs_sb_info *sbi,
				     unsigned int rate)
{
}

static int erofs_build_fault_attr(struct erofs_sb_info *sbi,
				  substring_t *args)
{
	/* accept but ignore the option so existing mounts don't break */
	infoln("fault_injection options not supported");
	return 0;
}

static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi)
{
	return 0;
}
#endif
213
/* apply compile-time default mount options before user options are parsed */
static void default_options(struct erofs_sb_info *sbi)
{
#ifdef CONFIG_EROFS_FS_ZIP
	sbi->max_sync_decompress_pages = DEFAULT_MAX_SYNC_DECOMPRESS_PAGES;
#endif

#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif

#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}
229
/* mount option tokens, matched against erofs_tokens below */
enum {
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_fault_injection,
	Opt_err
};

/* token table for match_token(); Opt_err/NULL terminates the table */
static match_table_t erofs_tokens = {
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_err, NULL}
};
247
/*
 * Parse the comma-separated mount option string (may be NULL).
 * Returns 0 on success or -EINVAL for an unrecognized option or a bad
 * fault_injection value.
 */
static int parse_options(struct super_block *sb, char *options)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int err;

	if (!options)
		return 0;

	while ((p = strsep(&options, ","))) {
		int token;

		/* skip empty segments such as "opt1,,opt2" */
		if (!*p)
			continue;

		args[0].to = args[0].from = NULL;
		token = match_token(p, erofs_tokens, args);

		switch (token) {
#ifdef CONFIG_EROFS_FS_XATTR
		case Opt_user_xattr:
			set_opt(EROFS_SB(sb), XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(EROFS_SB(sb), XATTR_USER);
			break;
#else
		/* options are accepted but ignored when compiled out */
		case Opt_user_xattr:
			infoln("user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			infoln("nouser_xattr options not supported");
			break;
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(EROFS_SB(sb), POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(EROFS_SB(sb), POSIX_ACL);
			break;
#else
		case Opt_acl:
			infoln("acl options not supported");
			break;
		case Opt_noacl:
			infoln("noacl options not supported");
			break;
#endif
		case Opt_fault_injection:
			err = erofs_build_fault_attr(EROFS_SB(sb), args);
			if (err)
				return err;
			break;

		default:
			errln("Unrecognized mount option \"%s\" "
			      "or missing value", p);
			return -EINVAL;
		}
	}
	return 0;
}
311
#ifdef EROFS_FS_HAS_MANAGED_CACHE

static const struct address_space_operations managed_cache_aops;

/*
 * Try to release a page of the managed cache.  Returns non-zero when
 * the caller may free the page, 0 when it is still in use.
 */
static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
{
	int ret = 1;	/* assume releasable unless a private owner says no */
	struct address_space *const mapping = page->mapping;

	DBG_BUGON(!PageLocked(page));
	DBG_BUGON(mapping->a_ops != &managed_cache_aops);

	if (PagePrivate(page))
		ret = erofs_try_to_free_cached_page(mapping, page);

	return ret;
}

static void managed_cache_invalidatepage(struct page *page,
					 unsigned int offset,
					 unsigned int length)
{
	const unsigned int stop = length + offset;

	DBG_BUGON(!PageLocked(page));

	/* the range must lie within the page; also catches unsigned overflow */
	DBG_BUGON(stop > PAGE_SIZE || stop < length);

	/* only whole-page invalidation drops the cached data; retry until freed */
	if (offset == 0 && stop == PAGE_SIZE)
		while (!managed_cache_releasepage(page, GFP_NOFS))
			cond_resched();
}

static const struct address_space_operations managed_cache_aops = {
	.releasepage = managed_cache_releasepage,
	.invalidatepage = managed_cache_invalidatepage,
};

/*
 * Create the anonymous inode whose page cache backs the per-superblock
 * managed cache.  Returns the inode or ERR_PTR(-ENOMEM).
 */
static struct inode *erofs_init_managed_cache(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);

	set_nlink(inode, 1);
	inode->i_size = OFFSET_MAX;

	inode->i_mapping->a_ops = &managed_cache_aops;
	mapping_set_gfp_mask(inode->i_mapping,
			     GFP_NOFS | __GFP_HIGHMEM |
			     __GFP_MOVABLE | __GFP_NOFAIL);
	return inode;
}

#endif
369
/*
 * Fill in the superblock at mount time: read and validate the on-disk
 * superblock, parse mount options, set up the optional managed cache
 * and instantiate the root dentry.  Returns 0 or a negative errno; on
 * failure all partially-built state is unwound via the goto chain at
 * the bottom.
 */
static int erofs_read_super(struct super_block *sb,
			    const char *dev_name,
			    void *data, int silent)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	int err = -EINVAL;

	infoln("read_super, device -> %s", dev_name);
	infoln("options -> %s", (char *)data);

	if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) {
		errln("failed to set erofs blksize");
		goto err;
	}

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (unlikely(!sbi)) {
		err = -ENOMEM;
		goto err;
	}
	sb->s_fs_info = sbi;

	err = superblock_read(sb);
	if (err)
		goto err_sbread;

	sb->s_magic = EROFS_SUPER_MAGIC;
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;

	sb->s_op = &erofs_sops;

#ifdef CONFIG_EROFS_FS_XATTR
	sb->s_xattr = erofs_xattr_handlers;
#endif

	/* set erofs default mount options before parsing user options */
	default_options(sbi);

	err = parse_options(sb, data);
	if (err)
		goto err_parseopt;

	if (!silent)
		infoln("root inode @ nid %llu", ROOT_NID(sbi));

	if (test_opt(sbi, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
#endif

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	sbi->managed_cache = erofs_init_managed_cache(sb);
	if (IS_ERR(sbi->managed_cache)) {
		err = PTR_ERR(sbi->managed_cache);
		goto err_init_managed_cache;
	}
#endif

	/* get the root inode */
	inode = erofs_iget(sb, ROOT_NID(sbi), true);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto err_iget;
	}

	if (!S_ISDIR(inode->i_mode)) {
		errln("rootino(nid %llu) is not a directory(i_mode %o)",
		      ROOT_NID(sbi), inode->i_mode);
		err = -EINVAL;
		iput(inode);
		goto err_iget;
	}

	/* d_make_root() consumes the inode reference even on failure */
	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		err = -ENOMEM;
		goto err_iget;
	}

	/* save the device name for /proc output and unmount logging */
	sbi->dev_name = __getname();
	if (!sbi->dev_name) {
		err = -ENOMEM;
		goto err_devname;
	}

	snprintf(sbi->dev_name, PATH_MAX, "%s", dev_name);
	/* NOTE(review): snprintf already NUL-terminates; this is belt-and-braces */
	sbi->dev_name[PATH_MAX - 1] = '\0';

	erofs_register_super(sb);

	if (!silent)
		infoln("mounted on %s with opts: %s.", dev_name,
		       (char *)data);
	return 0;

	/*
	 * error handling: unwind in reverse order of construction
	 */
err_devname:
	dput(sb->s_root);
	sb->s_root = NULL;
err_iget:
#ifdef EROFS_FS_HAS_MANAGED_CACHE
	iput(sbi->managed_cache);
err_init_managed_cache:
#endif
err_parseopt:
err_sbread:
	sb->s_fs_info = NULL;
	kfree(sbi);
err:
	return err;
}
492
493
494
495
496
/* ->put_super: release everything built by erofs_read_super() at unmount */
static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	/* read_super may have failed before s_fs_info was populated */
	if (!sbi)
		return;

	WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);

	infoln("unmounted for %s", sbi->dev_name);
	__putname(sbi->dev_name);

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	iput(sbi->managed_cache);
#endif

	mutex_lock(&sbi->umount_mutex);

#ifdef CONFIG_EROFS_FS_ZIP
	/* drop every cached compression workgroup before unregistering */
	erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
#endif

	erofs_unregister_super(sb);
	mutex_unlock(&sbi->umount_mutex);

	kfree(sbi);
	sb->s_fs_info = NULL;
}
527
528
/* carries the mount arguments from erofs_mount() to erofs_fill_super() */
struct erofs_mount_private {
	const char *dev_name;
	char *options;
};
533
534
535static int erofs_fill_super(struct super_block *sb,
536 void *_priv, int silent)
537{
538 struct erofs_mount_private *priv = _priv;
539
540 return erofs_read_super(sb, priv->dev_name,
541 priv->options, silent);
542}
543
544static struct dentry *erofs_mount(
545 struct file_system_type *fs_type, int flags,
546 const char *dev_name, void *data)
547{
548 struct erofs_mount_private priv = {
549 .dev_name = dev_name,
550 .options = data
551 };
552
553 return mount_bdev(fs_type, flags, dev_name,
554 &priv, erofs_fill_super);
555}
556
/* thin wrapper; erofs-specific teardown happens in ->put_super */
static void erofs_kill_sb(struct super_block *sb)
{
	kill_block_super(sb);
}
561
/* filesystem type registration; FS_REQUIRES_DEV: backed by a block device */
static struct file_system_type erofs_fs_type = {
	.owner = THIS_MODULE,
	.name = "erofs",
	.mount = erofs_mount,
	.kill_sb = erofs_kill_sb,
	.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("erofs");
570
/*
 * Module entry point: set up the inode cache, shrinker and decompression
 * subsystem, then register the filesystem, unwinding in reverse order on
 * any failure.
 */
static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();
	infoln("initializing erofs " EROFS_VERSION);

	err = erofs_init_inode_cache();
	if (err)
		goto icache_err;

	err = register_shrinker(&erofs_shrinker_info);
	if (err)
		goto shrinker_err;

	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	infoln("successfully to initialize erofs");
	return 0;

fs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	unregister_shrinker(&erofs_shrinker_info);
shrinker_err:
	erofs_exit_inode_cache();
icache_err:
	return err;
}
606
/* module exit: tear everything down in exact reverse order of init */
static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);
	z_erofs_exit_zip_subsystem();
	unregister_shrinker(&erofs_shrinker_info);
	erofs_exit_inode_cache();
	infoln("successfully finalize erofs");
}
615
616
617static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
618{
619 struct super_block *sb = dentry->d_sb;
620 struct erofs_sb_info *sbi = EROFS_SB(sb);
621 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
622
623 buf->f_type = sb->s_magic;
624 buf->f_bsize = EROFS_BLKSIZ;
625 buf->f_blocks = sbi->blocks;
626 buf->f_bfree = buf->f_bavail = 0;
627
628 buf->f_files = ULLONG_MAX;
629 buf->f_ffree = ULLONG_MAX - sbi->inos;
630
631 buf->f_namelen = EROFS_NAME_LEN;
632
633 buf->f_fsid.val[0] = (u32)id;
634 buf->f_fsid.val[1] = (u32)(id >> 32);
635 return 0;
636}
637
/* ->show_options: emit the active mount options for /proc/mounts */
static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	/* __maybe_unused: sbi is unreferenced if both option groups are off */
	struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);

#ifdef CONFIG_EROFS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
			   erofs_get_fault_rate(sbi));
	return 0;
}
659
/*
 * ->remount_fs: reparse the options, rolling back to the previous
 * option set and fault-injection rate if parsing fails.  The
 * filesystem is forced to stay read-only.
 */
static int erofs_remount(struct super_block *sb, int *flags, char *data)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int org_mnt_opt = sbi->mount_opt;
	unsigned int org_inject_rate = erofs_get_fault_rate(sbi);
	int err;

	DBG_BUGON(!sb_rdonly(sb));
	err = parse_options(sb, data);
	if (err)
		goto out;

	if (test_opt(sbi, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

	*flags |= SB_RDONLY;
	return 0;
out:
	/* restore the pre-remount state on failure */
	__erofs_build_fault_attr(sbi, org_inject_rate);
	sbi->mount_opt = org_mnt_opt;

	return err;
}
685
/* read-only superblock operations; no write-side callbacks are needed */
const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = alloc_inode,
	.free_inode = free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
	.remount_fs = erofs_remount,
};
694
module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Yu Chao, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");
701
702