// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
8#include <linux/module.h>
9#include <linux/init.h>
10#include <linux/fs.h>
11#include <linux/statfs.h>
12#include <linux/buffer_head.h>
13#include <linux/backing-dev.h>
14#include <linux/kthread.h>
15#include <linux/parser.h>
16#include <linux/mount.h>
17#include <linux/seq_file.h>
18#include <linux/proc_fs.h>
19#include <linux/random.h>
20#include <linux/exportfs.h>
21#include <linux/blkdev.h>
22#include <linux/quotaops.h>
23#include <linux/f2fs_fs.h>
24#include <linux/sysfs.h>
25#include <linux/quota.h>
26#include <linux/unicode.h>
27#include <linux/part_stat.h>
28
29#include "f2fs.h"
30#include "node.h"
31#include "segment.h"
32#include "xattr.h"
33#include "gc.h"
34#include "trace.h"
35
36#define CREATE_TRACE_POINTS
37#include <trace/events/f2fs.h>
38
39static struct kmem_cache *f2fs_inode_cachep;
40
41#ifdef CONFIG_F2FS_FAULT_INJECTION
42
43const char *f2fs_fault_name[FAULT_MAX] = {
44 [FAULT_KMALLOC] = "kmalloc",
45 [FAULT_KVMALLOC] = "kvmalloc",
46 [FAULT_PAGE_ALLOC] = "page alloc",
47 [FAULT_PAGE_GET] = "page get",
48 [FAULT_ALLOC_BIO] = "alloc bio",
49 [FAULT_ALLOC_NID] = "alloc nid",
50 [FAULT_ORPHAN] = "orphan",
51 [FAULT_BLOCK] = "no more block",
52 [FAULT_DIR_DEPTH] = "too big dir depth",
53 [FAULT_EVICT_INODE] = "evict_inode fail",
54 [FAULT_TRUNCATE] = "truncate fail",
55 [FAULT_READ_IO] = "read IO error",
56 [FAULT_CHECKPOINT] = "checkpoint error",
57 [FAULT_DISCARD] = "discard error",
58 [FAULT_WRITE_IO] = "write IO error",
59};
60
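/*
 * Configure fault injection for this mount: a non-zero @rate resets the
 * injected-op counter and sets how often a fault fires, a non-zero @type
 * selects which FAULT_* sites may fail, and passing 0 for both clears the
 * whole fault_info so injection is disabled.
 */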
61void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
62 unsigned int type)
63{
64 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
65
66 if (rate) {
67 atomic_set(&ffi->inject_ops, 0);
68 ffi->inject_rate = rate;
69 }
70
71 if (type)
72 ffi->inject_type = type;
73
74 if (!rate && !type)
75 memset(ffi, 0, sizeof(struct f2fs_fault_info));
76}
77#endif
78
79
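/*
 * Shrinker hooks that let the VM reclaim f2fs in-memory caches under
 * memory pressure; the scan/count callbacks are implemented in shrinker.c.
 */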
80static struct shrinker f2fs_shrinker_info = {
81 .scan_objects = f2fs_shrink_scan,
82 .count_objects = f2fs_shrink_count,
83 .seeks = DEFAULT_SEEKS,
84};
85
86enum {
87 Opt_gc_background,
88 Opt_disable_roll_forward,
89 Opt_norecovery,
90 Opt_discard,
91 Opt_nodiscard,
92 Opt_noheap,
93 Opt_heap,
94 Opt_user_xattr,
95 Opt_nouser_xattr,
96 Opt_acl,
97 Opt_noacl,
98 Opt_active_logs,
99 Opt_disable_ext_identify,
100 Opt_inline_xattr,
101 Opt_noinline_xattr,
102 Opt_inline_xattr_size,
103 Opt_inline_data,
104 Opt_inline_dentry,
105 Opt_noinline_dentry,
106 Opt_flush_merge,
107 Opt_noflush_merge,
108 Opt_nobarrier,
109 Opt_fastboot,
110 Opt_extent_cache,
111 Opt_noextent_cache,
112 Opt_noinline_data,
113 Opt_data_flush,
114 Opt_reserve_root,
115 Opt_resgid,
116 Opt_resuid,
117 Opt_mode,
118 Opt_io_size_bits,
119 Opt_fault_injection,
120 Opt_fault_type,
121 Opt_lazytime,
122 Opt_nolazytime,
123 Opt_quota,
124 Opt_noquota,
125 Opt_usrquota,
126 Opt_grpquota,
127 Opt_prjquota,
128 Opt_usrjquota,
129 Opt_grpjquota,
130 Opt_prjjquota,
131 Opt_offusrjquota,
132 Opt_offgrpjquota,
133 Opt_offprjjquota,
134 Opt_jqfmt_vfsold,
135 Opt_jqfmt_vfsv0,
136 Opt_jqfmt_vfsv1,
137 Opt_whint,
138 Opt_alloc,
139 Opt_fsync,
140 Opt_test_dummy_encryption,
141 Opt_checkpoint_disable,
142 Opt_checkpoint_disable_cap,
143 Opt_checkpoint_disable_cap_perc,
144 Opt_checkpoint_enable,
145 Opt_compress_algorithm,
146 Opt_compress_log_size,
147 Opt_compress_extension,
148 Opt_err,
149};
150
151static match_table_t f2fs_tokens = {
152 {Opt_gc_background, "background_gc=%s"},
153 {Opt_disable_roll_forward, "disable_roll_forward"},
154 {Opt_norecovery, "norecovery"},
155 {Opt_discard, "discard"},
156 {Opt_nodiscard, "nodiscard"},
157 {Opt_noheap, "no_heap"},
158 {Opt_heap, "heap"},
159 {Opt_user_xattr, "user_xattr"},
160 {Opt_nouser_xattr, "nouser_xattr"},
161 {Opt_acl, "acl"},
162 {Opt_noacl, "noacl"},
163 {Opt_active_logs, "active_logs=%u"},
164 {Opt_disable_ext_identify, "disable_ext_identify"},
165 {Opt_inline_xattr, "inline_xattr"},
166 {Opt_noinline_xattr, "noinline_xattr"},
167 {Opt_inline_xattr_size, "inline_xattr_size=%u"},
168 {Opt_inline_data, "inline_data"},
169 {Opt_inline_dentry, "inline_dentry"},
170 {Opt_noinline_dentry, "noinline_dentry"},
171 {Opt_flush_merge, "flush_merge"},
172 {Opt_noflush_merge, "noflush_merge"},
173 {Opt_nobarrier, "nobarrier"},
174 {Opt_fastboot, "fastboot"},
175 {Opt_extent_cache, "extent_cache"},
176 {Opt_noextent_cache, "noextent_cache"},
177 {Opt_noinline_data, "noinline_data"},
178 {Opt_data_flush, "data_flush"},
179 {Opt_reserve_root, "reserve_root=%u"},
180 {Opt_resgid, "resgid=%u"},
181 {Opt_resuid, "resuid=%u"},
182 {Opt_mode, "mode=%s"},
183 {Opt_io_size_bits, "io_bits=%u"},
184 {Opt_fault_injection, "fault_injection=%u"},
185 {Opt_fault_type, "fault_type=%u"},
186 {Opt_lazytime, "lazytime"},
187 {Opt_nolazytime, "nolazytime"},
188 {Opt_quota, "quota"},
189 {Opt_noquota, "noquota"},
190 {Opt_usrquota, "usrquota"},
191 {Opt_grpquota, "grpquota"},
192 {Opt_prjquota, "prjquota"},
193 {Opt_usrjquota, "usrjquota=%s"},
194 {Opt_grpjquota, "grpjquota=%s"},
195 {Opt_prjjquota, "prjjquota=%s"},
196 {Opt_offusrjquota, "usrjquota="},
197 {Opt_offgrpjquota, "grpjquota="},
198 {Opt_offprjjquota, "prjjquota="},
199 {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
200 {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
201 {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
202 {Opt_whint, "whint_mode=%s"},
203 {Opt_alloc, "alloc_mode=%s"},
204 {Opt_fsync, "fsync_mode=%s"},
205 {Opt_test_dummy_encryption, "test_dummy_encryption"},
206 {Opt_checkpoint_disable, "checkpoint=disable"},
207 {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
208 {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
209 {Opt_checkpoint_enable, "checkpoint=enable"},
210 {Opt_compress_algorithm, "compress_algorithm=%s"},
211 {Opt_compress_log_size, "compress_log_size=%u"},
212 {Opt_compress_extension, "compress_extension=%s"},
213 {Opt_err, NULL},
214};
215
216void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
217{
218 struct va_format vaf;
219 va_list args;
220 int level;
221
222 va_start(args, fmt);
223
224 level = printk_get_level(fmt);
225 vaf.fmt = printk_skip_level(fmt);
226 vaf.va = &args;
227 printk("%c%cF2FS-fs (%s): %pV\n",
228 KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
229
230 va_end(args);
231}
232
233#ifdef CONFIG_UNICODE
234static const struct f2fs_sb_encodings {
235 __u16 magic;
236 char *name;
237 char *version;
238} f2fs_sb_encoding_map[] = {
239 {F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
240};
241
242static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
243 const struct f2fs_sb_encodings **encoding,
244 __u16 *flags)
245{
246 __u16 magic = le16_to_cpu(sb->s_encoding);
247 int i;
248
249 for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
250 if (magic == f2fs_sb_encoding_map[i].magic)
251 break;
252
253 if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
254 return -EINVAL;
255
256 *encoding = &f2fs_sb_encoding_map[i];
257 *flags = le16_to_cpu(sb->s_encoding_flags);
258
259 return 0;
260}
261#endif
262
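/*
 * Cap the root-reserved block count: it may not exceed 0.2% of the user
 * blocks nor the space left after the filesystem's own reservation, and
 * resuid/resgid are ignored when reserve_root was not given.
 */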
263static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
264{
265 block_t limit = min((sbi->user_block_count << 1) / 1000,
266 sbi->user_block_count - sbi->reserved_blocks);
267
	/* the limit is at most 0.2% of user blocks */
269 if (test_opt(sbi, RESERVE_ROOT) &&
270 F2FS_OPTION(sbi).root_reserved_blocks > limit) {
271 F2FS_OPTION(sbi).root_reserved_blocks = limit;
272 f2fs_info(sbi, "Reduce reserved blocks for root = %u",
273 F2FS_OPTION(sbi).root_reserved_blocks);
274 }
275 if (!test_opt(sbi, RESERVE_ROOT) &&
276 (!uid_eq(F2FS_OPTION(sbi).s_resuid,
277 make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
278 !gid_eq(F2FS_OPTION(sbi).s_resgid,
279 make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
280 f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
281 from_kuid_munged(&init_user_ns,
282 F2FS_OPTION(sbi).s_resuid),
283 from_kgid_munged(&init_user_ns,
284 F2FS_OPTION(sbi).s_resgid));
285}
286
287static void init_once(void *foo)
288{
289 struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
290
291 inode_init_once(&fi->vfs_inode);
292}
293
294#ifdef CONFIG_QUOTA
295static const char * const quotatypes[] = INITQFNAMES;
296#define QTYPE2NAME(t) (quotatypes[t])
297static int f2fs_set_qf_name(struct super_block *sb, int qtype,
298 substring_t *args)
299{
300 struct f2fs_sb_info *sbi = F2FS_SB(sb);
301 char *qname;
302 int ret = -EINVAL;
303
304 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
305 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
306 return -EINVAL;
307 }
308 if (f2fs_sb_has_quota_ino(sbi)) {
309 f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
310 return 0;
311 }
312
313 qname = match_strdup(args);
314 if (!qname) {
315 f2fs_err(sbi, "Not enough memory for storing quotafile name");
316 return -ENOMEM;
317 }
318 if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
319 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
320 ret = 0;
321 else
322 f2fs_err(sbi, "%s quota file already specified",
323 QTYPE2NAME(qtype));
324 goto errout;
325 }
326 if (strchr(qname, '/')) {
327 f2fs_err(sbi, "quotafile must be on filesystem root");
328 goto errout;
329 }
330 F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
331 set_opt(sbi, QUOTA);
332 return 0;
333errout:
334 kvfree(qname);
335 return ret;
336}
337
338static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
339{
340 struct f2fs_sb_info *sbi = F2FS_SB(sb);
341
342 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
343 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
344 return -EINVAL;
345 }
346 kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
347 F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
348 return 0;
349}
350
351static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
352{
	/*
	 * We only check project quota here: 'usrquota' and 'grpquota'
	 * are allowed even without the quota feature so that legacy
	 * quota files keep working.
	 */
358 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
359 f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
360 return -1;
361 }
362 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
363 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
364 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
365 if (test_opt(sbi, USRQUOTA) &&
366 F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
367 clear_opt(sbi, USRQUOTA);
368
369 if (test_opt(sbi, GRPQUOTA) &&
370 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
371 clear_opt(sbi, GRPQUOTA);
372
373 if (test_opt(sbi, PRJQUOTA) &&
374 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
375 clear_opt(sbi, PRJQUOTA);
376
377 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
378 test_opt(sbi, PRJQUOTA)) {
379 f2fs_err(sbi, "old and new quota format mixing");
380 return -1;
381 }
382
383 if (!F2FS_OPTION(sbi).s_jquota_fmt) {
384 f2fs_err(sbi, "journaled quota format not specified");
385 return -1;
386 }
387 }
388
389 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
390 f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
391 F2FS_OPTION(sbi).s_jquota_fmt = 0;
392 }
393 return 0;
394}
395#endif
396
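/*
 * Parse the comma-separated mount option string into F2FS_OPTION(sbi) and
 * the sbi option bitmap, then cross-check option combinations (quota setup,
 * io_bits vs. mode=lfs, inline_xattr_size range, checkpoint=disable).
 */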
397static int parse_options(struct super_block *sb, char *options)
398{
399 struct f2fs_sb_info *sbi = F2FS_SB(sb);
400 substring_t args[MAX_OPT_ARGS];
401 unsigned char (*ext)[F2FS_EXTENSION_LEN];
402 char *p, *name;
403 int arg = 0, ext_cnt;
404 kuid_t uid;
405 kgid_t gid;
406#ifdef CONFIG_QUOTA
407 int ret;
408#endif
409
410 if (!options)
411 return 0;
412
413 while ((p = strsep(&options, ",")) != NULL) {
414 int token;
415 if (!*p)
416 continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
421 args[0].to = args[0].from = NULL;
422 token = match_token(p, f2fs_tokens, args);
423
424 switch (token) {
425 case Opt_gc_background:
426 name = match_strdup(&args[0]);
427
428 if (!name)
429 return -ENOMEM;
430 if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
431 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
432 } else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
433 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
434 } else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
435 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
436 } else {
437 kvfree(name);
438 return -EINVAL;
439 }
440 kvfree(name);
441 break;
442 case Opt_disable_roll_forward:
443 set_opt(sbi, DISABLE_ROLL_FORWARD);
444 break;
445 case Opt_norecovery:
			/* this option mounts f2fs read-only */
447 set_opt(sbi, NORECOVERY);
448 if (!f2fs_readonly(sb))
449 return -EINVAL;
450 break;
451 case Opt_discard:
452 set_opt(sbi, DISCARD);
453 break;
454 case Opt_nodiscard:
455 if (f2fs_sb_has_blkzoned(sbi)) {
456 f2fs_warn(sbi, "discard is required for zoned block devices");
457 return -EINVAL;
458 }
459 clear_opt(sbi, DISCARD);
460 break;
461 case Opt_noheap:
462 set_opt(sbi, NOHEAP);
463 break;
464 case Opt_heap:
465 clear_opt(sbi, NOHEAP);
466 break;
467#ifdef CONFIG_F2FS_FS_XATTR
468 case Opt_user_xattr:
469 set_opt(sbi, XATTR_USER);
470 break;
471 case Opt_nouser_xattr:
472 clear_opt(sbi, XATTR_USER);
473 break;
474 case Opt_inline_xattr:
475 set_opt(sbi, INLINE_XATTR);
476 break;
477 case Opt_noinline_xattr:
478 clear_opt(sbi, INLINE_XATTR);
479 break;
480 case Opt_inline_xattr_size:
481 if (args->from && match_int(args, &arg))
482 return -EINVAL;
483 set_opt(sbi, INLINE_XATTR_SIZE);
484 F2FS_OPTION(sbi).inline_xattr_size = arg;
485 break;
486#else
487 case Opt_user_xattr:
488 f2fs_info(sbi, "user_xattr options not supported");
489 break;
490 case Opt_nouser_xattr:
491 f2fs_info(sbi, "nouser_xattr options not supported");
492 break;
493 case Opt_inline_xattr:
494 f2fs_info(sbi, "inline_xattr options not supported");
495 break;
496 case Opt_noinline_xattr:
497 f2fs_info(sbi, "noinline_xattr options not supported");
498 break;
499#endif
500#ifdef CONFIG_F2FS_FS_POSIX_ACL
501 case Opt_acl:
502 set_opt(sbi, POSIX_ACL);
503 break;
504 case Opt_noacl:
505 clear_opt(sbi, POSIX_ACL);
506 break;
507#else
508 case Opt_acl:
509 f2fs_info(sbi, "acl options not supported");
510 break;
511 case Opt_noacl:
512 f2fs_info(sbi, "noacl options not supported");
513 break;
514#endif
515 case Opt_active_logs:
516 if (args->from && match_int(args, &arg))
517 return -EINVAL;
518 if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
519 return -EINVAL;
520 F2FS_OPTION(sbi).active_logs = arg;
521 break;
522 case Opt_disable_ext_identify:
523 set_opt(sbi, DISABLE_EXT_IDENTIFY);
524 break;
525 case Opt_inline_data:
526 set_opt(sbi, INLINE_DATA);
527 break;
528 case Opt_inline_dentry:
529 set_opt(sbi, INLINE_DENTRY);
530 break;
531 case Opt_noinline_dentry:
532 clear_opt(sbi, INLINE_DENTRY);
533 break;
534 case Opt_flush_merge:
535 set_opt(sbi, FLUSH_MERGE);
536 break;
537 case Opt_noflush_merge:
538 clear_opt(sbi, FLUSH_MERGE);
539 break;
540 case Opt_nobarrier:
541 set_opt(sbi, NOBARRIER);
542 break;
543 case Opt_fastboot:
544 set_opt(sbi, FASTBOOT);
545 break;
546 case Opt_extent_cache:
547 set_opt(sbi, EXTENT_CACHE);
548 break;
549 case Opt_noextent_cache:
550 clear_opt(sbi, EXTENT_CACHE);
551 break;
552 case Opt_noinline_data:
553 clear_opt(sbi, INLINE_DATA);
554 break;
555 case Opt_data_flush:
556 set_opt(sbi, DATA_FLUSH);
557 break;
558 case Opt_reserve_root:
559 if (args->from && match_int(args, &arg))
560 return -EINVAL;
561 if (test_opt(sbi, RESERVE_ROOT)) {
562 f2fs_info(sbi, "Preserve previous reserve_root=%u",
563 F2FS_OPTION(sbi).root_reserved_blocks);
564 } else {
565 F2FS_OPTION(sbi).root_reserved_blocks = arg;
566 set_opt(sbi, RESERVE_ROOT);
567 }
568 break;
569 case Opt_resuid:
570 if (args->from && match_int(args, &arg))
571 return -EINVAL;
572 uid = make_kuid(current_user_ns(), arg);
573 if (!uid_valid(uid)) {
574 f2fs_err(sbi, "Invalid uid value %d", arg);
575 return -EINVAL;
576 }
577 F2FS_OPTION(sbi).s_resuid = uid;
578 break;
579 case Opt_resgid:
580 if (args->from && match_int(args, &arg))
581 return -EINVAL;
582 gid = make_kgid(current_user_ns(), arg);
583 if (!gid_valid(gid)) {
584 f2fs_err(sbi, "Invalid gid value %d", arg);
585 return -EINVAL;
586 }
587 F2FS_OPTION(sbi).s_resgid = gid;
588 break;
589 case Opt_mode:
590 name = match_strdup(&args[0]);
591
592 if (!name)
593 return -ENOMEM;
594 if (strlen(name) == 8 &&
595 !strncmp(name, "adaptive", 8)) {
596 if (f2fs_sb_has_blkzoned(sbi)) {
597 f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
598 kvfree(name);
599 return -EINVAL;
600 }
601 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
602 } else if (strlen(name) == 3 &&
603 !strncmp(name, "lfs", 3)) {
604 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
605 } else {
606 kvfree(name);
607 return -EINVAL;
608 }
609 kvfree(name);
610 break;
611 case Opt_io_size_bits:
612 if (args->from && match_int(args, &arg))
613 return -EINVAL;
614 if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
615 f2fs_warn(sbi, "Not support %d, larger than %d",
616 1 << arg, BIO_MAX_PAGES);
617 return -EINVAL;
618 }
619 F2FS_OPTION(sbi).write_io_size_bits = arg;
620 break;
621#ifdef CONFIG_F2FS_FAULT_INJECTION
622 case Opt_fault_injection:
623 if (args->from && match_int(args, &arg))
624 return -EINVAL;
625 f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
626 set_opt(sbi, FAULT_INJECTION);
627 break;
628
629 case Opt_fault_type:
630 if (args->from && match_int(args, &arg))
631 return -EINVAL;
632 f2fs_build_fault_attr(sbi, 0, arg);
633 set_opt(sbi, FAULT_INJECTION);
634 break;
635#else
636 case Opt_fault_injection:
637 f2fs_info(sbi, "fault_injection options not supported");
638 break;
639
640 case Opt_fault_type:
641 f2fs_info(sbi, "fault_type options not supported");
642 break;
643#endif
644 case Opt_lazytime:
645 sb->s_flags |= SB_LAZYTIME;
646 break;
647 case Opt_nolazytime:
648 sb->s_flags &= ~SB_LAZYTIME;
649 break;
650#ifdef CONFIG_QUOTA
651 case Opt_quota:
652 case Opt_usrquota:
653 set_opt(sbi, USRQUOTA);
654 break;
655 case Opt_grpquota:
656 set_opt(sbi, GRPQUOTA);
657 break;
658 case Opt_prjquota:
659 set_opt(sbi, PRJQUOTA);
660 break;
661 case Opt_usrjquota:
662 ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
663 if (ret)
664 return ret;
665 break;
666 case Opt_grpjquota:
667 ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
668 if (ret)
669 return ret;
670 break;
671 case Opt_prjjquota:
672 ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
673 if (ret)
674 return ret;
675 break;
676 case Opt_offusrjquota:
677 ret = f2fs_clear_qf_name(sb, USRQUOTA);
678 if (ret)
679 return ret;
680 break;
681 case Opt_offgrpjquota:
682 ret = f2fs_clear_qf_name(sb, GRPQUOTA);
683 if (ret)
684 return ret;
685 break;
686 case Opt_offprjjquota:
687 ret = f2fs_clear_qf_name(sb, PRJQUOTA);
688 if (ret)
689 return ret;
690 break;
691 case Opt_jqfmt_vfsold:
692 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
693 break;
694 case Opt_jqfmt_vfsv0:
695 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
696 break;
697 case Opt_jqfmt_vfsv1:
698 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
699 break;
700 case Opt_noquota:
701 clear_opt(sbi, QUOTA);
702 clear_opt(sbi, USRQUOTA);
703 clear_opt(sbi, GRPQUOTA);
704 clear_opt(sbi, PRJQUOTA);
705 break;
706#else
707 case Opt_quota:
708 case Opt_usrquota:
709 case Opt_grpquota:
710 case Opt_prjquota:
711 case Opt_usrjquota:
712 case Opt_grpjquota:
713 case Opt_prjjquota:
714 case Opt_offusrjquota:
715 case Opt_offgrpjquota:
716 case Opt_offprjjquota:
717 case Opt_jqfmt_vfsold:
718 case Opt_jqfmt_vfsv0:
719 case Opt_jqfmt_vfsv1:
720 case Opt_noquota:
721 f2fs_info(sbi, "quota operations not supported");
722 break;
723#endif
724 case Opt_whint:
725 name = match_strdup(&args[0]);
726 if (!name)
727 return -ENOMEM;
728 if (strlen(name) == 10 &&
729 !strncmp(name, "user-based", 10)) {
730 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
731 } else if (strlen(name) == 3 &&
732 !strncmp(name, "off", 3)) {
733 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
734 } else if (strlen(name) == 8 &&
735 !strncmp(name, "fs-based", 8)) {
736 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
737 } else {
738 kvfree(name);
739 return -EINVAL;
740 }
741 kvfree(name);
742 break;
743 case Opt_alloc:
744 name = match_strdup(&args[0]);
745 if (!name)
746 return -ENOMEM;
747
748 if (strlen(name) == 7 &&
749 !strncmp(name, "default", 7)) {
750 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
751 } else if (strlen(name) == 5 &&
752 !strncmp(name, "reuse", 5)) {
753 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
754 } else {
755 kvfree(name);
756 return -EINVAL;
757 }
758 kvfree(name);
759 break;
760 case Opt_fsync:
761 name = match_strdup(&args[0]);
762 if (!name)
763 return -ENOMEM;
764 if (strlen(name) == 5 &&
765 !strncmp(name, "posix", 5)) {
766 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
767 } else if (strlen(name) == 6 &&
768 !strncmp(name, "strict", 6)) {
769 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
770 } else if (strlen(name) == 9 &&
771 !strncmp(name, "nobarrier", 9)) {
772 F2FS_OPTION(sbi).fsync_mode =
773 FSYNC_MODE_NOBARRIER;
774 } else {
775 kvfree(name);
776 return -EINVAL;
777 }
778 kvfree(name);
779 break;
780 case Opt_test_dummy_encryption:
781#ifdef CONFIG_FS_ENCRYPTION
782 if (!f2fs_sb_has_encrypt(sbi)) {
783 f2fs_err(sbi, "Encrypt feature is off");
784 return -EINVAL;
785 }
786
787 F2FS_OPTION(sbi).test_dummy_encryption = true;
788 f2fs_info(sbi, "Test dummy encryption mode enabled");
789#else
790 f2fs_info(sbi, "Test dummy encryption mount option ignored");
791#endif
792 break;
793 case Opt_checkpoint_disable_cap_perc:
794 if (args->from && match_int(args, &arg))
795 return -EINVAL;
796 if (arg < 0 || arg > 100)
797 return -EINVAL;
798 if (arg == 100)
799 F2FS_OPTION(sbi).unusable_cap =
800 sbi->user_block_count;
801 else
802 F2FS_OPTION(sbi).unusable_cap =
803 (sbi->user_block_count / 100) * arg;
804 set_opt(sbi, DISABLE_CHECKPOINT);
805 break;
806 case Opt_checkpoint_disable_cap:
807 if (args->from && match_int(args, &arg))
808 return -EINVAL;
809 F2FS_OPTION(sbi).unusable_cap = arg;
810 set_opt(sbi, DISABLE_CHECKPOINT);
811 break;
812 case Opt_checkpoint_disable:
813 set_opt(sbi, DISABLE_CHECKPOINT);
814 break;
815 case Opt_checkpoint_enable:
816 clear_opt(sbi, DISABLE_CHECKPOINT);
817 break;
818 case Opt_compress_algorithm:
819 if (!f2fs_sb_has_compression(sbi)) {
				f2fs_err(sbi, "Compression feature is off");
821 return -EINVAL;
822 }
823 name = match_strdup(&args[0]);
824 if (!name)
825 return -ENOMEM;
826 if (strlen(name) == 3 && !strcmp(name, "lzo")) {
827 F2FS_OPTION(sbi).compress_algorithm =
828 COMPRESS_LZO;
829 } else if (strlen(name) == 3 &&
830 !strcmp(name, "lz4")) {
831 F2FS_OPTION(sbi).compress_algorithm =
832 COMPRESS_LZ4;
833 } else if (strlen(name) == 4 &&
834 !strcmp(name, "zstd")) {
835 F2FS_OPTION(sbi).compress_algorithm =
836 COMPRESS_ZSTD;
837 } else {
838 kfree(name);
839 return -EINVAL;
840 }
841 kfree(name);
842 break;
843 case Opt_compress_log_size:
844 if (!f2fs_sb_has_compression(sbi)) {
845 f2fs_err(sbi, "Compression feature is off");
846 return -EINVAL;
847 }
848 if (args->from && match_int(args, &arg))
849 return -EINVAL;
850 if (arg < MIN_COMPRESS_LOG_SIZE ||
851 arg > MAX_COMPRESS_LOG_SIZE) {
852 f2fs_err(sbi,
853 "Compress cluster log size is out of range");
854 return -EINVAL;
855 }
856 F2FS_OPTION(sbi).compress_log_size = arg;
857 break;
858 case Opt_compress_extension:
859 if (!f2fs_sb_has_compression(sbi)) {
860 f2fs_err(sbi, "Compression feature is off");
861 return -EINVAL;
862 }
863 name = match_strdup(&args[0]);
864 if (!name)
865 return -ENOMEM;
866
867 ext = F2FS_OPTION(sbi).extensions;
868 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
869
870 if (strlen(name) >= F2FS_EXTENSION_LEN ||
871 ext_cnt >= COMPRESS_EXT_NUM) {
872 f2fs_err(sbi,
873 "invalid extension length/number");
874 kfree(name);
875 return -EINVAL;
876 }
877
878 strcpy(ext[ext_cnt], name);
879 F2FS_OPTION(sbi).compress_ext_cnt++;
880 kfree(name);
881 break;
882 default:
883 f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
884 p);
885 return -EINVAL;
886 }
887 }
888#ifdef CONFIG_QUOTA
889 if (f2fs_check_quota_options(sbi))
890 return -EINVAL;
891#else
892 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
893 f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
894 return -EINVAL;
895 }
896 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
897 f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
898 return -EINVAL;
899 }
900#endif
901#ifndef CONFIG_UNICODE
902 if (f2fs_sb_has_casefold(sbi)) {
903 f2fs_err(sbi,
904 "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
905 return -EINVAL;
906 }
907#endif
908
909 if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
910 f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
911 F2FS_IO_SIZE_KB(sbi));
912 return -EINVAL;
913 }
914
915 if (test_opt(sbi, INLINE_XATTR_SIZE)) {
916 int min_size, max_size;
917
918 if (!f2fs_sb_has_extra_attr(sbi) ||
919 !f2fs_sb_has_flexible_inline_xattr(sbi)) {
920 f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
921 return -EINVAL;
922 }
923 if (!test_opt(sbi, INLINE_XATTR)) {
924 f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
925 return -EINVAL;
926 }
927
928 min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
929 max_size = MAX_INLINE_XATTR_SIZE;
930
931 if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
932 F2FS_OPTION(sbi).inline_xattr_size > max_size) {
933 f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
934 min_size, max_size);
935 return -EINVAL;
936 }
937 }
938
939 if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
941 return -EINVAL;
942 }
943
	/*
	 * Do not pass down write hints if the number of active logs is
	 * less than NR_CURSEG_TYPE.
	 */
947 if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
948 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
949 return 0;
950}
951
952static struct inode *f2fs_alloc_inode(struct super_block *sb)
953{
954 struct f2fs_inode_info *fi;
955
956 fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
957 if (!fi)
958 return NULL;
959
960 init_once((void *) fi);
961
	/* Initialize f2fs-specific inode info */
963 atomic_set(&fi->dirty_pages, 0);
964 init_rwsem(&fi->i_sem);
965 spin_lock_init(&fi->i_size_lock);
966 INIT_LIST_HEAD(&fi->dirty_list);
967 INIT_LIST_HEAD(&fi->gdirty_list);
968 INIT_LIST_HEAD(&fi->inmem_ilist);
969 INIT_LIST_HEAD(&fi->inmem_pages);
970 mutex_init(&fi->inmem_lock);
971 init_rwsem(&fi->i_gc_rwsem[READ]);
972 init_rwsem(&fi->i_gc_rwsem[WRITE]);
973 init_rwsem(&fi->i_mmap_sem);
974 init_rwsem(&fi->i_xattr_sem);
975
	/* Will be used by directory only */
977 fi->i_dir_level = F2FS_SB(sb)->dir_level;
978
979 return &fi->vfs_inode;
980}
981
982static int f2fs_drop_inode(struct inode *inode)
983{
984 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
985 int ret;
986
	/*
	 * During filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
991 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
992 if (inode->i_ino == F2FS_NODE_INO(sbi) ||
993 inode->i_ino == F2FS_META_INO(sbi)) {
994 trace_f2fs_drop_inode(inode, 1);
995 return 1;
996 }
997 }
998
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
1006 if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
1007 if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* avoid evict_inode being called at the same time */
1009 atomic_inc(&inode->i_count);
1010 spin_unlock(&inode->i_lock);
1011
			/* discard any remaining in-memory atomic pages */
1013 if (f2fs_is_atomic_file(inode))
1014 f2fs_drop_inmem_pages(inode);
1015
			/* should keep fi->extent_tree for writepage */
1017 f2fs_destroy_extent_node(inode);
1018
1019 sb_start_intwrite(inode->i_sb);
1020 f2fs_i_size_write(inode, 0);
1021
1022 f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
1023 inode, NULL, 0, DATA);
1024 truncate_inode_pages_final(inode->i_mapping);
1025
1026 if (F2FS_HAS_BLOCKS(inode))
1027 f2fs_truncate(inode);
1028
1029 sb_end_intwrite(inode->i_sb);
1030
1031 spin_lock(&inode->i_lock);
1032 atomic_dec(&inode->i_count);
1033 }
1034 trace_f2fs_drop_inode(inode, 0);
1035 return 0;
1036 }
1037 ret = generic_drop_inode(inode);
1038 if (!ret)
1039 ret = fscrypt_drop_inode(inode);
1040 trace_f2fs_drop_inode(inode, ret);
1041 return ret;
1042}
1043
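/*
 * Mark an inode's metadata dirty; returns 1 if it was already dirty.
 * With @sync, the inode is also queued on the DIRTY_META list so a
 * checkpoint can write it back.
 */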
1044int f2fs_inode_dirtied(struct inode *inode, bool sync)
1045{
1046 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1047 int ret = 0;
1048
1049 spin_lock(&sbi->inode_lock[DIRTY_META]);
1050 if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1051 ret = 1;
1052 } else {
1053 set_inode_flag(inode, FI_DIRTY_INODE);
1054 stat_inc_dirty_inode(sbi, DIRTY_META);
1055 }
1056 if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
1057 list_add_tail(&F2FS_I(inode)->gdirty_list,
1058 &sbi->inode_list[DIRTY_META]);
1059 inc_page_count(sbi, F2FS_DIRTY_IMETA);
1060 }
1061 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1062 return ret;
1063}
1064
1065void f2fs_inode_synced(struct inode *inode)
1066{
1067 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1068
1069 spin_lock(&sbi->inode_lock[DIRTY_META]);
1070 if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
1071 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1072 return;
1073 }
1074 if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
1075 list_del_init(&F2FS_I(inode)->gdirty_list);
1076 dec_page_count(sbi, F2FS_DIRTY_IMETA);
1077 }
1078 clear_inode_flag(inode, FI_DIRTY_INODE);
1079 clear_inode_flag(inode, FI_AUTO_RECOVER);
1080 stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
1081 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1082}
1083
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
1089static void f2fs_dirty_inode(struct inode *inode, int flags)
1090{
1091 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1092
1093 if (inode->i_ino == F2FS_NODE_INO(sbi) ||
1094 inode->i_ino == F2FS_META_INO(sbi))
1095 return;
1096
1097 if (flags == I_DIRTY_TIME)
1098 return;
1099
1100 if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
1101 clear_inode_flag(inode, FI_AUTO_RECOVER);
1102
1103 f2fs_inode_dirtied(inode, false);
1104}
1105
1106static void f2fs_free_inode(struct inode *inode)
1107{
1108 fscrypt_free_inode(inode);
1109 kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
1110}
1111
1112static void destroy_percpu_info(struct f2fs_sb_info *sbi)
1113{
1114 percpu_counter_destroy(&sbi->alloc_valid_block_count);
1115 percpu_counter_destroy(&sbi->total_valid_inode_count);
1116}
1117
1118static void destroy_device_list(struct f2fs_sb_info *sbi)
1119{
1120 int i;
1121
1122 for (i = 0; i < sbi->s_ndevs; i++) {
1123 blkdev_put(FDEV(i).bdev, FMODE_EXCL);
1124#ifdef CONFIG_BLK_DEV_ZONED
1125 kvfree(FDEV(i).blkz_seq);
1126#endif
1127 }
1128 kvfree(sbi->devs);
1129}
1130
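/*
 * Tear down a mounted f2fs instance: write a final checkpoint if needed,
 * drain discards, drop the node/meta inodes, and free managers and caches.
 */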
1131static void f2fs_put_super(struct super_block *sb)
1132{
1133 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1134 int i;
1135 bool dropped;
1136
1137 f2fs_quota_off_umount(sb);
1138
	/* prevent remaining shrinker jobs */
1140 mutex_lock(&sbi->umount_mutex);
1141
	/*
	 * We don't need a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done by umount, we
	 * still need to write a clean checkpoint here.
	 */
1147 if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
1148 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
1149 struct cp_control cpc = {
1150 .reason = CP_UMOUNT,
1151 };
1152 f2fs_write_checkpoint(sbi, &cpc);
1153 }
1154
	/* be sure to wait for any on-going discard commands */
1156 dropped = f2fs_issue_discard_timeout(sbi);
1157
1158 if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
1159 !sbi->discard_blks && !dropped) {
1160 struct cp_control cpc = {
1161 .reason = CP_UMOUNT | CP_TRIMMED,
1162 };
1163 f2fs_write_checkpoint(sbi, &cpc);
1164 }
1165
	/*
	 * Normally the superblock is clean at this point, so release the
	 * remaining ino entries; this also covers the EIO case where the
	 * checkpoint above was skipped.
	 */
1170 f2fs_release_ino_entry(sbi, true);
1171
1172 f2fs_leave_shrinker(sbi);
1173 mutex_unlock(&sbi->umount_mutex);
1174
	/* flush any merged but not yet submitted writes */
1176 f2fs_flush_merged_writes(sbi);
1177
1178 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
1179
1180 f2fs_bug_on(sbi, sbi->fsync_node_num);
1181
1182 iput(sbi->node_inode);
1183 sbi->node_inode = NULL;
1184
1185 iput(sbi->meta_inode);
1186 sbi->meta_inode = NULL;
1187
	/*
	 * iput() can update stat information if f2fs_write_checkpoint()
	 * above failed, so only destroy the stats now.
	 */
1192 f2fs_destroy_stats(sbi);
1193
	/* destroy f2fs internal modules */
1195 f2fs_destroy_node_manager(sbi);
1196 f2fs_destroy_segment_manager(sbi);
1197
1198 f2fs_destroy_post_read_wq(sbi);
1199
1200 kvfree(sbi->ckpt);
1201
1202 f2fs_unregister_sysfs(sbi);
1203
1204 sb->s_fs_info = NULL;
1205 if (sbi->s_chksum_driver)
1206 crypto_free_shash(sbi->s_chksum_driver);
1207 kvfree(sbi->raw_super);
1208
1209 destroy_device_list(sbi);
1210 f2fs_destroy_xattr_caches(sbi);
1211 mempool_destroy(sbi->write_io_dummy);
1212#ifdef CONFIG_QUOTA
1213 for (i = 0; i < MAXQUOTAS; i++)
1214 kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
1215#endif
1216 destroy_percpu_info(sbi);
1217 for (i = 0; i < NR_PAGE_TYPE; i++)
1218 kvfree(sbi->write_io[i]);
1219#ifdef CONFIG_UNICODE
1220 utf8_unload(sbi->s_encoding);
1221#endif
1222 kvfree(sbi);
1223}
1224
1225int f2fs_sync_fs(struct super_block *sb, int sync)
1226{
1227 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1228 int err = 0;
1229
1230 if (unlikely(f2fs_cp_error(sbi)))
1231 return 0;
1232 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
1233 return 0;
1234
1235 trace_f2fs_sync_fs(sb, sync);
1236
1237 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1238 return -EAGAIN;
1239
1240 if (sync) {
1241 struct cp_control cpc;
1242
1243 cpc.reason = __get_cp_reason(sbi);
1244
1245 down_write(&sbi->gc_lock);
1246 err = f2fs_write_checkpoint(sbi, &cpc);
1247 up_write(&sbi->gc_lock);
1248 }
1249 f2fs_trace_ios(NULL, 1);
1250
1251 return err;
1252}
1253
1254static int f2fs_freeze(struct super_block *sb)
1255{
1256 if (f2fs_readonly(sb))
1257 return 0;
1258
	/* IO error happened before */
1260 if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
1261 return -EIO;
1262
	/* must be clean, since sync_filesystem() was already called */
1264 if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
1265 return -EINVAL;
1266 return 0;
1267}
1268
1269static int f2fs_unfreeze(struct super_block *sb)
1270{
1271 return 0;
1272}
1273
1274#ifdef CONFIG_QUOTA
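/*
 * Clamp the statfs block and inode counts to the project quota limits
 * (soft or hard, whichever is the lower non-zero value) of the given
 * project ID.
 */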
1275static int f2fs_statfs_project(struct super_block *sb,
1276 kprojid_t projid, struct kstatfs *buf)
1277{
1278 struct kqid qid;
1279 struct dquot *dquot;
1280 u64 limit;
1281 u64 curblock;
1282
1283 qid = make_kqid_projid(projid);
1284 dquot = dqget(sb, qid);
1285 if (IS_ERR(dquot))
1286 return PTR_ERR(dquot);
1287 spin_lock(&dquot->dq_dqb_lock);
1288
1289 limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
1290 dquot->dq_dqb.dqb_bhardlimit);
1291 if (limit)
1292 limit >>= sb->s_blocksize_bits;
1293
1294 if (limit && buf->f_blocks > limit) {
1295 curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
1296 buf->f_blocks = limit;
1297 buf->f_bfree = buf->f_bavail =
1298 (buf->f_blocks > curblock) ?
1299 (buf->f_blocks - curblock) : 0;
1300 }
1301
1302 limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
1303 dquot->dq_dqb.dqb_ihardlimit);
1304
1305 if (limit && buf->f_files > limit) {
1306 buf->f_files = limit;
1307 buf->f_ffree =
1308 (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
1309 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
1310 }
1311
1312 spin_unlock(&dquot->dq_dqb_lock);
1313 dqput(dquot);
1314 return 0;
1315}
1316#endif
1317
1318static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
1319{
1320 struct super_block *sb = dentry->d_sb;
1321 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1322 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
1323 block_t total_count, user_block_count, start_count;
1324 u64 avail_node_count;
1325
1326 total_count = le64_to_cpu(sbi->raw_super->block_count);
1327 user_block_count = sbi->user_block_count;
1328 start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
1329 buf->f_type = F2FS_SUPER_MAGIC;
1330 buf->f_bsize = sbi->blocksize;
1331
1332 buf->f_blocks = total_count - start_count;
1333 buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
1334 sbi->current_reserved_blocks;
1335
1336 spin_lock(&sbi->stat_lock);
1337 if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
1338 buf->f_bfree = 0;
1339 else
1340 buf->f_bfree -= sbi->unusable_block_count;
1341 spin_unlock(&sbi->stat_lock);
1342
1343 if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
1344 buf->f_bavail = buf->f_bfree -
1345 F2FS_OPTION(sbi).root_reserved_blocks;
1346 else
1347 buf->f_bavail = 0;
1348
1349 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
1350
1351 if (avail_node_count > user_block_count) {
1352 buf->f_files = user_block_count;
1353 buf->f_ffree = buf->f_bavail;
1354 } else {
1355 buf->f_files = avail_node_count;
1356 buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
1357 buf->f_bavail);
1358 }
1359
1360 buf->f_namelen = F2FS_NAME_LEN;
1361 buf->f_fsid.val[0] = (u32)id;
1362 buf->f_fsid.val[1] = (u32)(id >> 32);
1363
1364#ifdef CONFIG_QUOTA
1365 if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
1366 sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
1367 f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
1368 }
1369#endif
1370 return 0;
1371}
1372
1373static inline void f2fs_show_quota_options(struct seq_file *seq,
1374 struct super_block *sb)
1375{
1376#ifdef CONFIG_QUOTA
1377 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1378
1379 if (F2FS_OPTION(sbi).s_jquota_fmt) {
1380 char *fmtname = "";
1381
1382 switch (F2FS_OPTION(sbi).s_jquota_fmt) {
1383 case QFMT_VFS_OLD:
1384 fmtname = "vfsold";
1385 break;
1386 case QFMT_VFS_V0:
1387 fmtname = "vfsv0";
1388 break;
1389 case QFMT_VFS_V1:
1390 fmtname = "vfsv1";
1391 break;
1392 }
1393 seq_printf(seq, ",jqfmt=%s", fmtname);
1394 }
1395
1396 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
1397 seq_show_option(seq, "usrjquota",
1398 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
1399
1400 if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
1401 seq_show_option(seq, "grpjquota",
1402 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
1403
1404 if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
1405 seq_show_option(seq, "prjjquota",
1406 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
1407#endif
1408}
1409
1410static inline void f2fs_show_compress_options(struct seq_file *seq,
1411 struct super_block *sb)
1412{
1413 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1414 char *algtype = "";
1415 int i;
1416
1417 if (!f2fs_sb_has_compression(sbi))
1418 return;
1419
1420 switch (F2FS_OPTION(sbi).compress_algorithm) {
1421 case COMPRESS_LZO:
1422 algtype = "lzo";
1423 break;
1424 case COMPRESS_LZ4:
1425 algtype = "lz4";
1426 break;
1427 case COMPRESS_ZSTD:
1428 algtype = "zstd";
1429 break;
1430 }
1431 seq_printf(seq, ",compress_algorithm=%s", algtype);
1432
1433 seq_printf(seq, ",compress_log_size=%u",
1434 F2FS_OPTION(sbi).compress_log_size);
1435
1436 for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
1437 seq_printf(seq, ",compress_extension=%s",
1438 F2FS_OPTION(sbi).extensions[i]);
1439 }
1440}
1441
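/* Emit the effective mount options for /proc/mounts and mount(8). */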
1442static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
1443{
1444 struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
1445
1446 if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
1447 seq_printf(seq, ",background_gc=%s", "sync");
1448 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
1449 seq_printf(seq, ",background_gc=%s", "on");
1450 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
1451 seq_printf(seq, ",background_gc=%s", "off");
1452
1453 if (test_opt(sbi, DISABLE_ROLL_FORWARD))
1454 seq_puts(seq, ",disable_roll_forward");
1455 if (test_opt(sbi, NORECOVERY))
1456 seq_puts(seq, ",norecovery");
1457 if (test_opt(sbi, DISCARD))
1458 seq_puts(seq, ",discard");
1459 else
1460 seq_puts(seq, ",nodiscard");
1461 if (test_opt(sbi, NOHEAP))
1462 seq_puts(seq, ",no_heap");
1463 else
1464 seq_puts(seq, ",heap");
1465#ifdef CONFIG_F2FS_FS_XATTR
1466 if (test_opt(sbi, XATTR_USER))
1467 seq_puts(seq, ",user_xattr");
1468 else
1469 seq_puts(seq, ",nouser_xattr");
1470 if (test_opt(sbi, INLINE_XATTR))
1471 seq_puts(seq, ",inline_xattr");
1472 else
1473 seq_puts(seq, ",noinline_xattr");
1474 if (test_opt(sbi, INLINE_XATTR_SIZE))
1475 seq_printf(seq, ",inline_xattr_size=%u",
1476 F2FS_OPTION(sbi).inline_xattr_size);
1477#endif
1478#ifdef CONFIG_F2FS_FS_POSIX_ACL
1479 if (test_opt(sbi, POSIX_ACL))
1480 seq_puts(seq, ",acl");
1481 else
1482 seq_puts(seq, ",noacl");
1483#endif
1484 if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
1485 seq_puts(seq, ",disable_ext_identify");
1486 if (test_opt(sbi, INLINE_DATA))
1487 seq_puts(seq, ",inline_data");
1488 else
1489 seq_puts(seq, ",noinline_data");
1490 if (test_opt(sbi, INLINE_DENTRY))
1491 seq_puts(seq, ",inline_dentry");
1492 else
1493 seq_puts(seq, ",noinline_dentry");
1494 if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
1495 seq_puts(seq, ",flush_merge");
1496 if (test_opt(sbi, NOBARRIER))
1497 seq_puts(seq, ",nobarrier");
1498 if (test_opt(sbi, FASTBOOT))
1499 seq_puts(seq, ",fastboot");
1500 if (test_opt(sbi, EXTENT_CACHE))
1501 seq_puts(seq, ",extent_cache");
1502 else
1503 seq_puts(seq, ",noextent_cache");
1504 if (test_opt(sbi, DATA_FLUSH))
1505 seq_puts(seq, ",data_flush");
1506
1507 seq_puts(seq, ",mode=");
1508 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
1509 seq_puts(seq, "adaptive");
1510 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
1511 seq_puts(seq, "lfs");
1512 seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
1513 if (test_opt(sbi, RESERVE_ROOT))
1514 seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
1515 F2FS_OPTION(sbi).root_reserved_blocks,
1516 from_kuid_munged(&init_user_ns,
1517 F2FS_OPTION(sbi).s_resuid),
1518 from_kgid_munged(&init_user_ns,
1519 F2FS_OPTION(sbi).s_resgid));
1520 if (F2FS_IO_SIZE_BITS(sbi))
1521 seq_printf(seq, ",io_bits=%u",
1522 F2FS_OPTION(sbi).write_io_size_bits);
1523#ifdef CONFIG_F2FS_FAULT_INJECTION
1524 if (test_opt(sbi, FAULT_INJECTION)) {
1525 seq_printf(seq, ",fault_injection=%u",
1526 F2FS_OPTION(sbi).fault_info.inject_rate);
1527 seq_printf(seq, ",fault_type=%u",
1528 F2FS_OPTION(sbi).fault_info.inject_type);
1529 }
1530#endif
1531#ifdef CONFIG_QUOTA
1532 if (test_opt(sbi, QUOTA))
1533 seq_puts(seq, ",quota");
1534 if (test_opt(sbi, USRQUOTA))
1535 seq_puts(seq, ",usrquota");
1536 if (test_opt(sbi, GRPQUOTA))
1537 seq_puts(seq, ",grpquota");
1538 if (test_opt(sbi, PRJQUOTA))
1539 seq_puts(seq, ",prjquota");
1540#endif
1541 f2fs_show_quota_options(seq, sbi->sb);
1542 if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
1543 seq_printf(seq, ",whint_mode=%s", "user-based");
1544 else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
1545 seq_printf(seq, ",whint_mode=%s", "fs-based");
1546#ifdef CONFIG_FS_ENCRYPTION
1547 if (F2FS_OPTION(sbi).test_dummy_encryption)
1548 seq_puts(seq, ",test_dummy_encryption");
1549#endif
1550
1551 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
1552 seq_printf(seq, ",alloc_mode=%s", "default");
1553 else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
1554 seq_printf(seq, ",alloc_mode=%s", "reuse");
1555
1556 if (test_opt(sbi, DISABLE_CHECKPOINT))
1557 seq_printf(seq, ",checkpoint=disable:%u",
1558 F2FS_OPTION(sbi).unusable_cap);
1559 if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
1560 seq_printf(seq, ",fsync_mode=%s", "posix");
1561 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
1562 seq_printf(seq, ",fsync_mode=%s", "strict");
1563 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
1564 seq_printf(seq, ",fsync_mode=%s", "nobarrier");
1565
1566 f2fs_show_compress_options(seq, sbi->sb);
1567 return 0;
1568}
1569
1570static void default_options(struct f2fs_sb_info *sbi)
1571{
	/* init some FS parameters */
1573 F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
1574 F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
1575 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
1576 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
1577 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
1578 F2FS_OPTION(sbi).test_dummy_encryption = false;
1579 F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
1580 F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
1581 F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
1582 F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
1583 F2FS_OPTION(sbi).compress_ext_cnt = 0;
1584 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
1585
1586 set_opt(sbi, INLINE_XATTR);
1587 set_opt(sbi, INLINE_DATA);
1588 set_opt(sbi, INLINE_DENTRY);
1589 set_opt(sbi, EXTENT_CACHE);
1590 set_opt(sbi, NOHEAP);
1591 clear_opt(sbi, DISABLE_CHECKPOINT);
1592 F2FS_OPTION(sbi).unusable_cap = 0;
1593 sbi->sb->s_flags |= SB_LAZYTIME;
1594 set_opt(sbi, FLUSH_MERGE);
1595 set_opt(sbi, DISCARD);
1596 if (f2fs_sb_has_blkzoned(sbi))
1597 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
1598 else
1599 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
1600
1601#ifdef CONFIG_F2FS_FS_XATTR
1602 set_opt(sbi, XATTR_USER);
1603#endif
1604#ifdef CONFIG_F2FS_FS_POSIX_ACL
1605 set_opt(sbi, POSIX_ACL);
1606#endif
1607
1608 f2fs_build_fault_attr(sbi, 0, 0);
1609}
1610
1611#ifdef CONFIG_QUOTA
1612static int f2fs_enable_quotas(struct super_block *sb);
1613#endif
1614
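/*
 * checkpoint=disable: garbage-collect within DISABLE_TIME, sync the
 * filesystem, verify the unusable-block budget, then write a CP_PAUSE
 * checkpoint and record how many blocks stay unusable while disabled.
 */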
1615static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
1616{
1617 unsigned int s_flags = sbi->sb->s_flags;
1618 struct cp_control cpc;
1619 int err = 0;
1620 int ret;
1621 block_t unusable;
1622
1623 if (s_flags & SB_RDONLY) {
1624 f2fs_err(sbi, "checkpoint=disable on readonly fs");
1625 return -EINVAL;
1626 }
1627 sbi->sb->s_flags |= SB_ACTIVE;
1628
1629 f2fs_update_time(sbi, DISABLE_TIME);
1630
1631 while (!f2fs_time_over(sbi, DISABLE_TIME)) {
1632 down_write(&sbi->gc_lock);
1633 err = f2fs_gc(sbi, true, false, NULL_SEGNO);
1634 if (err == -ENODATA) {
1635 err = 0;
1636 break;
1637 }
1638 if (err && err != -EAGAIN)
1639 break;
1640 }
1641
1642 ret = sync_filesystem(sbi->sb);
1643 if (ret || err) {
		err = ret ? ret : err;
1645 goto restore_flag;
1646 }
1647
1648 unusable = f2fs_get_unusable_blocks(sbi);
1649 if (f2fs_disable_cp_again(sbi, unusable)) {
1650 err = -EAGAIN;
1651 goto restore_flag;
1652 }
1653
1654 down_write(&sbi->gc_lock);
1655 cpc.reason = CP_PAUSE;
1656 set_sbi_flag(sbi, SBI_CP_DISABLED);
1657 err = f2fs_write_checkpoint(sbi, &cpc);
1658 if (err)
1659 goto out_unlock;
1660
1661 spin_lock(&sbi->stat_lock);
1662 sbi->unusable_block_count = unusable;
1663 spin_unlock(&sbi->stat_lock);
1664
1665out_unlock:
1666 up_write(&sbi->gc_lock);
1667restore_flag:
1668 sbi->sb->s_flags = s_flags;
1669 return err;
1670}
1671
1672static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
1673{
1674 down_write(&sbi->gc_lock);
1675 f2fs_dirty_to_prefree(sbi);
1676
1677 clear_sbi_flag(sbi, SBI_CP_DISABLED);
1678 set_sbi_flag(sbi, SBI_IS_DIRTY);
1679 up_write(&sbi->gc_lock);
1680
1681 f2fs_sync_fs(sbi->sb, 1);
1682}
1683
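/*
 * Handle mount -o remount: re-parse options against defaults, refuse
 * changes that cannot be applied live (extent_cache, io_bits, disabling
 * checkpoint on a read-only mount), and start or stop the GC and flush
 * threads to match the new state.
 */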
1684static int f2fs_remount(struct super_block *sb, int *flags, char *data)
1685{
1686 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1687 struct f2fs_mount_info org_mount_opt;
1688 unsigned long old_sb_flags;
1689 int err;
1690 bool need_restart_gc = false;
1691 bool need_stop_gc = false;
1692 bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
1693 bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
1694 bool no_io_align = !F2FS_IO_ALIGNED(sbi);
1695 bool checkpoint_changed;
1696#ifdef CONFIG_QUOTA
1697 int i, j;
1698#endif
1699
	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
1704 org_mount_opt = sbi->mount_opt;
1705 old_sb_flags = sb->s_flags;
1706
1707#ifdef CONFIG_QUOTA
1708 org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
1709 for (i = 0; i < MAXQUOTAS; i++) {
1710 if (F2FS_OPTION(sbi).s_qf_names[i]) {
1711 org_mount_opt.s_qf_names[i] =
1712 kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
1713 GFP_KERNEL);
1714 if (!org_mount_opt.s_qf_names[i]) {
1715 for (j = 0; j < i; j++)
1716 kvfree(org_mount_opt.s_qf_names[j]);
1717 return -ENOMEM;
1718 }
1719 } else {
1720 org_mount_opt.s_qf_names[i] = NULL;
1721 }
1722 }
1723#endif
1724
	/* recover superblocks we couldn't write due to previous RO mount */
1726 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
1727 err = f2fs_commit_super(sbi, false);
1728 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
1729 err);
1730 if (!err)
1731 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
1732 }
1733
1734 default_options(sbi);
1735
	/* parse mount options */
1737 err = parse_options(sb, data);
1738 if (err)
1739 goto restore_opts;
1740 checkpoint_changed =
1741 disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);
1742
	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
1747 if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
1748 goto skip;
1749
1750#ifdef CONFIG_QUOTA
1751 if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
1752 err = dquot_suspend(sb, -1);
1753 if (err < 0)
1754 goto restore_opts;
1755 } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
1757 sb->s_flags &= ~SB_RDONLY;
1758 if (sb_any_quota_suspended(sb)) {
1759 dquot_resume(sb, -1);
1760 } else if (f2fs_sb_has_quota_ino(sbi)) {
1761 err = f2fs_enable_quotas(sb);
1762 if (err)
1763 goto restore_opts;
1764 }
1765 }
1766#endif
1767
1768 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
1769 err = -EINVAL;
1770 f2fs_warn(sbi, "switch extent_cache option is not allowed");
1771 goto restore_opts;
1772 }
1773
1774 if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
1775 err = -EINVAL;
1776 f2fs_warn(sbi, "switch io_bits option is not allowed");
1777 goto restore_opts;
1778 }
1779
1780 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
1781 err = -EINVAL;
1782 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
1783 goto restore_opts;
1784 }
1785
	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc=off is passed in the mount
	 * option. Also sync the filesystem.
	 */
1791 if ((*flags & SB_RDONLY) ||
1792 F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) {
1793 if (sbi->gc_thread) {
1794 f2fs_stop_gc_thread(sbi);
1795 need_restart_gc = true;
1796 }
1797 } else if (!sbi->gc_thread) {
1798 err = f2fs_start_gc_thread(sbi);
1799 if (err)
1800 goto restore_opts;
1801 need_stop_gc = true;
1802 }
1803
1804 if (*flags & SB_RDONLY ||
1805 F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
1806 writeback_inodes_sb(sb, WB_REASON_SYNC);
1807 sync_inodes_sb(sb);
1808
1809 set_sbi_flag(sbi, SBI_IS_DIRTY);
1810 set_sbi_flag(sbi, SBI_IS_CLOSE);
1811 f2fs_sync_fs(sb, 1);
1812 clear_sbi_flag(sbi, SBI_IS_CLOSE);
1813 }
1814
1815 if (checkpoint_changed) {
1816 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
1817 err = f2fs_disable_checkpoint(sbi);
1818 if (err)
1819 goto restore_gc;
1820 } else {
1821 f2fs_enable_checkpoint(sbi);
1822 }
1823 }
1824
	/*
	 * We stop the issue-flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in the mount option.
	 */
1829 if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
1830 clear_opt(sbi, FLUSH_MERGE);
1831 f2fs_destroy_flush_cmd_control(sbi, false);
1832 } else {
1833 err = f2fs_create_flush_cmd_control(sbi);
1834 if (err)
1835 goto restore_gc;
1836 }
1837skip:
1838#ifdef CONFIG_QUOTA
	/* Release old quota file names */
1840 for (i = 0; i < MAXQUOTAS; i++)
1841 kvfree(org_mount_opt.s_qf_names[i]);
1842#endif
1843
1844 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
1845 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
1846
1847 limit_reserve_root(sbi);
1848 *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
1849 return 0;
1850restore_gc:
1851 if (need_restart_gc) {
1852 if (f2fs_start_gc_thread(sbi))
1853 f2fs_warn(sbi, "background gc thread has stopped");
1854 } else if (need_stop_gc) {
1855 f2fs_stop_gc_thread(sbi);
1856 }
1857restore_opts:
1858#ifdef CONFIG_QUOTA
1859 F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
1860 for (i = 0; i < MAXQUOTAS; i++) {
1861 kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
1862 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
1863 }
1864#endif
1865 sbi->mount_opt = org_mount_opt;
1866 sb->s_flags = old_sb_flags;
1867 return err;
1868}
1869
1870#ifdef CONFIG_QUOTA
/* Read data from quotafile */
1872static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
1873 size_t len, loff_t off)
1874{
1875 struct inode *inode = sb_dqopt(sb)->files[type];
1876 struct address_space *mapping = inode->i_mapping;
1877 block_t blkidx = F2FS_BYTES_TO_BLK(off);
1878 int offset = off & (sb->s_blocksize - 1);
1879 int tocopy;
1880 size_t toread;
1881 loff_t i_size = i_size_read(inode);
1882 struct page *page;
1883 char *kaddr;
1884
1885 if (off > i_size)
1886 return 0;
1887
1888 if (off + len > i_size)
1889 len = i_size - off;
1890 toread = len;
1891 while (toread > 0) {
1892 tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
1893repeat:
1894 page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
1895 if (IS_ERR(page)) {
1896 if (PTR_ERR(page) == -ENOMEM) {
1897 congestion_wait(BLK_RW_ASYNC,
1898 DEFAULT_IO_TIMEOUT);
1899 goto repeat;
1900 }
1901 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
1902 return PTR_ERR(page);
1903 }
1904
1905 lock_page(page);
1906
1907 if (unlikely(page->mapping != mapping)) {
1908 f2fs_put_page(page, 1);
1909 goto repeat;
1910 }
1911 if (unlikely(!PageUptodate(page))) {
1912 f2fs_put_page(page, 1);
1913 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
1914 return -EIO;
1915 }
1916
1917 kaddr = kmap_atomic(page);
1918 memcpy(data, kaddr + offset, tocopy);
1919 kunmap_atomic(kaddr);
1920 f2fs_put_page(page, 1);
1921
1922 offset = 0;
1923 toread -= tocopy;
1924 data += tocopy;
1925 blkidx++;
1926 }
1927 return len;
1928}
1929
/* Write to quotafile */
1931static ssize_t f2fs_quota_write(struct super_block *sb, int type,
1932 const char *data, size_t len, loff_t off)
1933{
1934 struct inode *inode = sb_dqopt(sb)->files[type];
1935 struct address_space *mapping = inode->i_mapping;
1936 const struct address_space_operations *a_ops = mapping->a_ops;
1937 int offset = off & (sb->s_blocksize - 1);
1938 size_t towrite = len;
1939 struct page *page;
1940 void *fsdata = NULL;
1941 char *kaddr;
1942 int err = 0;
1943 int tocopy;
1944
1945 while (towrite > 0) {
1946 tocopy = min_t(unsigned long, sb->s_blocksize - offset,
1947 towrite);
1948retry:
1949 err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
1950 &page, &fsdata);
1951 if (unlikely(err)) {
1952 if (err == -ENOMEM) {
1953 congestion_wait(BLK_RW_ASYNC,
1954 DEFAULT_IO_TIMEOUT);
1955 goto retry;
1956 }
1957 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
1958 break;
1959 }
1960
1961 kaddr = kmap_atomic(page);
1962 memcpy(kaddr + offset, data, tocopy);
1963 kunmap_atomic(kaddr);
1964 flush_dcache_page(page);
1965
1966 a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
1967 page, fsdata);
1968 offset = 0;
1969 towrite -= tocopy;
1970 off += tocopy;
1971 data += tocopy;
1972 cond_resched();
1973 }
1974
1975 if (len == towrite)
1976 return err;
1977 inode->i_mtime = inode->i_ctime = current_time(inode);
1978 f2fs_mark_inode_dirty_sync(inode, false);
1979 return len - towrite;
1980}
1981
1982static struct dquot **f2fs_get_dquots(struct inode *inode)
1983{
1984 return F2FS_I(inode)->i_dquot;
1985}
1986
1987static qsize_t *f2fs_get_reserved_space(struct inode *inode)
1988{
1989 return &F2FS_I(inode)->i_reserved_quota;
1990}
1991
1992static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
1993{
1994 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
1995 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
1996 return 0;
1997 }
1998
1999 return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
2000 F2FS_OPTION(sbi).s_jquota_fmt, type);
2001}
2002
2003int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
2004{
2005 int enabled = 0;
2006 int i, err;
2007
2008 if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
2009 err = f2fs_enable_quotas(sbi->sb);
2010 if (err) {
2011 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
2012 return 0;
2013 }
2014 return 1;
2015 }
2016
2017 for (i = 0; i < MAXQUOTAS; i++) {
2018 if (F2FS_OPTION(sbi).s_qf_names[i]) {
2019 err = f2fs_quota_on_mount(sbi, i);
2020 if (!err) {
2021 enabled = 1;
2022 continue;
2023 }
2024 f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
2025 err, i);
2026 }
2027 }
2028 return enabled;
2029}
2030
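/*
 * Enable one quota type from its hidden quota inode (quota_ino feature);
 * the inode is flagged S_NOQUOTA so quota accounting does not recurse
 * into the quota file itself.
 */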
2031static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
2032 unsigned int flags)
2033{
2034 struct inode *qf_inode;
2035 unsigned long qf_inum;
2036 int err;
2037
2038 BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
2039
2040 qf_inum = f2fs_qf_ino(sb, type);
2041 if (!qf_inum)
2042 return -EPERM;
2043
2044 qf_inode = f2fs_iget(sb, qf_inum);
2045 if (IS_ERR(qf_inode)) {
2046 f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
2047 return PTR_ERR(qf_inode);
2048 }
2049
	/* Don't account quota for quota files to avoid recursion */
2051 qf_inode->i_flags |= S_NOQUOTA;
2052 err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
2053 iput(qf_inode);
2054 return err;
2055}
2056
2057static int f2fs_enable_quotas(struct super_block *sb)
2058{
2059 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2060 int type, err = 0;
2061 unsigned long qf_inum;
2062 bool quota_mopt[MAXQUOTAS] = {
2063 test_opt(sbi, USRQUOTA),
2064 test_opt(sbi, GRPQUOTA),
2065 test_opt(sbi, PRJQUOTA),
2066 };
2067
2068 if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
2069 f2fs_err(sbi, "quota file may be corrupted, skip loading it");
2070 return 0;
2071 }
2072
2073 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
2074
2075 for (type = 0; type < MAXQUOTAS; type++) {
2076 qf_inum = f2fs_qf_ino(sb, type);
2077 if (qf_inum) {
2078 err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
2079 DQUOT_USAGE_ENABLED |
2080 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
2081 if (err) {
2082 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
2083 type, err);
2084 for (type--; type >= 0; type--)
2085 dquot_quota_off(sb, type);
2086 set_sbi_flag(F2FS_SB(sb),
2087 SBI_QUOTA_NEED_REPAIR);
2088 return err;
2089 }
2090 }
2091 }
2092 return 0;
2093}
2094
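/*
 * Write back dirty dquots and flush the quota files' page cache so a
 * following checkpoint (or userspace) sees consistent quota data; any
 * failure marks the quota files as needing repair.
 */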
2095int f2fs_quota_sync(struct super_block *sb, int type)
2096{
2097 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2098 struct quota_info *dqopt = sb_dqopt(sb);
2099 int cnt;
2100 int ret;
2101
	/*
	 * Hold cp_rwsem (f2fs_lock_op) across quota writeback so it cannot
	 * race with a checkpoint's block_operations(); quota_sem is taken
	 * inside it to serialize with other quota operations.
	 */
2111 f2fs_lock_op(sbi);
2112
2113 down_read(&sbi->quota_sem);
2114 ret = dquot_writeback_dquots(sb, type);
2115 if (ret)
2116 goto out;
2117
	/*
	 * Now that everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
2122 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2123 struct address_space *mapping;
2124
2125 if (type != -1 && cnt != type)
2126 continue;
2127 if (!sb_has_quota_active(sb, cnt))
2128 continue;
2129
2130 mapping = dqopt->files[cnt]->i_mapping;
2131
2132 ret = filemap_fdatawrite(mapping);
2133 if (ret)
2134 goto out;
2135
		/* with journalled quota, checkpoint keeps the file consistent */
2137 if (is_journalled_quota(sbi))
2138 continue;
2139
2140 ret = filemap_fdatawait(mapping);
2141 if (ret)
2142 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2143
2144 inode_lock(dqopt->files[cnt]);
2145 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
2146 inode_unlock(dqopt->files[cnt]);
2147 }
2148out:
2149 if (ret)
2150 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2151 up_read(&sbi->quota_sem);
2152 f2fs_unlock_op(sbi);
2153 return ret;
2154}
2155
2156static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
2157 const struct path *path)
2158{
2159 struct inode *inode;
2160 int err;
2161
	/* with the quota_ino feature, quota files are internal; refuse external ones */
2163 if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
2164 f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
2165 return -EBUSY;
2166 }
2167
2168 err = f2fs_quota_sync(sb, type);
2169 if (err)
2170 return err;
2171
2172 err = dquot_quota_on(sb, type, format_id, path);
2173 if (err)
2174 return err;
2175
2176 inode = d_inode(path->dentry);
2177
2178 inode_lock(inode);
2179 F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
2180 f2fs_set_inode_flags(inode);
2181 inode_unlock(inode);
2182 f2fs_mark_inode_dirty_sync(inode, false);
2183
2184 return 0;
2185}
2186
2187static int __f2fs_quota_off(struct super_block *sb, int type)
2188{
2189 struct inode *inode = sb_dqopt(sb)->files[type];
2190 int err;
2191
2192 if (!inode || !igrab(inode))
2193 return dquot_quota_off(sb, type);
2194
2195 err = f2fs_quota_sync(sb, type);
2196 if (err)
2197 goto out_put;
2198
2199 err = dquot_quota_off(sb, type);
2200 if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
2201 goto out_put;
2202
2203 inode_lock(inode);
2204 F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
2205 f2fs_set_inode_flags(inode);
2206 inode_unlock(inode);
2207 f2fs_mark_inode_dirty_sync(inode, false);
2208out_put:
2209 iput(inode);
2210 return err;
2211}
2212
2213static int f2fs_quota_off(struct super_block *sb, int type)
2214{
2215 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2216 int err;
2217
2218 err = __f2fs_quota_off(sb, type);
2219
	/*
	 * Turning journalled quota off via quotactl leaves its tracking
	 * incomplete; flag the filesystem so the quota state can be
	 * repaired later.
	 */
2225 if (is_journalled_quota(sbi))
2226 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2227 return err;
2228}
2229
2230void f2fs_quota_off_umount(struct super_block *sb)
2231{
2232 int type;
2233 int err;
2234
2235 for (type = 0; type < MAXQUOTAS; type++) {
2236 err = __f2fs_quota_off(sb, type);
2237 if (err) {
2238 int ret = dquot_quota_off(sb, type);
2239
2240 f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
2241 type, err, ret);
2242 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2243 }
2244 }
2245
	/* flush the quota updates made above before put_super tears things down */
2250 sync_filesystem(sb);
2251}
2252
2253static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
2254{
2255 struct quota_info *dqopt = sb_dqopt(sb);
2256 int type;
2257
2258 for (type = 0; type < MAXQUOTAS; type++) {
2259 if (!dqopt->files[type])
2260 continue;
2261 f2fs_inode_synced(dqopt->files[type]);
2262 }
2263}
2264
2265static int f2fs_dquot_commit(struct dquot *dquot)
2266{
2267 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2268 int ret;
2269
2270 down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
2271 ret = dquot_commit(dquot);
2272 if (ret < 0)
2273 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2274 up_read(&sbi->quota_sem);
2275 return ret;
2276}
2277
2278static int f2fs_dquot_acquire(struct dquot *dquot)
2279{
2280 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2281 int ret;
2282
2283 down_read(&sbi->quota_sem);
2284 ret = dquot_acquire(dquot);
2285 if (ret < 0)
2286 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2287 up_read(&sbi->quota_sem);
2288 return ret;
2289}
2290
2291static int f2fs_dquot_release(struct dquot *dquot)
2292{
2293 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2294 int ret = dquot_release(dquot);
2295
2296 if (ret < 0)
2297 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2298 return ret;
2299}
2300
2301static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
2302{
2303 struct super_block *sb = dquot->dq_sb;
2304 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2305 int ret = dquot_mark_dquot_dirty(dquot);
2306
	/* only journalled quota needs to be flushed at checkpoint time */
2308 if (is_journalled_quota(sbi))
2309 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
2310
2311 return ret;
2312}
2313
2314static int f2fs_dquot_commit_info(struct super_block *sb, int type)
2315{
2316 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2317 int ret = dquot_commit_info(sb, type);
2318
2319 if (ret < 0)
2320 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2321 return ret;
2322}
2323
2324static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
2325{
2326 *projid = F2FS_I(inode)->i_projid;
2327 return 0;
2328}
2329
2330static const struct dquot_operations f2fs_quota_operations = {
2331 .get_reserved_space = f2fs_get_reserved_space,
2332 .write_dquot = f2fs_dquot_commit,
2333 .acquire_dquot = f2fs_dquot_acquire,
2334 .release_dquot = f2fs_dquot_release,
2335 .mark_dirty = f2fs_dquot_mark_dquot_dirty,
2336 .write_info = f2fs_dquot_commit_info,
2337 .alloc_dquot = dquot_alloc,
2338 .destroy_dquot = dquot_destroy,
2339 .get_projid = f2fs_get_projid,
2340 .get_next_id = dquot_get_next_id,
2341};
2342
2343static const struct quotactl_ops f2fs_quotactl_ops = {
2344 .quota_on = f2fs_quota_on,
2345 .quota_off = f2fs_quota_off,
2346 .quota_sync = f2fs_quota_sync,
2347 .get_state = dquot_get_state,
2348 .set_info = dquot_set_dqinfo,
2349 .get_dqblk = dquot_get_dqblk,
2350 .set_dqblk = dquot_set_dqblk,
2351 .get_nextdqblk = dquot_get_next_dqblk,
2352};
2353#else
2354int f2fs_quota_sync(struct super_block *sb, int type)
2355{
2356 return 0;
2357}
2358
2359void f2fs_quota_off_umount(struct super_block *sb)
2360{
2361}
2362#endif
2363
2364static const struct super_operations f2fs_sops = {
2365 .alloc_inode = f2fs_alloc_inode,
2366 .free_inode = f2fs_free_inode,
2367 .drop_inode = f2fs_drop_inode,
2368 .write_inode = f2fs_write_inode,
2369 .dirty_inode = f2fs_dirty_inode,
2370 .show_options = f2fs_show_options,
2371#ifdef CONFIG_QUOTA
2372 .quota_read = f2fs_quota_read,
2373 .quota_write = f2fs_quota_write,
2374 .get_dquots = f2fs_get_dquots,
2375#endif
2376 .evict_inode = f2fs_evict_inode,
2377 .put_super = f2fs_put_super,
2378 .sync_fs = f2fs_sync_fs,
2379 .freeze_fs = f2fs_freeze,
2380 .unfreeze_fs = f2fs_unfreeze,
2381 .statfs = f2fs_statfs,
2382 .remount_fs = f2fs_remount,
2383};
2384
2385#ifdef CONFIG_FS_ENCRYPTION
2386static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
2387{
2388 return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2389 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2390 ctx, len, NULL);
2391}
2392
2393static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
2394 void *fs_data)
2395{
2396 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2397
	/*
	 * Encrypting the root directory is not allowed when the lost_found
	 * feature is enabled, because fsck expects an unencrypted
	 * lost+found directory to exist under the root.
	 */
2404 if (f2fs_sb_has_lost_found(sbi) &&
2405 inode->i_ino == F2FS_ROOT_INO(sbi))
2406 return -EPERM;
2407
2408 return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
2409 F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
2410 ctx, len, fs_data, XATTR_CREATE);
2411}
2412
2413static bool f2fs_dummy_context(struct inode *inode)
2414{
2415 return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
2416}
2417
2418static bool f2fs_has_stable_inodes(struct super_block *sb)
2419{
2420 return true;
2421}
2422
2423static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
2424 int *ino_bits_ret, int *lblk_bits_ret)
2425{
2426 *ino_bits_ret = 8 * sizeof(nid_t);
2427 *lblk_bits_ret = 8 * sizeof(block_t);
2428}
2429
2430static const struct fscrypt_operations f2fs_cryptops = {
2431 .key_prefix = "f2fs:",
2432 .get_context = f2fs_get_context,
2433 .set_context = f2fs_set_context,
2434 .dummy_context = f2fs_dummy_context,
2435 .empty_dir = f2fs_empty_dir,
2436 .max_namelen = F2FS_NAME_LEN,
2437 .has_stable_inodes = f2fs_has_stable_inodes,
2438 .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
2439};
2440#endif
2441
2442static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
2443 u64 ino, u32 generation)
2444{
2445 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2446 struct inode *inode;
2447
2448 if (f2fs_check_nid_range(sbi, ino))
2449 return ERR_PTR(-ESTALE);
2450
	/*
	 * f2fs_iget() is not quite right if the inode is currently
	 * unallocated; however it does enough sanity checking to reject
	 * stale inodes, so this is safe for NFS file handles.
	 */
2456 inode = f2fs_iget(sb, ino);
2457 if (IS_ERR(inode))
2458 return ERR_CAST(inode);
2459 if (unlikely(generation && inode->i_generation != generation)) {
		/* the inode was reused: stale file handle */
2461 iput(inode);
2462 return ERR_PTR(-ESTALE);
2463 }
2464 return inode;
2465}
2466
2467static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
2468 int fh_len, int fh_type)
2469{
2470 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
2471 f2fs_nfs_get_inode);
2472}
2473
2474static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
2475 int fh_len, int fh_type)
2476{
2477 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
2478 f2fs_nfs_get_inode);
2479}
2480
2481static const struct export_operations f2fs_export_ops = {
2482 .fh_to_dentry = f2fs_fh_to_dentry,
2483 .fh_to_parent = f2fs_fh_to_parent,
2484 .get_parent = f2fs_get_parent,
2485};
2486
2487static loff_t max_file_blocks(void)
2488{
2489 loff_t result = 0;
2490 loff_t leaf_count = DEF_ADDRS_PER_BLOCK;
2491
	/*
	 * Note: blocks addressed directly from the inode's i_addr are not
	 * counted here; only the two direct, two indirect and one double
	 * indirect node blocks contribute, which keeps the limit safe even
	 * as more of i_addr is reserved for other purposes.
	 */

	/* two direct node blocks */
2500 result += (leaf_count * 2);
2501
	/* two indirect node blocks */
2503 leaf_count *= NIDS_PER_BLOCK;
2504 result += (leaf_count * 2);
2505
	/* one double indirect node block */
2507 leaf_count *= NIDS_PER_BLOCK;
2508 result += leaf_count;
2509
2510 return result;
2511}
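
/*
 * Rough worked example (illustration only, not used by the code): assuming
 * DEF_ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018 and 4KB blocks, the sum
 * computed above is
 *   2 * 1018            (two direct node blocks)
 * + 2 * 1018 * 1018     (two indirect node blocks)
 * + 1018 * 1018 * 1018  (one double indirect node block)
 * = 1,057,052,516 blocks, i.e. roughly 3.9 TiB per file.
 */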
2512
2513static int __f2fs_commit_super(struct buffer_head *bh,
2514 struct f2fs_super_block *super)
2515{
2516 lock_buffer(bh);
2517 if (super)
2518 memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
2519 set_buffer_dirty(bh);
2520 unlock_buffer(bh);
2521
	/* superblock updates are rare, so always write with preflush + FUA */
2523 return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
2524}
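
/*
 * sanity_check_area_boundary() below verifies that the metadata areas
 * recorded in the superblock are laid out back to back:
 *
 *   [ CP | SIT | NAT | SSA | MAIN ]
 *
 * starting at segment0_blkaddr: each area's start address must equal the
 * previous area's start plus its size in blocks (segments << log_blocks_per_seg).
 */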
2525
2526static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2527 struct buffer_head *bh)
2528{
2529 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2530 (bh->b_data + F2FS_SUPER_OFFSET);
2531 struct super_block *sb = sbi->sb;
2532 u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2533 u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
2534 u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
2535 u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
2536 u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2537 u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2538 u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
2539 u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
2540 u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
2541 u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
2542 u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
2543 u32 segment_count = le32_to_cpu(raw_super->segment_count);
2544 u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2545 u64 main_end_blkaddr = main_blkaddr +
2546 (segment_count_main << log_blocks_per_seg);
2547 u64 seg_end_blkaddr = segment0_blkaddr +
2548 (segment_count << log_blocks_per_seg);
2549
2550 if (segment0_blkaddr != cp_blkaddr) {
2551 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
2552 segment0_blkaddr, cp_blkaddr);
2553 return true;
2554 }
2555
2556 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
2557 sit_blkaddr) {
2558 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
2559 cp_blkaddr, sit_blkaddr,
2560 segment_count_ckpt << log_blocks_per_seg);
2561 return true;
2562 }
2563
2564 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
2565 nat_blkaddr) {
2566 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
2567 sit_blkaddr, nat_blkaddr,
2568 segment_count_sit << log_blocks_per_seg);
2569 return true;
2570 }
2571
2572 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
2573 ssa_blkaddr) {
2574 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
2575 nat_blkaddr, ssa_blkaddr,
2576 segment_count_nat << log_blocks_per_seg);
2577 return true;
2578 }
2579
2580 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
2581 main_blkaddr) {
2582 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
2583 ssa_blkaddr, main_blkaddr,
2584 segment_count_ssa << log_blocks_per_seg);
2585 return true;
2586 }
2587
2588 if (main_end_blkaddr > seg_end_blkaddr) {
2589 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
2590 main_blkaddr,
2591 segment0_blkaddr +
2592 (segment_count << log_blocks_per_seg),
2593 segment_count_main << log_blocks_per_seg);
2594 return true;
2595 } else if (main_end_blkaddr < seg_end_blkaddr) {
2596 int err = 0;
2597 char *res;
2598
		/* always fix the in-memory segment_count first */
2600 raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
2601 segment0_blkaddr) >> log_blocks_per_seg);
2602
2603 if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
2604 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2605 res = "internally";
2606 } else {
2607 err = __f2fs_commit_super(bh, NULL);
2608 res = err ? "failed" : "done";
2609 }
2610 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%u) block(%u)",
2611 res, main_blkaddr,
2612 segment0_blkaddr +
2613 (segment_count << log_blocks_per_seg),
2614 segment_count_main << log_blocks_per_seg);
2615 if (err)
2616 return true;
2617 }
2618 return false;
2619}
2620
2621static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2622 struct buffer_head *bh)
2623{
2624 block_t segment_count, segs_per_sec, secs_per_zone;
2625 block_t total_sections, blocks_per_seg;
2626 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2627 (bh->b_data + F2FS_SUPER_OFFSET);
2628 unsigned int blocksize;
2629 size_t crc_offset = 0;
2630 __u32 crc = 0;
2631
2632 if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
2633 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
2634 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2635 return -EINVAL;
2636 }
2637
	/* check checksum_offset and crc in superblock */
2639 if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
2640 crc_offset = le32_to_cpu(raw_super->checksum_offset);
2641 if (crc_offset !=
2642 offsetof(struct f2fs_super_block, crc)) {
2643 f2fs_info(sbi, "Invalid SB checksum offset: %zu",
2644 crc_offset);
2645 return -EFSCORRUPTED;
2646 }
2647 crc = le32_to_cpu(raw_super->crc);
2648 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
2649 f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
2650 return -EFSCORRUPTED;
2651 }
2652 }
2653
	/* currently, only a 4KB page cache size is supported */
2655 if (F2FS_BLKSIZE != PAGE_SIZE) {
2656 f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
2657 PAGE_SIZE);
2658 return -EFSCORRUPTED;
2659 }
2660
	/* currently, only a 4KB block size is supported */
2662 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
2663 if (blocksize != F2FS_BLKSIZE) {
2664 f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
2665 blocksize);
2666 return -EFSCORRUPTED;
2667 }
2668
	/* check log blocks per segment (must be 9, i.e. 512 blocks) */
2670 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
2671 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
2672 le32_to_cpu(raw_super->log_blocks_per_seg));
2673 return -EFSCORRUPTED;
2674 }
2675
	/* sector sizes from 512 to 4096 bytes are supported */
2677 if (le32_to_cpu(raw_super->log_sectorsize) >
2678 F2FS_MAX_LOG_SECTOR_SIZE ||
2679 le32_to_cpu(raw_super->log_sectorsize) <
2680 F2FS_MIN_LOG_SECTOR_SIZE) {
2681 f2fs_info(sbi, "Invalid log sectorsize (%u)",
2682 le32_to_cpu(raw_super->log_sectorsize));
2683 return -EFSCORRUPTED;
2684 }
2685 if (le32_to_cpu(raw_super->log_sectors_per_block) +
2686 le32_to_cpu(raw_super->log_sectorsize) !=
2687 F2FS_MAX_LOG_SECTOR_SIZE) {
2688 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
2689 le32_to_cpu(raw_super->log_sectors_per_block),
2690 le32_to_cpu(raw_super->log_sectorsize));
2691 return -EFSCORRUPTED;
2692 }
2693
2694 segment_count = le32_to_cpu(raw_super->segment_count);
2695 segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
2696 secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
2697 total_sections = le32_to_cpu(raw_super->section_count);
2698
	/* blocks_per_seg is fixed to 512 by the log_blocks_per_seg check above */
2700 blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
2701
2702 if (segment_count > F2FS_MAX_SEGMENT ||
2703 segment_count < F2FS_MIN_SEGMENTS) {
2704 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
2705 return -EFSCORRUPTED;
2706 }
2707
2708 if (total_sections > segment_count ||
2709 total_sections < F2FS_MIN_SEGMENTS ||
2710 segs_per_sec > segment_count || !segs_per_sec) {
2711 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
2712 segment_count, total_sections, segs_per_sec);
2713 return -EFSCORRUPTED;
2714 }
2715
2716 if ((segment_count / segs_per_sec) < total_sections) {
2717 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
2718 segment_count, segs_per_sec, total_sections);
2719 return -EFSCORRUPTED;
2720 }
2721
2722 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
2723 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
2724 segment_count, le64_to_cpu(raw_super->block_count));
2725 return -EFSCORRUPTED;
2726 }
2727
2728 if (RDEV(0).path[0]) {
2729 block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
2730 int i = 1;
2731
2732 while (i < MAX_DEVICES && RDEV(i).path[0]) {
2733 dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
2734 i++;
2735 }
2736 if (segment_count != dev_seg_count) {
2737 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
2738 segment_count, dev_seg_count);
2739 return -EFSCORRUPTED;
2740 }
2741 }
2742
2743 if (secs_per_zone > total_sections || !secs_per_zone) {
2744 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
2745 secs_per_zone, total_sections);
2746 return -EFSCORRUPTED;
2747 }
2748 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
2749 raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
2750 (le32_to_cpu(raw_super->extension_count) +
2751 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
2752 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
2753 le32_to_cpu(raw_super->extension_count),
2754 raw_super->hot_ext_count,
2755 F2FS_MAX_EXTENSION);
2756 return -EFSCORRUPTED;
2757 }
2758
2759 if (le32_to_cpu(raw_super->cp_payload) >
2760 (blocks_per_seg - F2FS_CP_PACKS)) {
2761 f2fs_info(sbi, "Insane cp_payload (%u > %u)",
2762 le32_to_cpu(raw_super->cp_payload),
2763 blocks_per_seg - F2FS_CP_PACKS);
2764 return -EFSCORRUPTED;
2765 }
2766
	/* check reserved ino info */
2768 if (le32_to_cpu(raw_super->node_ino) != 1 ||
2769 le32_to_cpu(raw_super->meta_ino) != 2 ||
2770 le32_to_cpu(raw_super->root_ino) != 3) {
2771 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
2772 le32_to_cpu(raw_super->node_ino),
2773 le32_to_cpu(raw_super->meta_ino),
2774 le32_to_cpu(raw_super->root_ino));
2775 return -EFSCORRUPTED;
2776 }
2777
	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
2779 if (sanity_check_area_boundary(sbi, bh))
2780 return -EFSCORRUPTED;
2781
2782 return 0;
2783}
2784
2785int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2786{
2787 unsigned int total, fsmeta;
2788 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2789 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2790 unsigned int ovp_segments, reserved_segments;
2791 unsigned int main_segs, blocks_per_seg;
2792 unsigned int sit_segs, nat_segs;
2793 unsigned int sit_bitmap_size, nat_bitmap_size;
2794 unsigned int log_blocks_per_seg;
2795 unsigned int segment_count_main;
2796 unsigned int cp_pack_start_sum, cp_payload;
2797 block_t user_block_count, valid_user_blocks;
2798 block_t avail_node_count, valid_node_count;
2799 int i, j;
2800
2801 total = le32_to_cpu(raw_super->segment_count);
2802 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
2803 sit_segs = le32_to_cpu(raw_super->segment_count_sit);
2804 fsmeta += sit_segs;
2805 nat_segs = le32_to_cpu(raw_super->segment_count_nat);
2806 fsmeta += nat_segs;
2807 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
2808 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
2809
2810 if (unlikely(fsmeta >= total))
2811 return 1;
2812
2813 ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
2814 reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
2815
2816 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
2817 ovp_segments == 0 || reserved_segments == 0)) {
2818 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
2819 return 1;
2820 }
2821
2822 user_block_count = le64_to_cpu(ckpt->user_block_count);
2823 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
2824 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2825 if (!user_block_count || user_block_count >=
2826 segment_count_main << log_blocks_per_seg) {
2827 f2fs_err(sbi, "Wrong user_block_count: %u",
2828 user_block_count);
2829 return 1;
2830 }
2831
2832 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
2833 if (valid_user_blocks > user_block_count) {
2834 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
2835 valid_user_blocks, user_block_count);
2836 return 1;
2837 }
2838
2839 valid_node_count = le32_to_cpu(ckpt->valid_node_count);
2840 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
2841 if (valid_node_count > avail_node_count) {
2842 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
2843 valid_node_count, avail_node_count);
2844 return 1;
2845 }
2846
2847 main_segs = le32_to_cpu(raw_super->segment_count_main);
2848 blocks_per_seg = sbi->blocks_per_seg;
2849
2850 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
2851 if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
2852 le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
2853 return 1;
2854 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
2855 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
2856 le32_to_cpu(ckpt->cur_node_segno[j])) {
2857 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
2858 i, j,
2859 le32_to_cpu(ckpt->cur_node_segno[i]));
2860 return 1;
2861 }
2862 }
2863 }
2864 for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
2865 if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
2866 le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
2867 return 1;
2868 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
2869 if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
2870 le32_to_cpu(ckpt->cur_data_segno[j])) {
2871 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
2872 i, j,
2873 le32_to_cpu(ckpt->cur_data_segno[i]));
2874 return 1;
2875 }
2876 }
2877 }
2878 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
2879 for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
2880 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
2881 le32_to_cpu(ckpt->cur_data_segno[j])) {
2882 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
2883 i, j,
2884 le32_to_cpu(ckpt->cur_node_segno[i]));
2885 return 1;
2886 }
2887 }
2888 }
2889
2890 sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2891 nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2892
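	/*
	 * Each SIT/NAT block is kept in two copies (selected by the version
	 * bitmap), so only half of the SIT/NAT segments hold distinct blocks;
	 * one bit per such block gives the "/ 2" and "/ 8" (bits to bytes).
	 * E.g. sit_segs = 2 with 512-block segments gives a 64-byte bitmap.
	 */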
2893 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
2894 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
2895 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
2896 sit_bitmap_size, nat_bitmap_size);
2897 return 1;
2898 }
2899
2900 cp_pack_start_sum = __start_sum_addr(sbi);
2901 cp_payload = __cp_payload(sbi);
2902 if (cp_pack_start_sum < cp_payload + 1 ||
2903 cp_pack_start_sum > blocks_per_seg - 1 -
2904 NR_CURSEG_TYPE) {
2905 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
2906 cp_pack_start_sum);
2907 return 1;
2908 }
2909
2910 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
2911 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
2912 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
2913 "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
2914 "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
2915 le32_to_cpu(ckpt->checksum_offset));
2916 return 1;
2917 }
2918
2919 if (unlikely(f2fs_cp_error(sbi))) {
2920 f2fs_err(sbi, "A bug case: need to run fsck");
2921 return 1;
2922 }
2923 return 0;
2924}
2925
2926static void init_sb_info(struct f2fs_sb_info *sbi)
2927{
2928 struct f2fs_super_block *raw_super = sbi->raw_super;
2929 int i;
2930
2931 sbi->log_sectors_per_block =
2932 le32_to_cpu(raw_super->log_sectors_per_block);
2933 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
2934 sbi->blocksize = 1 << sbi->log_blocksize;
2935 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2936 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
2937 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
2938 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
2939 sbi->total_sections = le32_to_cpu(raw_super->section_count);
2940 sbi->total_node_count =
2941 (le32_to_cpu(raw_super->segment_count_nat) / 2)
2942 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
2943 sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
2944 sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
2945 sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
2946 sbi->cur_victim_sec = NULL_SECNO;
2947 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
2948 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
2949 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
2950 sbi->migration_granularity = sbi->segs_per_sec;
2951
2952 sbi->dir_level = DEF_DIR_LEVEL;
2953 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
2954 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
2955 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
2956 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
2957 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
2958 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
2959 DEF_UMOUNT_DISCARD_TIMEOUT;
2960 clear_sbi_flag(sbi, SBI_NEED_FSCK);
2961
2962 for (i = 0; i < NR_COUNT_TYPE; i++)
2963 atomic_set(&sbi->nr_pages[i], 0);
2964
2965 for (i = 0; i < META; i++)
2966 atomic_set(&sbi->wb_sync_req[i], 0);
2967
2968 INIT_LIST_HEAD(&sbi->s_list);
2969 mutex_init(&sbi->umount_mutex);
2970 init_rwsem(&sbi->io_order_lock);
2971 spin_lock_init(&sbi->cp_lock);
2972
2973 sbi->dirty_device = 0;
2974 spin_lock_init(&sbi->dev_lock);
2975
2976 init_rwsem(&sbi->sb_lock);
2977 init_rwsem(&sbi->pin_sem);
2978}
2979
2980static int init_percpu_info(struct f2fs_sb_info *sbi)
2981{
2982 int err;
2983
2984 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
2985 if (err)
2986 return err;
2987
2988 err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
2989 GFP_KERNEL);
2990 if (err)
2991 percpu_counter_destroy(&sbi->alloc_valid_block_count);
2992
2993 return err;
2994}
2995
2996#ifdef CONFIG_BLK_DEV_ZONED
2997static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
2998 void *data)
2999{
3000 struct f2fs_dev_info *dev = data;
3001
3002 if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
3003 set_bit(idx, dev->blkz_seq);
3004 return 0;
3005}
3006
3007static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
3008{
3009 struct block_device *bdev = FDEV(devi).bdev;
3010 sector_t nr_sectors = bdev->bd_part->nr_sects;
3011 int ret;
3012
3013 if (!f2fs_sb_has_blkzoned(sbi))
3014 return 0;
3015
3016 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
3017 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
3018 return -EINVAL;
3019 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
3020 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
3021 __ilog2_u32(sbi->blocks_per_blkz))
3022 return -EINVAL;
3023 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
3024 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
3025 sbi->log_blocks_per_blkz;
3026 if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
3027 FDEV(devi).nr_blkz++;
3028
3029 FDEV(devi).blkz_seq = f2fs_kzalloc(sbi,
3030 BITS_TO_LONGS(FDEV(devi).nr_blkz)
3031 * sizeof(unsigned long),
3032 GFP_KERNEL);
3033 if (!FDEV(devi).blkz_seq)
3034 return -ENOMEM;
3035
	/* mark which zones are sequential-write-required */
3037 ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
3038 &FDEV(devi));
3039 if (ret < 0)
3040 return ret;
3041
3042 return 0;
3043}
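
/*
 * For illustration only (actual values depend on the device): a zoned device
 * with 256 MiB zones and 4KB blocks gives bdev_zone_sectors() == 524288,
 * so blocks_per_blkz == 65536 and log_blocks_per_blkz == 16.
 */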
3044#endif
3045
/*
 * Read the raw f2fs superblock. There are two copies of the superblock
 * (block 0 and block 1); read both and use the first valid one. If either
 * copy is broken, tell the caller through *recovery so it can be rewritten.
 */
3052static int read_raw_super_block(struct f2fs_sb_info *sbi,
3053 struct f2fs_super_block **raw_super,
3054 int *valid_super_block, int *recovery)
3055{
3056 struct super_block *sb = sbi->sb;
3057 int block;
3058 struct buffer_head *bh;
3059 struct f2fs_super_block *super;
3060 int err = 0;
3061
3062 super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
3063 if (!super)
3064 return -ENOMEM;
3065
3066 for (block = 0; block < 2; block++) {
3067 bh = sb_bread(sb, block);
3068 if (!bh) {
3069 f2fs_err(sbi, "Unable to read %dth superblock",
3070 block + 1);
3071 err = -EIO;
3072 *recovery = 1;
3073 continue;
3074 }
3075
		/* sanity checking of raw super */
3077 err = sanity_check_raw_super(sbi, bh);
3078 if (err) {
3079 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
3080 block + 1);
3081 brelse(bh);
3082 *recovery = 1;
3083 continue;
3084 }
3085
3086 if (!*raw_super) {
3087 memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
3088 sizeof(*super));
3089 *valid_super_block = block;
3090 *raw_super = super;
3091 }
3092 brelse(bh);
3093 }
3094
	/* no valid superblock found in either copy */
3096 if (!*raw_super)
3097 kvfree(super);
3098 else
3099 err = 0;
3100
3101 return err;
3102}
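
/*
 * f2fs_commit_super() below writes the backup superblock copy first and the
 * currently valid copy second, so a crash in between still leaves one intact
 * copy for read_raw_super_block() to fall back on.
 */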
3103
3104int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
3105{
3106 struct buffer_head *bh;
3107 __u32 crc = 0;
3108 int err;
3109
3110 if ((recover && f2fs_readonly(sbi->sb)) ||
3111 bdev_read_only(sbi->sb->s_bdev)) {
3112 set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3113 return -EROFS;
3114 }
3115
	/* update the superblock checksum before writing it out */
3117 if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
3118 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
3119 offsetof(struct f2fs_super_block, crc));
3120 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
3121 }
3122
	/* write the backup superblock copy first */
3124 bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
3125 if (!bh)
3126 return -EIO;
3127 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3128 brelse(bh);
3129
	/* on the recovery path (or on error), skip rewriting the valid copy */
3131 if (recover || err)
3132 return err;
3133
	/* then write the currently valid superblock copy */
3135 bh = sb_bread(sbi->sb, sbi->valid_super_block);
3136 if (!bh)
3137 return -EIO;
3138 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3139 brelse(bh);
3140 return err;
3141}
3142
3143static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
3144{
3145 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3146 unsigned int max_devices = MAX_DEVICES;
3147 int i;
3148
3149
3150 if (!RDEV(0).path[0]) {
3151 if (!bdev_is_zoned(sbi->sb->s_bdev))
3152 return 0;
3153 max_devices = 1;
3154 }
3155
	/*
	 * Initialize multi-device information, or single zoned block
	 * device information.
	 */
3160 sbi->devs = f2fs_kzalloc(sbi,
3161 array_size(max_devices,
3162 sizeof(struct f2fs_dev_info)),
3163 GFP_KERNEL);
3164 if (!sbi->devs)
3165 return -ENOMEM;
3166
3167 for (i = 0; i < max_devices; i++) {
3168
3169 if (i > 0 && !RDEV(i).path[0])
3170 break;
3171
3172 if (max_devices == 1) {
			/* single zoned block device mount */
3174 FDEV(0).bdev =
3175 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
3176 sbi->sb->s_mode, sbi->sb->s_type);
3177 } else {
			/* multi-device mount: copy per-device info from the superblock */
3179 memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
3180 FDEV(i).total_segments =
3181 le32_to_cpu(RDEV(i).total_segments);
3182 if (i == 0) {
3183 FDEV(i).start_blk = 0;
3184 FDEV(i).end_blk = FDEV(i).start_blk +
3185 (FDEV(i).total_segments <<
3186 sbi->log_blocks_per_seg) - 1 +
3187 le32_to_cpu(raw_super->segment0_blkaddr);
3188 } else {
3189 FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
3190 FDEV(i).end_blk = FDEV(i).start_blk +
3191 (FDEV(i).total_segments <<
3192 sbi->log_blocks_per_seg) - 1;
3193 }
3194 FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
3195 sbi->sb->s_mode, sbi->sb->s_type);
3196 }
3197 if (IS_ERR(FDEV(i).bdev))
3198 return PTR_ERR(FDEV(i).bdev);
3199
3200
3201 sbi->s_ndevs = i + 1;
3202
3203#ifdef CONFIG_BLK_DEV_ZONED
3204 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
3205 !f2fs_sb_has_blkzoned(sbi)) {
3206 f2fs_err(sbi, "Zoned block device feature not enabled");
3207 return -EINVAL;
3208 }
3209 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
3210 if (init_blkz_info(sbi, i)) {
3211 f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
3212 return -EINVAL;
3213 }
3214 if (max_devices == 1)
3215 break;
3216 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
3217 i, FDEV(i).path,
3218 FDEV(i).total_segments,
3219 FDEV(i).start_blk, FDEV(i).end_blk,
3220 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
3221 "Host-aware" : "Host-managed");
3222 continue;
3223 }
3224#endif
3225 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
3226 i, FDEV(i).path,
3227 FDEV(i).total_segments,
3228 FDEV(i).start_blk, FDEV(i).end_blk);
3229 }
3230 f2fs_info(sbi,
3231 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
3232 return 0;
3233}
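
/*
 * Worked example of the block-range assignment above (hypothetical numbers):
 * for a two-device image with 512 segments per device, 512-block segments
 * and segment0_blkaddr == 512, device 0 covers blocks 0..262655 and
 * device 1 covers blocks 262656..524799.
 */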
3234
3235static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
3236{
3237#ifdef CONFIG_UNICODE
3238 if (f2fs_sb_has_casefold(sbi) && !sbi->s_encoding) {
3239 const struct f2fs_sb_encodings *encoding_info;
3240 struct unicode_map *encoding;
3241 __u16 encoding_flags;
3242
3243 if (f2fs_sb_has_encrypt(sbi)) {
3244 f2fs_err(sbi,
3245 "Can't mount with encoding and encryption");
3246 return -EINVAL;
3247 }
3248
3249 if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
3250 &encoding_flags)) {
3251 f2fs_err(sbi,
3252 "Encoding requested by superblock is unknown");
3253 return -EINVAL;
3254 }
3255
3256 encoding = utf8_load(encoding_info->version);
3257 if (IS_ERR(encoding)) {
3258 f2fs_err(sbi,
3259 "can't mount with superblock charset: %s-%s "
3260 "not supported by the kernel. flags: 0x%x.",
3261 encoding_info->name, encoding_info->version,
3262 encoding_flags);
3263 return PTR_ERR(encoding);
3264 }
3265 f2fs_info(sbi, "Using encoding defined by superblock: "
3266 "%s-%s with flags 0x%hx", encoding_info->name,
3267 encoding_info->version?:"\b", encoding_flags);
3268
3269 sbi->s_encoding = encoding;
3270 sbi->s_encoding_flags = encoding_flags;
3271 sbi->sb->s_d_op = &f2fs_dentry_ops;
3272 }
3273#else
3274 if (f2fs_sb_has_casefold(sbi)) {
3275 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
3276 return -EINVAL;
3277 }
3278#endif
3279 return 0;
3280}
3281
3282static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
3283{
3284 struct f2fs_sm_info *sm_i = SM_I(sbi);
3285
	/* adjust parameters according to the volume size */
3287 if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
3288 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
3289 sm_i->dcc_info->discard_granularity = 1;
3290 sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
3291 }
3292
3293 sbi->readdir_ra = 1;
3294}
3295
3296static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
3297{
3298 struct f2fs_sb_info *sbi;
3299 struct f2fs_super_block *raw_super;
3300 struct inode *root;
3301 int err;
3302 bool skip_recovery = false, need_fsck = false;
3303 char *options = NULL;
3304 int recovery, i, valid_super_block;
3305 struct curseg_info *seg_i;
3306 int retry_cnt = 1;
3307
3308try_onemore:
3309 err = -EINVAL;
3310 raw_super = NULL;
3311 valid_super_block = -1;
3312 recovery = 0;
3313
3314
3315 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
3316 if (!sbi)
3317 return -ENOMEM;
3318
3319 sbi->sb = sb;
3320
3321
3322 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
3323 if (IS_ERR(sbi->s_chksum_driver)) {
3324 f2fs_err(sbi, "Cannot load crc32 driver.");
3325 err = PTR_ERR(sbi->s_chksum_driver);
3326 sbi->s_chksum_driver = NULL;
3327 goto free_sbi;
3328 }
3329
3330
3331 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
3332 f2fs_err(sbi, "unable to set blocksize");
3333 goto free_sbi;
3334 }
3335
3336 err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
3337 &recovery);
3338 if (err)
3339 goto free_sbi;
3340
3341 sb->s_fs_info = sbi;
3342 sbi->raw_super = raw_super;
3343
3344
3345 if (f2fs_sb_has_inode_chksum(sbi))
3346 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
3347 sizeof(raw_super->uuid));
3348
	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
3354#ifndef CONFIG_BLK_DEV_ZONED
3355 if (f2fs_sb_has_blkzoned(sbi)) {
3356 f2fs_err(sbi, "Zoned block device support is not enabled");
3357 err = -EOPNOTSUPP;
3358 goto free_sb_buf;
3359 }
3360#endif
3361 default_options(sbi);
3362
3363 options = kstrdup((const char *)data, GFP_KERNEL);
3364 if (data && !options) {
3365 err = -ENOMEM;
3366 goto free_sb_buf;
3367 }
3368
3369 err = parse_options(sb, options);
3370 if (err)
3371 goto free_options;
3372
3373 sbi->max_file_blocks = max_file_blocks();
3374 sb->s_maxbytes = sbi->max_file_blocks <<
3375 le32_to_cpu(raw_super->log_blocksize);
3376 sb->s_max_links = F2FS_LINK_MAX;
3377
3378 err = f2fs_setup_casefold(sbi);
3379 if (err)
3380 goto free_options;
3381
3382#ifdef CONFIG_QUOTA
3383 sb->dq_op = &f2fs_quota_operations;
3384 sb->s_qcop = &f2fs_quotactl_ops;
3385 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
3386
3387 if (f2fs_sb_has_quota_ino(sbi)) {
3388 for (i = 0; i < MAXQUOTAS; i++) {
3389 if (f2fs_qf_ino(sbi->sb, i))
3390 sbi->nquota_files++;
3391 }
3392 }
3393#endif
3394
3395 sb->s_op = &f2fs_sops;
3396#ifdef CONFIG_FS_ENCRYPTION
3397 sb->s_cop = &f2fs_cryptops;
3398#endif
3399#ifdef CONFIG_FS_VERITY
3400 sb->s_vop = &f2fs_verityops;
3401#endif
3402 sb->s_xattr = f2fs_xattr_handlers;
3403 sb->s_export_op = &f2fs_export_ops;
3404 sb->s_magic = F2FS_SUPER_MAGIC;
3405 sb->s_time_gran = 1;
3406 sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
3407 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
3408 memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
3409 sb->s_iflags |= SB_I_CGROUPWB;
3410
3411
3412 sbi->valid_super_block = valid_super_block;
3413 init_rwsem(&sbi->gc_lock);
3414 mutex_init(&sbi->writepages);
3415 mutex_init(&sbi->cp_mutex);
3416 mutex_init(&sbi->resize_mutex);
3417 init_rwsem(&sbi->node_write);
3418 init_rwsem(&sbi->node_change);
3419
	/* disallow all the data/node/meta page writes until recovery is done */
3421 set_sbi_flag(sbi, SBI_POR_DOING);
3422 spin_lock_init(&sbi->stat_lock);
3423
3424
3425 spin_lock_init(&sbi->iostat_lock);
3426 sbi->iostat_enable = false;
3427
3428 for (i = 0; i < NR_PAGE_TYPE; i++) {
3429 int n = (i == META) ? 1: NR_TEMP_TYPE;
3430 int j;
3431
3432 sbi->write_io[i] =
3433 f2fs_kmalloc(sbi,
3434 array_size(n,
3435 sizeof(struct f2fs_bio_info)),
3436 GFP_KERNEL);
3437 if (!sbi->write_io[i]) {
3438 err = -ENOMEM;
3439 goto free_bio_info;
3440 }
3441
3442 for (j = HOT; j < n; j++) {
3443 init_rwsem(&sbi->write_io[i][j].io_rwsem);
3444 sbi->write_io[i][j].sbi = sbi;
3445 sbi->write_io[i][j].bio = NULL;
3446 spin_lock_init(&sbi->write_io[i][j].io_lock);
3447 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
3448 INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
3449 init_rwsem(&sbi->write_io[i][j].bio_list_lock);
3450 }
3451 }
3452
3453 init_rwsem(&sbi->cp_rwsem);
3454 init_rwsem(&sbi->quota_sem);
3455 init_waitqueue_head(&sbi->cp_wait);
3456 init_sb_info(sbi);
3457
3458 err = init_percpu_info(sbi);
3459 if (err)
3460 goto free_bio_info;
3461
3462 if (F2FS_IO_ALIGNED(sbi)) {
3463 sbi->write_io_dummy =
3464 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
3465 if (!sbi->write_io_dummy) {
3466 err = -ENOMEM;
3467 goto free_percpu;
3468 }
3469 }
3470
3471
3472 err = f2fs_init_xattr_caches(sbi);
3473 if (err)
3474 goto free_io_dummy;
3475
	/* get an inode for the meta area */
3477 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
3478 if (IS_ERR(sbi->meta_inode)) {
3479 f2fs_err(sbi, "Failed to read F2FS meta data inode");
3480 err = PTR_ERR(sbi->meta_inode);
3481 goto free_xattr_cache;
3482 }
3483
3484 err = f2fs_get_valid_checkpoint(sbi);
3485 if (err) {
3486 f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
3487 goto free_meta_inode;
3488 }
3489
3490 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
3491 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3492 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
3493 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
3494 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
3495 }
3496
3497 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
3498 set_sbi_flag(sbi, SBI_NEED_FSCK);
3499
	/* initialize the device list */
3501 err = f2fs_scan_devices(sbi);
3502 if (err) {
3503 f2fs_err(sbi, "Failed to find devices");
3504 goto free_devices;
3505 }
3506
3507 err = f2fs_init_post_read_wq(sbi);
3508 if (err) {
3509 f2fs_err(sbi, "Failed to initialize post read workqueue");
3510 goto free_devices;
3511 }
3512
3513 sbi->total_valid_node_count =
3514 le32_to_cpu(sbi->ckpt->valid_node_count);
3515 percpu_counter_set(&sbi->total_valid_inode_count,
3516 le32_to_cpu(sbi->ckpt->valid_inode_count));
3517 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
3518 sbi->total_valid_block_count =
3519 le64_to_cpu(sbi->ckpt->valid_block_count);
3520 sbi->last_valid_block_count = sbi->total_valid_block_count;
3521 sbi->reserved_blocks = 0;
3522 sbi->current_reserved_blocks = 0;
3523 limit_reserve_root(sbi);
3524
3525 for (i = 0; i < NR_INODE_TYPE; i++) {
3526 INIT_LIST_HEAD(&sbi->inode_list[i]);
3527 spin_lock_init(&sbi->inode_lock[i]);
3528 }
3529 mutex_init(&sbi->flush_lock);
3530
3531 f2fs_init_extent_cache_info(sbi);
3532
3533 f2fs_init_ino_entry_info(sbi);
3534
3535 f2fs_init_fsync_node_info(sbi);
3536
	/* set up f2fs internal modules */
3538 err = f2fs_build_segment_manager(sbi);
3539 if (err) {
3540 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
3541 err);
3542 goto free_sm;
3543 }
3544 err = f2fs_build_node_manager(sbi);
3545 if (err) {
3546 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
3547 err);
3548 goto free_nm;
3549 }
3550
	/* record the sector count at mount time for write statistics */
3552 if (sb->s_bdev->bd_part)
3553 sbi->sectors_written_start =
3554 (u64)part_stat_read(sb->s_bdev->bd_part,
3555 sectors[STAT_WRITE]);
3556
	/* read accumulated write IO statistics if they exist */
3558 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
3559 if (__exist_node_summaries(sbi))
3560 sbi->kbytes_written =
3561 le64_to_cpu(seg_i->journal->info.kbytes_written);
3562
3563 f2fs_build_gc_manager(sbi);
3564
3565 err = f2fs_build_stats(sbi);
3566 if (err)
3567 goto free_nm;
3568
	/* get an inode for the node area */
3570 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
3571 if (IS_ERR(sbi->node_inode)) {
3572 f2fs_err(sbi, "Failed to read node inode");
3573 err = PTR_ERR(sbi->node_inode);
3574 goto free_stats;
3575 }
3576
	/* read the root inode and dentry */
3578 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
3579 if (IS_ERR(root)) {
3580 f2fs_err(sbi, "Failed to read root inode");
3581 err = PTR_ERR(root);
3582 goto free_node_inode;
3583 }
3584 if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
3585 !root->i_size || !root->i_nlink) {
3586 iput(root);
3587 err = -EINVAL;
3588 goto free_node_inode;
3589 }
3590
3591 sb->s_root = d_make_root(root);
3592 if (!sb->s_root) {
3593 err = -ENOMEM;
3594 goto free_node_inode;
3595 }
3596
3597 err = f2fs_register_sysfs(sbi);
3598 if (err)
3599 goto free_root_inode;
3600
3601#ifdef CONFIG_QUOTA
	/* enable quota usage during mount */
3603 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
3604 err = f2fs_enable_quotas(sb);
3605 if (err)
3606 f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
3607 }
3608#endif
3609
3610 err = f2fs_recover_orphan_inodes(sbi);
3611 if (err)
3612 goto free_meta;
3613
3614 if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
3615 goto reset_checkpoint;
3616
	/* recover fsynced data */
3618 if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
3619 !test_opt(sbi, NORECOVERY)) {
		/*
		 * The mount must fail when the device is read-only and the
		 * previous checkpoint was not taken by a clean shutdown,
		 * since roll-forward recovery would need write access.
		 */
3624 if (f2fs_hw_is_readonly(sbi)) {
3625 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
3626 err = -EROFS;
3627 f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
3628 goto free_meta;
3629 }
3630 f2fs_info(sbi, "write access unavailable, skipping recovery");
3631 goto reset_checkpoint;
3632 }
3633
3634 if (need_fsck)
3635 set_sbi_flag(sbi, SBI_NEED_FSCK);
3636
3637 if (skip_recovery)
3638 goto reset_checkpoint;
3639
3640 err = f2fs_recover_fsync_data(sbi, false);
3641 if (err < 0) {
3642 if (err != -ENOMEM)
3643 skip_recovery = true;
3644 need_fsck = true;
3645 f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
3646 err);
3647 goto free_meta;
3648 }
3649 } else {
3650 err = f2fs_recover_fsync_data(sbi, true);
3651
3652 if (!f2fs_readonly(sb) && err > 0) {
3653 err = -EINVAL;
3654 f2fs_err(sbi, "Need to recover fsync data");
3655 goto free_meta;
3656 }
3657 }
3658
	/*
	 * If the filesystem is writable and fsync data recovery succeeded,
	 * check the write pointer consistency of zoned block devices.
	 */
3663 if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
3664 err = f2fs_check_write_pointer(sbi);
3665 if (err)
3666 goto free_meta;
3667 }
3668
3669reset_checkpoint:
	/* recovery (if any) is finished; allow page writes again */
3671 clear_sbi_flag(sbi, SBI_POR_DOING);
3672
3673 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
3674 err = f2fs_disable_checkpoint(sbi);
3675 if (err)
3676 goto sync_free_meta;
3677 } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
3678 f2fs_enable_checkpoint(sbi);
3679 }
3680
	/*
	 * Start the background GC thread unless background GC is disabled
	 * or the filesystem is mounted read-only.
	 */
3685 if (F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF && !f2fs_readonly(sb)) {
		/* after POR it is safe to run background GC */
3687 err = f2fs_start_gc_thread(sbi);
3688 if (err)
3689 goto sync_free_meta;
3690 }
3691 kvfree(options);
3692
	/* recover a broken superblock copy detected during mount */
3694 if (recovery) {
3695 err = f2fs_commit_super(sbi, true);
3696 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
3697 sbi->valid_super_block ? 1 : 2, err);
3698 }
3699
3700 f2fs_join_shrinker(sbi);
3701
3702 f2fs_tuning_parameters(sbi);
3703
3704 f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
3705 cur_cp_version(F2FS_CKPT(sbi)));
3706 f2fs_update_time(sbi, CP_TIME);
3707 f2fs_update_time(sbi, REQ_TIME);
3708 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
3709 return 0;
3710
3711sync_free_meta:
	/* it is now safe to flush all pending data */
3713 sync_filesystem(sbi->sb);
3714 retry_cnt = 0;
3715
3716free_meta:
3717#ifdef CONFIG_QUOTA
3718 f2fs_truncate_quota_inode_pages(sb);
3719 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
3720 f2fs_quota_off_umount(sbi->sb);
3721#endif
3722
	/*
	 * Drop any dirty meta pages left behind by the failed mount so the
	 * iput() calls below do not get stuck trying to write them back.
	 */
3728 truncate_inode_pages_final(META_MAPPING(sbi));
3729
3730 evict_inodes(sb);
3731 f2fs_unregister_sysfs(sbi);
3732free_root_inode:
3733 dput(sb->s_root);
3734 sb->s_root = NULL;
3735free_node_inode:
3736 f2fs_release_ino_entry(sbi, true);
3737 truncate_inode_pages_final(NODE_MAPPING(sbi));
3738 iput(sbi->node_inode);
3739 sbi->node_inode = NULL;
3740free_stats:
3741 f2fs_destroy_stats(sbi);
3742free_nm:
3743 f2fs_destroy_node_manager(sbi);
3744free_sm:
3745 f2fs_destroy_segment_manager(sbi);
3746 f2fs_destroy_post_read_wq(sbi);
3747free_devices:
3748 destroy_device_list(sbi);
3749 kvfree(sbi->ckpt);
3750free_meta_inode:
3751 make_bad_inode(sbi->meta_inode);
3752 iput(sbi->meta_inode);
3753 sbi->meta_inode = NULL;
3754free_xattr_cache:
3755 f2fs_destroy_xattr_caches(sbi);
3756free_io_dummy:
3757 mempool_destroy(sbi->write_io_dummy);
3758free_percpu:
3759 destroy_percpu_info(sbi);
3760free_bio_info:
3761 for (i = 0; i < NR_PAGE_TYPE; i++)
3762 kvfree(sbi->write_io[i]);
3763
3764#ifdef CONFIG_UNICODE
3765 utf8_unload(sbi->s_encoding);
3766#endif
3767free_options:
3768#ifdef CONFIG_QUOTA
3769 for (i = 0; i < MAXQUOTAS; i++)
3770 kvfree(F2FS_OPTION(sbi).s_qf_names[i]);
3771#endif
3772 kvfree(options);
3773free_sb_buf:
3774 kvfree(raw_super);
3775free_sbi:
3776 if (sbi->s_chksum_driver)
3777 crypto_free_shash(sbi->s_chksum_driver);
3778 kvfree(sbi);
3779
	/* give only one more chance before giving up the mount */
3781 if (retry_cnt > 0 && skip_recovery) {
3782 retry_cnt--;
3783 shrink_dcache_sb(sb);
3784 goto try_onemore;
3785 }
3786 return err;
3787}
3788
3789static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
3790 const char *dev_name, void *data)
3791{
3792 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
3793}
3794
3795static void kill_f2fs_super(struct super_block *sb)
3796{
3797 if (sb->s_root) {
3798 struct f2fs_sb_info *sbi = F2FS_SB(sb);
3799
3800 set_sbi_flag(sbi, SBI_IS_CLOSE);
3801 f2fs_stop_gc_thread(sbi);
3802 f2fs_stop_discard_thread(sbi);
3803
3804 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
3805 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
3806 struct cp_control cpc = {
3807 .reason = CP_UMOUNT,
3808 };
3809 f2fs_write_checkpoint(sbi, &cpc);
3810 }
3811
3812 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
3813 sb->s_flags &= ~SB_RDONLY;
3814 }
3815 kill_block_super(sb);
3816}
3817
3818static struct file_system_type f2fs_fs_type = {
3819 .owner = THIS_MODULE,
3820 .name = "f2fs",
3821 .mount = f2fs_mount,
3822 .kill_sb = kill_f2fs_super,
3823 .fs_flags = FS_REQUIRES_DEV,
3824};
3825MODULE_ALIAS_FS("f2fs");
3826
3827static int __init init_inodecache(void)
3828{
3829 f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
3830 sizeof(struct f2fs_inode_info), 0,
3831 SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
3832 if (!f2fs_inode_cachep)
3833 return -ENOMEM;
3834 return 0;
3835}
3836
3837static void destroy_inodecache(void)
3838{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
3843 rcu_barrier();
3844 kmem_cache_destroy(f2fs_inode_cachep);
3845}
3846
3847static int __init init_f2fs_fs(void)
3848{
3849 int err;
3850
3851 if (PAGE_SIZE != F2FS_BLKSIZE) {
3852 printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
3853 PAGE_SIZE, F2FS_BLKSIZE);
3854 return -EINVAL;
3855 }
3856
3857 f2fs_build_trace_ios();
3858
3859 err = init_inodecache();
3860 if (err)
3861 goto fail;
3862 err = f2fs_create_node_manager_caches();
3863 if (err)
3864 goto free_inodecache;
3865 err = f2fs_create_segment_manager_caches();
3866 if (err)
3867 goto free_node_manager_caches;
3868 err = f2fs_create_checkpoint_caches();
3869 if (err)
3870 goto free_segment_manager_caches;
3871 err = f2fs_create_extent_cache();
3872 if (err)
3873 goto free_checkpoint_caches;
3874 err = f2fs_init_sysfs();
3875 if (err)
3876 goto free_extent_cache;
3877 err = register_shrinker(&f2fs_shrinker_info);
3878 if (err)
3879 goto free_sysfs;
3880 err = register_filesystem(&f2fs_fs_type);
3881 if (err)
3882 goto free_shrinker;
3883 f2fs_create_root_stats();
3884 err = f2fs_init_post_read_processing();
3885 if (err)
3886 goto free_root_stats;
3887 err = f2fs_init_bio_entry_cache();
3888 if (err)
3889 goto free_post_read;
3890 err = f2fs_init_bioset();
3891 if (err)
3892 goto free_bio_entry_cache;
3893 return 0;
3894free_bio_entry_cache:
3895 f2fs_destroy_bio_entry_cache();
3896free_post_read:
3897 f2fs_destroy_post_read_processing();
3898free_root_stats:
3899 f2fs_destroy_root_stats();
3900 unregister_filesystem(&f2fs_fs_type);
3901free_shrinker:
3902 unregister_shrinker(&f2fs_shrinker_info);
3903free_sysfs:
3904 f2fs_exit_sysfs();
3905free_extent_cache:
3906 f2fs_destroy_extent_cache();
3907free_checkpoint_caches:
3908 f2fs_destroy_checkpoint_caches();
3909free_segment_manager_caches:
3910 f2fs_destroy_segment_manager_caches();
3911free_node_manager_caches:
3912 f2fs_destroy_node_manager_caches();
3913free_inodecache:
3914 destroy_inodecache();
3915fail:
3916 return err;
3917}
3918
3919static void __exit exit_f2fs_fs(void)
3920{
3921 f2fs_destroy_bioset();
3922 f2fs_destroy_bio_entry_cache();
3923 f2fs_destroy_post_read_processing();
3924 f2fs_destroy_root_stats();
3925 unregister_filesystem(&f2fs_fs_type);
3926 unregister_shrinker(&f2fs_shrinker_info);
3927 f2fs_exit_sysfs();
3928 f2fs_destroy_extent_cache();
3929 f2fs_destroy_checkpoint_caches();
3930 f2fs_destroy_segment_manager_caches();
3931 f2fs_destroy_node_manager_caches();
3932 destroy_inodecache();
3933 f2fs_destroy_trace_ios();
3934}
3935
3936module_init(init_f2fs_fs)
3937module_exit(exit_f2fs_fs)
3938
3939MODULE_AUTHOR("Samsung Electronics's Praesto Team");
3940MODULE_DESCRIPTION("Flash Friendly File System");
3941MODULE_LICENSE("GPL");
3942
3943