/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;

enum {
        Opt_gc_background,
        Opt_disable_roll_forward,
        Opt_discard,
        Opt_noheap,
        Opt_user_xattr,
        Opt_nouser_xattr,
        Opt_acl,
        Opt_noacl,
        Opt_active_logs,
        Opt_disable_ext_identify,
        Opt_inline_xattr,
        Opt_inline_data,
        Opt_inline_dentry,
        Opt_flush_merge,
        Opt_nobarrier,
        Opt_fastboot,
        Opt_err,
};

static match_table_t f2fs_tokens = {
        {Opt_gc_background, "background_gc=%s"},
        {Opt_disable_roll_forward, "disable_roll_forward"},
        {Opt_discard, "discard"},
        {Opt_noheap, "no_heap"},
        {Opt_user_xattr, "user_xattr"},
        {Opt_nouser_xattr, "nouser_xattr"},
        {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_active_logs, "active_logs=%u"},
        {Opt_disable_ext_identify, "disable_ext_identify"},
        {Opt_inline_xattr, "inline_xattr"},
        {Opt_inline_data, "inline_data"},
        {Opt_inline_dentry, "inline_dentry"},
        {Opt_flush_merge, "flush_merge"},
        {Opt_nobarrier, "nobarrier"},
        {Opt_fastboot, "fastboot"},
        {Opt_err, NULL},
};

/* Sysfs support for f2fs */
enum {
        GC_THREAD,      /* struct f2fs_gc_kthread */
        SM_INFO,        /* struct f2fs_sm_info */
        NM_INFO,        /* struct f2fs_nm_info */
        F2FS_SBI,       /* struct f2fs_sb_info */
};

struct f2fs_attr {
        struct attribute attr;
        ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
        ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
                         const char *, size_t);
        int struct_type;
        int offset;
};

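/*
 * Resolve a sysfs attribute's struct_type to the base address of the
 * corresponding in-memory structure; f2fs_sbi_show/f2fs_sbi_store then
 * access the unsigned int field found at attr->offset inside it.
 */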
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
        if (struct_type == GC_THREAD)
                return (unsigned char *)sbi->gc_thread;
        else if (struct_type == SM_INFO)
                return (unsigned char *)SM_I(sbi);
        else if (struct_type == NM_INFO)
                return (unsigned char *)NM_I(sbi);
        else if (struct_type == F2FS_SBI)
                return (unsigned char *)sbi;
        return NULL;
}

static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
                        struct f2fs_sb_info *sbi, char *buf)
{
        unsigned char *ptr = NULL;
        unsigned int *ui;

        ptr = __struct_ptr(sbi, a->struct_type);
        if (!ptr)
                return -EINVAL;

        ui = (unsigned int *)(ptr + a->offset);

        return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}

static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
                        struct f2fs_sb_info *sbi,
                        const char *buf, size_t count)
{
        unsigned char *ptr;
        unsigned long t;
        unsigned int *ui;
        ssize_t ret;

        ptr = __struct_ptr(sbi, a->struct_type);
        if (!ptr)
                return -EINVAL;

        ui = (unsigned int *)(ptr + a->offset);

        ret = kstrtoul(skip_spaces(buf), 0, &t);
        if (ret < 0)
                return ret;
        *ui = t;
        return count;
}

static ssize_t f2fs_attr_show(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
                                                                s_kobj);
        struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

        return a->show ? a->show(a, sbi, buf) : 0;
}

static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
                                                const char *buf, size_t len)
{
        struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
                                                                        s_kobj);
        struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

        return a->store ? a->store(a, sbi, buf, len) : 0;
}

static void f2fs_sb_release(struct kobject *kobj)
{
        struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
                                                                s_kobj);
        complete(&sbi->s_kobj_unregister);
}

#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {                   \
        .attr = {.name = __stringify(_name), .mode = _mode },   \
        .show   = _show,                                        \
        .store  = _store,                                       \
        .struct_type = _struct_type,                            \
        .offset = _offset                                       \
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)    \
        F2FS_ATTR_OFFSET(struct_type, name, 0644,               \
                f2fs_sbi_show, f2fs_sbi_store,                  \
                offsetof(struct struct_name, elname))

F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);

#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
        ATTR_LIST(gc_min_sleep_time),
        ATTR_LIST(gc_max_sleep_time),
        ATTR_LIST(gc_no_gc_sleep_time),
        ATTR_LIST(gc_idle),
        ATTR_LIST(reclaim_segments),
        ATTR_LIST(max_small_discards),
        ATTR_LIST(ipu_policy),
        ATTR_LIST(min_ipu_util),
        ATTR_LIST(min_fsync_blocks),
        ATTR_LIST(max_victim_search),
        ATTR_LIST(dir_level),
        ATTR_LIST(ram_thresh),
        NULL,
};

static const struct sysfs_ops f2fs_attr_ops = {
        .show   = f2fs_attr_show,
        .store  = f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
        .default_attrs  = f2fs_attrs,
        .sysfs_ops      = &f2fs_attr_ops,
        .release        = f2fs_sb_release,
};

void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
        va_end(args);
}

static void init_once(void *foo)
{
        struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

        inode_init_once(&fi->vfs_inode);
}

static int parse_options(struct super_block *sb, char *options)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        substring_t args[MAX_OPT_ARGS];
        char *p, *name;
        int arg = 0;

        if (!options)
                return 0;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                if (!*p)
                        continue;
                /*
                 * Initialize args struct so we know whether arg was
                 * found; some options take optional arguments.
                 */
                args[0].to = args[0].from = NULL;
                token = match_token(p, f2fs_tokens, args);

                switch (token) {
                case Opt_gc_background:
                        name = match_strdup(&args[0]);

                        if (!name)
                                return -ENOMEM;
                        if (strlen(name) == 2 && !strncmp(name, "on", 2))
                                set_opt(sbi, BG_GC);
                        else if (strlen(name) == 3 && !strncmp(name, "off", 3))
                                clear_opt(sbi, BG_GC);
                        else {
                                kfree(name);
                                return -EINVAL;
                        }
                        kfree(name);
                        break;
                case Opt_disable_roll_forward:
                        set_opt(sbi, DISABLE_ROLL_FORWARD);
                        break;
                case Opt_discard:
                        set_opt(sbi, DISCARD);
                        break;
                case Opt_noheap:
                        set_opt(sbi, NOHEAP);
                        break;
#ifdef CONFIG_F2FS_FS_XATTR
                case Opt_user_xattr:
                        set_opt(sbi, XATTR_USER);
                        break;
                case Opt_nouser_xattr:
                        clear_opt(sbi, XATTR_USER);
                        break;
                case Opt_inline_xattr:
                        set_opt(sbi, INLINE_XATTR);
                        break;
#else
                case Opt_user_xattr:
                        f2fs_msg(sb, KERN_INFO,
                                "user_xattr options not supported");
                        break;
                case Opt_nouser_xattr:
                        f2fs_msg(sb, KERN_INFO,
                                "nouser_xattr options not supported");
                        break;
                case Opt_inline_xattr:
                        f2fs_msg(sb, KERN_INFO,
                                "inline_xattr options not supported");
                        break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
                case Opt_acl:
                        set_opt(sbi, POSIX_ACL);
                        break;
                case Opt_noacl:
                        clear_opt(sbi, POSIX_ACL);
                        break;
#else
                case Opt_acl:
                        f2fs_msg(sb, KERN_INFO, "acl options not supported");
                        break;
                case Opt_noacl:
                        f2fs_msg(sb, KERN_INFO, "noacl options not supported");
                        break;
#endif
                case Opt_active_logs:
                        if (args->from && match_int(args, &arg))
                                return -EINVAL;
                        if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
                                return -EINVAL;
                        sbi->active_logs = arg;
                        break;
                case Opt_disable_ext_identify:
                        set_opt(sbi, DISABLE_EXT_IDENTIFY);
                        break;
                case Opt_inline_data:
                        set_opt(sbi, INLINE_DATA);
                        break;
                case Opt_inline_dentry:
                        set_opt(sbi, INLINE_DENTRY);
                        break;
                case Opt_flush_merge:
                        set_opt(sbi, FLUSH_MERGE);
                        break;
                case Opt_nobarrier:
                        set_opt(sbi, NOBARRIER);
                        break;
                case Opt_fastboot:
                        set_opt(sbi, FASTBOOT);
                        break;
                default:
                        f2fs_msg(sb, KERN_ERR,
                                "Unrecognized mount option \"%s\" or missing value",
                                p);
                        return -EINVAL;
                }
        }
        return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
        struct f2fs_inode_info *fi;

        fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
        if (!fi)
                return NULL;

        init_once((void *) fi);

        /* Initialize f2fs-specific inode info */
        fi->vfs_inode.i_version = 1;
        atomic_set(&fi->dirty_pages, 0);
        fi->i_current_depth = 1;
        fi->i_advise = 0;
        rwlock_init(&fi->ext.ext_lock);
        init_rwsem(&fi->i_sem);
        INIT_RADIX_TREE(&fi->inmem_root, GFP_NOFS);
        INIT_LIST_HEAD(&fi->inmem_pages);
        mutex_init(&fi->inmem_lock);

        set_inode_flag(fi, FI_NEW_INODE);

        if (test_opt(F2FS_SB(sb), INLINE_XATTR))
                set_inode_flag(fi, FI_INLINE_XATTR);

        /* Will be used by directory only */
        fi->i_dir_level = F2FS_SB(sb)->dir_level;

        return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
        /*
         * This is to avoid a deadlock condition like below.
         * writeback_single_inode(inode)
         *  - f2fs_write_data_page
         *    - f2fs_gc -> iput -> evict
         *       - inode_wait_for_writeback(inode)
         */
        if (!inode_unhashed(inode) && inode->i_state & I_SYNC)
                return 0;
        return generic_drop_inode(inode);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
        set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}

static void f2fs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, f2fs_i_callback);
}

static void f2fs_put_super(struct super_block *sb)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);

        if (sbi->s_proc) {
                remove_proc_entry("segment_info", sbi->s_proc);
                remove_proc_entry(sb->s_id, f2fs_proc_root);
        }
        kobject_del(&sbi->s_kobj);

        f2fs_destroy_stats(sbi);
        stop_gc_thread(sbi);

        /* We don't need to do a checkpoint when it's clean */
        if (sbi->s_dirty) {
                struct cp_control cpc = {
                        .reason = CP_UMOUNT,
                };
                write_checkpoint(sbi, &cpc);
        }

        /*
         * Normally the superblock is clean, so we need to release this.
         * In addition, an EIO skips the checkpoint, so we need this as well.
         */
        release_dirty_inode(sbi);
        release_discard_addrs(sbi);

        iput(sbi->node_inode);
        iput(sbi->meta_inode);

        /* destroy f2fs internal modules */
        destroy_node_manager(sbi);
        destroy_segment_manager(sbi);

        kfree(sbi->ckpt);
        kobject_put(&sbi->s_kobj);
        wait_for_completion(&sbi->s_kobj_unregister);

        sb->s_fs_info = NULL;
        brelse(sbi->raw_super_buf);
        kfree(sbi);
}

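/*
 * With sync != 0, take gc_mutex and write a checkpoint (CP_UMOUNT when
 * the fastboot option is set, CP_SYNC otherwise); with sync == 0, just
 * balance the filesystem via f2fs_balance_fs().
 */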
int f2fs_sync_fs(struct super_block *sb, int sync)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);

        trace_f2fs_sync_fs(sb, sync);

        if (sync) {
                struct cp_control cpc;

                cpc.reason = test_opt(sbi, FASTBOOT) ? CP_UMOUNT : CP_SYNC;
                mutex_lock(&sbi->gc_mutex);
                write_checkpoint(sbi, &cpc);
                mutex_unlock(&sbi->gc_mutex);
        } else {
                f2fs_balance_fs(sbi);
        }

        return 0;
}

static int f2fs_freeze(struct super_block *sb)
{
        int err;

        if (f2fs_readonly(sb))
                return 0;

        err = f2fs_sync_fs(sb, 1);
        return err;
}

static int f2fs_unfreeze(struct super_block *sb)
{
        return 0;
}

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
        block_t total_count, user_block_count, start_count, ovp_count;

        total_count = le64_to_cpu(sbi->raw_super->block_count);
        user_block_count = sbi->user_block_count;
        start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
        ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
        buf->f_type = F2FS_SUPER_MAGIC;
        buf->f_bsize = sbi->blocksize;

        buf->f_blocks = total_count - start_count;
        buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
        buf->f_bavail = user_block_count - valid_user_blocks(sbi);

        buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
        buf->f_ffree = buf->f_files - valid_inode_count(sbi);

        buf->f_namelen = F2FS_NAME_LEN;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);

        return 0;
}

static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
        struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

        if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC))
                seq_printf(seq, ",background_gc=%s", "on");
        else
                seq_printf(seq, ",background_gc=%s", "off");
        if (test_opt(sbi, DISABLE_ROLL_FORWARD))
                seq_puts(seq, ",disable_roll_forward");
        if (test_opt(sbi, DISCARD))
                seq_puts(seq, ",discard");
        if (test_opt(sbi, NOHEAP))
                seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
        if (test_opt(sbi, XATTR_USER))
                seq_puts(seq, ",user_xattr");
        else
                seq_puts(seq, ",nouser_xattr");
        if (test_opt(sbi, INLINE_XATTR))
                seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
        if (test_opt(sbi, POSIX_ACL))
                seq_puts(seq, ",acl");
        else
                seq_puts(seq, ",noacl");
#endif
        if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
                seq_puts(seq, ",disable_ext_identify");
        if (test_opt(sbi, INLINE_DATA))
                seq_puts(seq, ",inline_data");
        if (test_opt(sbi, INLINE_DENTRY))
                seq_puts(seq, ",inline_dentry");
        if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
                seq_puts(seq, ",flush_merge");
        if (test_opt(sbi, NOBARRIER))
                seq_puts(seq, ",nobarrier");
        if (test_opt(sbi, FASTBOOT))
                seq_puts(seq, ",fastboot");
        seq_printf(seq, ",active_logs=%u", sbi->active_logs);

        return 0;
}

static int segment_info_seq_show(struct seq_file *seq, void *offset)
{
        struct super_block *sb = seq->private;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        unsigned int total_segs =
                        le32_to_cpu(sbi->raw_super->segment_count_main);
        int i;

        seq_puts(seq, "format: segment_type|valid_blocks\n"
                "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

        for (i = 0; i < total_segs; i++) {
                struct seg_entry *se = get_seg_entry(sbi, i);

                if ((i % 10) == 0)
                        seq_printf(seq, "%-5d", i);
                seq_printf(seq, "%d|%-3u", se->type,
                                        get_valid_blocks(sbi, i, 1));
                if ((i % 10) == 9 || i == (total_segs - 1))
                        seq_putc(seq, '\n');
                else
                        seq_putc(seq, ' ');
        }

        return 0;
}

static int segment_info_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, segment_info_seq_show, PDE_DATA(inode));
}

static const struct file_operations f2fs_seq_segment_info_fops = {
        .owner = THIS_MODULE,
        .open = segment_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

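/*
 * Re-parse the mount options, then start or stop the background GC and
 * flush-merge threads to match the new read-only state and options;
 * on failure, the previous options and GC thread state are restored.
 */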
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct f2fs_mount_info org_mount_opt;
        int err, active_logs;
        bool need_restart_gc = false;
        bool need_stop_gc = false;

        sync_filesystem(sb);

        /*
         * Save the old mount options in case we
         * need to restore them.
         */
        org_mount_opt = sbi->mount_opt;
        active_logs = sbi->active_logs;

        sbi->mount_opt.opt = 0;
        sbi->active_logs = NR_CURSEG_TYPE;

        /* parse mount options */
        err = parse_options(sb, data);
        if (err)
                goto restore_opts;

        /*
         * Both the previous and the new state of the filesystem are RO,
         * so skip checking the GC and FLUSH_MERGE conditions.
         */
        if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
                goto skip;

        /*
         * Stop the GC thread if the FS is mounted as RO or if
         * background_gc=off is passed as a mount option. Also sync
         * the filesystem.
         */
        if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
                if (sbi->gc_thread) {
                        stop_gc_thread(sbi);
                        f2fs_sync_fs(sb, 1);
                        need_restart_gc = true;
                }
        } else if (!sbi->gc_thread) {
                err = start_gc_thread(sbi);
                if (err)
                        goto restore_opts;
                need_stop_gc = true;
        }

        /*
         * Stop the issue_flush thread if the FS is mounted as RO or if
         * flush_merge is not passed as a mount option.
         */
        if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
                destroy_flush_cmd_control(sbi);
        } else if (!SM_I(sbi)->cmd_control_info) {
                err = create_flush_cmd_control(sbi);
                if (err)
                        goto restore_gc;
        }
skip:
        /* Update the POSIX ACL flag */
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
        return 0;
restore_gc:
        if (need_restart_gc) {
                if (start_gc_thread(sbi))
                        f2fs_msg(sbi->sb, KERN_WARNING,
                                "background gc thread has stopped");
        } else if (need_stop_gc) {
                stop_gc_thread(sbi);
        }
restore_opts:
        sbi->mount_opt = org_mount_opt;
        sbi->active_logs = active_logs;
        return err;
}

static struct super_operations f2fs_sops = {
        .alloc_inode    = f2fs_alloc_inode,
        .drop_inode     = f2fs_drop_inode,
        .destroy_inode  = f2fs_destroy_inode,
        .write_inode    = f2fs_write_inode,
        .dirty_inode    = f2fs_dirty_inode,
        .show_options   = f2fs_show_options,
        .evict_inode    = f2fs_evict_inode,
        .put_super      = f2fs_put_super,
        .sync_fs        = f2fs_sync_fs,
        .freeze_fs      = f2fs_freeze,
        .unfreeze_fs    = f2fs_unfreeze,
        .statfs         = f2fs_statfs,
        .remount_fs     = f2fs_remount,
};

static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
                u64 ino, u32 generation)
{
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;

        if (check_nid_range(sbi, ino))
                return ERR_PTR(-ESTALE);

        /*
         * f2fs_iget isn't quite right if the inode is currently unallocated!
         * However f2fs_iget currently does appropriate checks to handle stale
         * inodes so everything is OK.
         */
        inode = f2fs_iget(sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
        if (unlikely(generation && inode->i_generation != generation)) {
                /* we didn't find the right inode.. */
                iput(inode);
                return ERR_PTR(-ESTALE);
        }
        return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
                int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
                int fh_len, int fh_type)
{
        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
                                    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
        .fh_to_dentry = f2fs_fh_to_dentry,
        .fh_to_parent = f2fs_fh_to_parent,
        .get_parent = f2fs_get_parent,
};

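/*
 * Maximum file size in bytes: the direct pointers held in the inode
 * itself, plus two direct node blocks, two indirect node blocks and one
 * double-indirect node block, all scaled by the block size (1 << bits).
 */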
static loff_t max_file_size(unsigned bits)
{
        loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
        loff_t leaf_count = ADDRS_PER_BLOCK;

        /* two direct node blocks */
        result += (leaf_count * 2);

        /* two indirect node blocks */
        leaf_count *= NIDS_PER_BLOCK;
        result += (leaf_count * 2);

        /* one double indirect node block */
        leaf_count *= NIDS_PER_BLOCK;
        result += leaf_count;

        result <<= bits;
        return result;
}

static int sanity_check_raw_super(struct super_block *sb,
                        struct f2fs_super_block *raw_super)
{
        unsigned int blocksize;

        if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
                f2fs_msg(sb, KERN_INFO,
                        "Magic Mismatch, valid(0x%x) - read(0x%x)",
                        F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
                return 1;
        }

        /* Currently, support only 4KB page cache size */
        if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
                f2fs_msg(sb, KERN_INFO,
                        "Invalid page_cache_size (%lu), supports only 4KB\n",
                        PAGE_CACHE_SIZE);
                return 1;
        }

        /* Currently, support only 4KB block size */
        blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
        if (blocksize != F2FS_BLKSIZE) {
                f2fs_msg(sb, KERN_INFO,
                        "Invalid blocksize (%u), supports only 4KB\n",
                        blocksize);
                return 1;
        }

        /* Currently, support 512/1024/2048/4096 bytes sector size */
        if (le32_to_cpu(raw_super->log_sectorsize) >
                                F2FS_MAX_LOG_SECTOR_SIZE ||
                le32_to_cpu(raw_super->log_sectorsize) <
                                F2FS_MIN_LOG_SECTOR_SIZE) {
                f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
                        le32_to_cpu(raw_super->log_sectorsize));
                return 1;
        }
        if (le32_to_cpu(raw_super->log_sectors_per_block) +
                le32_to_cpu(raw_super->log_sectorsize) !=
                        F2FS_MAX_LOG_SECTOR_SIZE) {
                f2fs_msg(sb, KERN_INFO,
                        "Invalid log sectors per block(%u) log sectorsize(%u)",
                        le32_to_cpu(raw_super->log_sectors_per_block),
                        le32_to_cpu(raw_super->log_sectorsize));
                return 1;
        }
        return 0;
}

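/*
 * Reject a checkpoint whose metadata segments (checkpoint, SIT, NAT,
 * SSA and reserved) consume all segments, or which carries an error flag.
 */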
static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
        unsigned int total, fsmeta;
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

        total = le32_to_cpu(raw_super->segment_count);
        fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
        fsmeta += le32_to_cpu(raw_super->segment_count_sit);
        fsmeta += le32_to_cpu(raw_super->segment_count_nat);
        fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
        fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

        if (unlikely(fsmeta >= total))
                return 1;

        if (unlikely(f2fs_cp_error(sbi))) {
                f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
                return 1;
        }
        return 0;
}

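/* Cache commonly used fields of the raw super block in f2fs_sb_info. */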
static void init_sb_info(struct f2fs_sb_info *sbi)
{
        struct f2fs_super_block *raw_super = sbi->raw_super;
        int i;

        sbi->log_sectors_per_block =
                le32_to_cpu(raw_super->log_sectors_per_block);
        sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
        sbi->blocksize = 1 << sbi->log_blocksize;
        sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
        sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
        sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
        sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
        sbi->total_sections = le32_to_cpu(raw_super->section_count);
        sbi->total_node_count =
                (le32_to_cpu(raw_super->segment_count_nat) / 2)
                        * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
        sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
        sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
        sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
        sbi->cur_victim_sec = NULL_SECNO;
        sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

        for (i = 0; i < NR_COUNT_TYPE; i++)
                atomic_set(&sbi->nr_pages[i], 0);

        sbi->dir_level = DEF_DIR_LEVEL;
        sbi->need_fsck = false;
}

/*
 * Read the f2fs raw super block.
 * Because we keep two copies of the super block, read the first one first;
 * if it is invalid, fall back to reading the second one.
 */
static int read_raw_super_block(struct super_block *sb,
                        struct f2fs_super_block **raw_super,
                        struct buffer_head **raw_super_buf)
{
        int block = 0;

retry:
        *raw_super_buf = sb_bread(sb, block);
        if (!*raw_super_buf) {
                f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
                                block + 1);
                if (block == 0) {
                        block++;
                        goto retry;
                } else {
                        return -EIO;
                }
        }

        *raw_super = (struct f2fs_super_block *)
                ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);

        /* sanity checking of raw super */
        if (sanity_check_raw_super(sb, *raw_super)) {
                brelse(*raw_super_buf);
                f2fs_msg(sb, KERN_ERR,
                        "Can't find valid F2FS filesystem in %dth superblock",
                                                                block + 1);
                if (block == 0) {
                        block++;
                        goto retry;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}

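/*
 * Mount-time setup: read and validate the super block and checkpoint,
 * build the segment, node and GC managers, load the meta, node and root
 * inodes, register /proc and sysfs entries, recover fsynced data and,
 * unless mounted read-only, start the background GC thread.
 */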
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
        struct f2fs_sb_info *sbi;
        struct f2fs_super_block *raw_super = NULL;
        struct buffer_head *raw_super_buf;
        struct inode *root;
        long err = -EINVAL;
        bool retry = true;
        int i;

try_onemore:
        /* allocate memory for f2fs-specific super block info */
        sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;

        /* set a block size */
        if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
                f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
                goto free_sbi;
        }

        err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
        if (err)
                goto free_sbi;

        sb->s_fs_info = sbi;
        /* init some FS parameters */
        sbi->active_logs = NR_CURSEG_TYPE;

        set_opt(sbi, BG_GC);

#ifdef CONFIG_F2FS_FS_XATTR
        set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
        set_opt(sbi, POSIX_ACL);
#endif
        /* parse mount options */
        err = parse_options(sb, (char *)data);
        if (err)
                goto free_sb_buf;

        sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
        sb->s_max_links = F2FS_LINK_MAX;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));

        sb->s_op = &f2fs_sops;
        sb->s_xattr = f2fs_xattr_handlers;
        sb->s_export_op = &f2fs_export_ops;
        sb->s_magic = F2FS_SUPER_MAGIC;
        sb->s_time_gran = 1;
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
        memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

        /* init f2fs-specific super block info */
        sbi->sb = sb;
        sbi->raw_super = raw_super;
        sbi->raw_super_buf = raw_super_buf;
        mutex_init(&sbi->gc_mutex);
        mutex_init(&sbi->writepages);
        mutex_init(&sbi->cp_mutex);
        init_rwsem(&sbi->node_write);
        sbi->por_doing = false;
        spin_lock_init(&sbi->stat_lock);

        init_rwsem(&sbi->read_io.io_rwsem);
        sbi->read_io.sbi = sbi;
        sbi->read_io.bio = NULL;
        for (i = 0; i < NR_PAGE_TYPE; i++) {
                init_rwsem(&sbi->write_io[i].io_rwsem);
                sbi->write_io[i].sbi = sbi;
                sbi->write_io[i].bio = NULL;
        }

        init_rwsem(&sbi->cp_rwsem);
        init_waitqueue_head(&sbi->cp_wait);
        init_sb_info(sbi);

        /* get an inode for meta space */
        sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
        if (IS_ERR(sbi->meta_inode)) {
                f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
                err = PTR_ERR(sbi->meta_inode);
                goto free_sb_buf;
        }

        err = get_valid_checkpoint(sbi);
        if (err) {
                f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
                goto free_meta_inode;
        }

        /* sanity checking of checkpoint */
        err = -EINVAL;
        if (sanity_check_ckpt(sbi)) {
                f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
                goto free_cp;
        }

        sbi->total_valid_node_count =
                                le32_to_cpu(sbi->ckpt->valid_node_count);
        sbi->total_valid_inode_count =
                                le32_to_cpu(sbi->ckpt->valid_inode_count);
        sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
        sbi->total_valid_block_count =
                                le64_to_cpu(sbi->ckpt->valid_block_count);
        sbi->last_valid_block_count = sbi->total_valid_block_count;
        sbi->alloc_valid_block_count = 0;
        INIT_LIST_HEAD(&sbi->dir_inode_list);
        spin_lock_init(&sbi->dir_inode_lock);

        init_ino_entry_info(sbi);

        /* setup f2fs internal modules */
        err = build_segment_manager(sbi);
        if (err) {
                f2fs_msg(sb, KERN_ERR,
                        "Failed to initialize F2FS segment manager");
                goto free_sm;
        }
        err = build_node_manager(sbi);
        if (err) {
                f2fs_msg(sb, KERN_ERR,
                        "Failed to initialize F2FS node manager");
                goto free_nm;
        }

        build_gc_manager(sbi);

        /* get an inode for node space */
        sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
        if (IS_ERR(sbi->node_inode)) {
                f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
                err = PTR_ERR(sbi->node_inode);
                goto free_nm;
        }

        /* if there are any orphan nodes, free them */
        recover_orphan_inodes(sbi);

        /* read root inode and dentry */
        root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
        if (IS_ERR(root)) {
                f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
                err = PTR_ERR(root);
                goto free_node_inode;
        }
        if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
                iput(root);
                err = -EINVAL;
                goto free_node_inode;
        }

        sb->s_root = d_make_root(root); /* allocate root dentry */
        if (!sb->s_root) {
                err = -ENOMEM;
                goto free_root_inode;
        }

        err = f2fs_build_stats(sbi);
        if (err)
                goto free_root_inode;

        if (f2fs_proc_root)
                sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

        if (sbi->s_proc)
                proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
                                 &f2fs_seq_segment_info_fops, sb);

        if (test_opt(sbi, DISCARD)) {
                struct request_queue *q = bdev_get_queue(sb->s_bdev);
                if (!blk_queue_discard(q))
                        f2fs_msg(sb, KERN_WARNING,
                                        "mounting with \"discard\" option, but "
                                        "the device does not support discard");
        }

        sbi->s_kobj.kset = f2fs_kset;
        init_completion(&sbi->s_kobj_unregister);
        err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
                                                        "%s", sb->s_id);
        if (err)
                goto free_proc;

        if (!retry)
                sbi->need_fsck = true;

        /* recover fsynced data */
        if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
                err = recover_fsync_data(sbi);
                if (err) {
                        f2fs_msg(sb, KERN_ERR,
                                "Cannot recover all fsync data errno=%ld", err);
                        goto free_kobj;
                }
        }

        /*
         * If the filesystem is not mounted as read-only,
         * start the gc_thread.
         */
        if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
                /* After POR, we can run background GC thread. */
                err = start_gc_thread(sbi);
                if (err)
                        goto free_kobj;
        }
        return 0;

free_kobj:
        kobject_del(&sbi->s_kobj);
free_proc:
        if (sbi->s_proc) {
                remove_proc_entry("segment_info", sbi->s_proc);
                remove_proc_entry(sb->s_id, f2fs_proc_root);
        }
        f2fs_destroy_stats(sbi);
free_root_inode:
        dput(sb->s_root);
        sb->s_root = NULL;
free_node_inode:
        iput(sbi->node_inode);
free_nm:
        destroy_node_manager(sbi);
free_sm:
        destroy_segment_manager(sbi);
free_cp:
        kfree(sbi->ckpt);
free_meta_inode:
        make_bad_inode(sbi->meta_inode);
        iput(sbi->meta_inode);
free_sb_buf:
        brelse(raw_super_buf);
free_sbi:
        kfree(sbi);

        /* give it only one more chance */
        if (retry) {
                retry = false;
                shrink_dcache_sb(sb);
                goto try_onemore;
        }
        return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
                        const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

static struct file_system_type f2fs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "f2fs",
        .mount          = f2fs_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");

static int __init init_inodecache(void)
{
        f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
                        sizeof(struct f2fs_inode_info));
        if (!f2fs_inode_cachep)
                return -ENOMEM;
        return 0;
}

static void destroy_inodecache(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(f2fs_inode_cachep);
}

static int __init init_f2fs_fs(void)
{
        int err;

        err = init_inodecache();
        if (err)
                goto fail;
        err = create_node_manager_caches();
        if (err)
                goto free_inodecache;
        err = create_segment_manager_caches();
        if (err)
                goto free_node_manager_caches;
        err = create_gc_caches();
        if (err)
                goto free_segment_manager_caches;
        err = create_checkpoint_caches();
        if (err)
                goto free_gc_caches;
        f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
        if (!f2fs_kset) {
                err = -ENOMEM;
                goto free_checkpoint_caches;
        }
        err = register_filesystem(&f2fs_fs_type);
        if (err)
                goto free_kset;
        f2fs_create_root_stats();
        f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
        return 0;

free_kset:
        kset_unregister(f2fs_kset);
free_checkpoint_caches:
        destroy_checkpoint_caches();
free_gc_caches:
        destroy_gc_caches();
free_segment_manager_caches:
        destroy_segment_manager_caches();
free_node_manager_caches:
        destroy_node_manager_caches();
free_inodecache:
        destroy_inodecache();
fail:
        return err;
}

static void __exit exit_f2fs_fs(void)
{
        remove_proc_entry("fs/f2fs", NULL);
        f2fs_destroy_root_stats();
        unregister_filesystem(&f2fs_fs_type);
        destroy_checkpoint_caches();
        destroy_gc_caches();
        destroy_segment_manager_caches();
        destroy_node_manager_caches();
        destroy_inodecache();
        kset_unregister(f2fs_kset);
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");