linux/fs/ext2/super.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/fs/ext2/super.c
   4 *
   5 * Copyright (C) 1992, 1993, 1994, 1995
   6 * Remy Card (card@masi.ibp.fr)
   7 * Laboratoire MASI - Institut Blaise Pascal
   8 * Universite Pierre et Marie Curie (Paris VI)
   9 *
  10 *  from
  11 *
  12 *  linux/fs/minix/inode.c
  13 *
  14 *  Copyright (C) 1991, 1992  Linus Torvalds
  15 *
  16 *  Big-endian to little-endian byte-swapping/bitmaps by
  17 *        David S. Miller (davem@caip.rutgers.edu), 1995
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/string.h>
  22#include <linux/fs.h>
  23#include <linux/slab.h>
  24#include <linux/init.h>
  25#include <linux/blkdev.h>
  26#include <linux/parser.h>
  27#include <linux/random.h>
  28#include <linux/buffer_head.h>
  29#include <linux/exportfs.h>
  30#include <linux/vfs.h>
  31#include <linux/seq_file.h>
  32#include <linux/mount.h>
  33#include <linux/log2.h>
  34#include <linux/quotaops.h>
  35#include <linux/uaccess.h>
  36#include <linux/dax.h>
  37#include <linux/iversion.h>
  38#include "ext2.h"
  39#include "xattr.h"
  40#include "acl.h"
  41
  42static void ext2_write_super(struct super_block *sb);
  43static int ext2_remount (struct super_block * sb, int * flags, char * data);
  44static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
  45static int ext2_sync_fs(struct super_block *sb, int wait);
  46static int ext2_freeze(struct super_block *sb);
  47static int ext2_unfreeze(struct super_block *sb);
  48
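/*
 * Report a filesystem error.  On a read-write mount the error flag is
 * recorded in both the in-memory and the on-disk superblock (which is
 * then synced) so that e2fsck notices it on the next check; behaviour
 * afterwards follows the errors= mount option: panic, remount
 * read-only, or continue.
 */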
  49void ext2_error(struct super_block *sb, const char *function,
  50                const char *fmt, ...)
  51{
  52        struct va_format vaf;
  53        va_list args;
  54        struct ext2_sb_info *sbi = EXT2_SB(sb);
  55        struct ext2_super_block *es = sbi->s_es;
  56
  57        if (!sb_rdonly(sb)) {
  58                spin_lock(&sbi->s_lock);
  59                sbi->s_mount_state |= EXT2_ERROR_FS;
  60                es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
  61                spin_unlock(&sbi->s_lock);
  62                ext2_sync_super(sb, es, 1);
  63        }
  64
  65        va_start(args, fmt);
  66
  67        vaf.fmt = fmt;
  68        vaf.va = &args;
  69
  70        printk(KERN_CRIT "EXT2-fs (%s): error: %s: %pV\n",
  71               sb->s_id, function, &vaf);
  72
  73        va_end(args);
  74
  75        if (test_opt(sb, ERRORS_PANIC))
  76                panic("EXT2-fs: panic from previous error\n");
  77        if (!sb_rdonly(sb) && test_opt(sb, ERRORS_RO)) {
  78                ext2_msg(sb, KERN_CRIT,
  79                             "error: remounting filesystem read-only");
  80                sb->s_flags |= SB_RDONLY;
  81        }
  82}
  83
  84void ext2_msg(struct super_block *sb, const char *prefix,
  85                const char *fmt, ...)
  86{
  87        struct va_format vaf;
  88        va_list args;
  89
  90        va_start(args, fmt);
  91
  92        vaf.fmt = fmt;
  93        vaf.va = &args;
  94
  95        printk("%sEXT2-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
  96
  97        va_end(args);
  98}
  99
 100/*
 101 * This must be called with sbi->s_lock held.
 102 */
 103void ext2_update_dynamic_rev(struct super_block *sb)
 104{
 105        struct ext2_super_block *es = EXT2_SB(sb)->s_es;
 106
 107        if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
 108                return;
 109
 110        ext2_msg(sb, KERN_WARNING,
 111                     "warning: updating to rev %d because of "
 112                     "new feature flag, running e2fsck is recommended",
 113                     EXT2_DYNAMIC_REV);
 114
 115        es->s_first_ino = cpu_to_le32(EXT2_GOOD_OLD_FIRST_INO);
 116        es->s_inode_size = cpu_to_le16(EXT2_GOOD_OLD_INODE_SIZE);
 117        es->s_rev_level = cpu_to_le32(EXT2_DYNAMIC_REV);
 118        /* leave es->s_feature_*compat flags alone */
 119        /* es->s_uuid will be set by e2fsck if empty */
 120
 121        /*
 122         * The rest of the superblock fields should be zero, and if not it
 123         * means they are likely already in use, so leave them alone.  We
 124         * can leave it up to e2fsck to clean up any inconsistencies there.
 125         */
 126}
 127
 128#ifdef CONFIG_QUOTA
 129static int ext2_quota_off(struct super_block *sb, int type);
 130
 131static void ext2_quota_off_umount(struct super_block *sb)
 132{
 133        int type;
 134
 135        for (type = 0; type < MAXQUOTAS; type++)
 136                ext2_quota_off(sb, type);
 137}
 138#else
 139static inline void ext2_quota_off_umount(struct super_block *sb)
 140{
 141}
 142#endif
 143
 144static void ext2_put_super (struct super_block * sb)
 145{
 146        int db_count;
 147        int i;
 148        struct ext2_sb_info *sbi = EXT2_SB(sb);
 149
 150        ext2_quota_off_umount(sb);
 151
 152        ext2_xattr_destroy_cache(sbi->s_ea_block_cache);
 153        sbi->s_ea_block_cache = NULL;
 154
 155        if (!sb_rdonly(sb)) {
 156                struct ext2_super_block *es = sbi->s_es;
 157
 158                spin_lock(&sbi->s_lock);
 159                es->s_state = cpu_to_le16(sbi->s_mount_state);
 160                spin_unlock(&sbi->s_lock);
 161                ext2_sync_super(sb, es, 1);
 162        }
 163        db_count = sbi->s_gdb_count;
 164        for (i = 0; i < db_count; i++)
 165                if (sbi->s_group_desc[i])
 166                        brelse (sbi->s_group_desc[i]);
 167        kfree(sbi->s_group_desc);
 168        kfree(sbi->s_debts);
 169        percpu_counter_destroy(&sbi->s_freeblocks_counter);
 170        percpu_counter_destroy(&sbi->s_freeinodes_counter);
 171        percpu_counter_destroy(&sbi->s_dirs_counter);
 172        brelse (sbi->s_sbh);
 173        sb->s_fs_info = NULL;
 174        kfree(sbi->s_blockgroup_lock);
 175        fs_put_dax(sbi->s_daxdev);
 176        kfree(sbi);
 177}
 178
 179static struct kmem_cache * ext2_inode_cachep;
 180
 181static struct inode *ext2_alloc_inode(struct super_block *sb)
 182{
 183        struct ext2_inode_info *ei;
 184        ei = kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
 185        if (!ei)
 186                return NULL;
 187        ei->i_block_alloc_info = NULL;
 188        inode_set_iversion(&ei->vfs_inode, 1);
 189#ifdef CONFIG_QUOTA
 190        memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
 191#endif
 192
 193        return &ei->vfs_inode;
 194}
 195
 196static void ext2_free_in_core_inode(struct inode *inode)
 197{
 198        kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
 199}
 200
 201static void init_once(void *foo)
 202{
 203        struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
 204
 205        rwlock_init(&ei->i_meta_lock);
 206#ifdef CONFIG_EXT2_FS_XATTR
 207        init_rwsem(&ei->xattr_sem);
 208#endif
 209        mutex_init(&ei->truncate_mutex);
 210#ifdef CONFIG_FS_DAX
 211        init_rwsem(&ei->dax_sem);
 212#endif
 213        inode_init_once(&ei->vfs_inode);
 214}
 215
 216static int __init init_inodecache(void)
 217{
 218        ext2_inode_cachep = kmem_cache_create_usercopy("ext2_inode_cache",
 219                                sizeof(struct ext2_inode_info), 0,
 220                                (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
 221                                        SLAB_ACCOUNT),
 222                                offsetof(struct ext2_inode_info, i_data),
 223                                sizeof_field(struct ext2_inode_info, i_data),
 224                                init_once);
 225        if (ext2_inode_cachep == NULL)
 226                return -ENOMEM;
 227        return 0;
 228}
 229
 230static void destroy_inodecache(void)
 231{
 232        /*
 233         * Make sure all delayed rcu free inodes are flushed before we
 234         * destroy cache.
 235         */
 236        rcu_barrier();
 237        kmem_cache_destroy(ext2_inode_cachep);
 238}
 239
 240static int ext2_show_options(struct seq_file *seq, struct dentry *root)
 241{
 242        struct super_block *sb = root->d_sb;
 243        struct ext2_sb_info *sbi = EXT2_SB(sb);
 244        struct ext2_super_block *es = sbi->s_es;
 245        unsigned long def_mount_opts;
 246
 247        spin_lock(&sbi->s_lock);
 248        def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
 249
 250        if (sbi->s_sb_block != 1)
 251                seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
 252        if (test_opt(sb, MINIX_DF))
 253                seq_puts(seq, ",minixdf");
 254        if (test_opt(sb, GRPID))
 255                seq_puts(seq, ",grpid");
 256        if (!test_opt(sb, GRPID) && (def_mount_opts & EXT2_DEFM_BSDGROUPS))
 257                seq_puts(seq, ",nogrpid");
 258        if (!uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT2_DEF_RESUID)) ||
 259            le16_to_cpu(es->s_def_resuid) != EXT2_DEF_RESUID) {
 260                seq_printf(seq, ",resuid=%u",
 261                                from_kuid_munged(&init_user_ns, sbi->s_resuid));
 262        }
 263        if (!gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT2_DEF_RESGID)) ||
 264            le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) {
 265                seq_printf(seq, ",resgid=%u",
 266                                from_kgid_munged(&init_user_ns, sbi->s_resgid));
 267        }
 268        if (test_opt(sb, ERRORS_RO)) {
 269                int def_errors = le16_to_cpu(es->s_errors);
 270
 271                if (def_errors == EXT2_ERRORS_PANIC ||
 272                    def_errors == EXT2_ERRORS_CONTINUE) {
 273                        seq_puts(seq, ",errors=remount-ro");
 274                }
 275        }
 276        if (test_opt(sb, ERRORS_CONT))
 277                seq_puts(seq, ",errors=continue");
 278        if (test_opt(sb, ERRORS_PANIC))
 279                seq_puts(seq, ",errors=panic");
 280        if (test_opt(sb, NO_UID32))
 281                seq_puts(seq, ",nouid32");
 282        if (test_opt(sb, DEBUG))
 283                seq_puts(seq, ",debug");
 284        if (test_opt(sb, OLDALLOC))
 285                seq_puts(seq, ",oldalloc");
 286
 287#ifdef CONFIG_EXT2_FS_XATTR
 288        if (test_opt(sb, XATTR_USER))
 289                seq_puts(seq, ",user_xattr");
 290        if (!test_opt(sb, XATTR_USER) &&
 291            (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
 292                seq_puts(seq, ",nouser_xattr");
 293        }
 294#endif
 295
 296#ifdef CONFIG_EXT2_FS_POSIX_ACL
 297        if (test_opt(sb, POSIX_ACL))
 298                seq_puts(seq, ",acl");
 299        if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT2_DEFM_ACL))
 300                seq_puts(seq, ",noacl");
 301#endif
 302
 303        if (test_opt(sb, NOBH))
 304                seq_puts(seq, ",nobh");
 305
 306        if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA)
 307                seq_puts(seq, ",usrquota");
 308
 309        if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA)
 310                seq_puts(seq, ",grpquota");
 311
 312        if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
 313                seq_puts(seq, ",xip");
 314
 315        if (sbi->s_mount_opt & EXT2_MOUNT_DAX)
 316                seq_puts(seq, ",dax");
 317
 318        if (!test_opt(sb, RESERVATION))
 319                seq_puts(seq, ",noreservation");
 320
 321        spin_unlock(&sbi->s_lock);
 322        return 0;
 323}
 324
 325#ifdef CONFIG_QUOTA
 326static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off);
 327static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
 328static int ext2_quota_on(struct super_block *sb, int type, int format_id,
 329                         const struct path *path);
 330static struct dquot **ext2_get_dquots(struct inode *inode)
 331{
 332        return EXT2_I(inode)->i_dquot;
 333}
 334
 335static const struct quotactl_ops ext2_quotactl_ops = {
 336        .quota_on       = ext2_quota_on,
 337        .quota_off      = ext2_quota_off,
 338        .quota_sync     = dquot_quota_sync,
 339        .get_state      = dquot_get_state,
 340        .set_info       = dquot_set_dqinfo,
 341        .get_dqblk      = dquot_get_dqblk,
 342        .set_dqblk      = dquot_set_dqblk,
 343        .get_nextdqblk  = dquot_get_next_dqblk,
 344};
 345#endif
 346
 347static const struct super_operations ext2_sops = {
 348        .alloc_inode    = ext2_alloc_inode,
 349        .free_inode     = ext2_free_in_core_inode,
 350        .write_inode    = ext2_write_inode,
 351        .evict_inode    = ext2_evict_inode,
 352        .put_super      = ext2_put_super,
 353        .sync_fs        = ext2_sync_fs,
 354        .freeze_fs      = ext2_freeze,
 355        .unfreeze_fs    = ext2_unfreeze,
 356        .statfs         = ext2_statfs,
 357        .remount_fs     = ext2_remount,
 358        .show_options   = ext2_show_options,
 359#ifdef CONFIG_QUOTA
 360        .quota_read     = ext2_quota_read,
 361        .quota_write    = ext2_quota_write,
 362        .get_dquots     = ext2_get_dquots,
 363#endif
 364};
 365
 366static struct inode *ext2_nfs_get_inode(struct super_block *sb,
 367                u64 ino, u32 generation)
 368{
 369        struct inode *inode;
 370
 371        if (ino < EXT2_FIRST_INO(sb) && ino != EXT2_ROOT_INO)
 372                return ERR_PTR(-ESTALE);
 373        if (ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
 374                return ERR_PTR(-ESTALE);
 375
 376        /*
 377         * ext2_iget isn't quite right if the inode is currently unallocated!
 378         * However ext2_iget currently does appropriate checks to handle stale
 379         * inodes so everything is OK.
 380         */
 381        inode = ext2_iget(sb, ino);
 382        if (IS_ERR(inode))
 383                return ERR_CAST(inode);
 384        if (generation && inode->i_generation != generation) {
 385                /* we didn't find the right inode.. */
 386                iput(inode);
 387                return ERR_PTR(-ESTALE);
 388        }
 389        return inode;
 390}
 391
 392static struct dentry *ext2_fh_to_dentry(struct super_block *sb, struct fid *fid,
 393                int fh_len, int fh_type)
 394{
 395        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
 396                                    ext2_nfs_get_inode);
 397}
 398
 399static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
 400                int fh_len, int fh_type)
 401{
 402        return generic_fh_to_parent(sb, fid, fh_len, fh_type,
 403                                    ext2_nfs_get_inode);
 404}
 405
 406static const struct export_operations ext2_export_ops = {
 407        .fh_to_dentry = ext2_fh_to_dentry,
 408        .fh_to_parent = ext2_fh_to_parent,
 409        .get_parent = ext2_get_parent,
 410};
 411
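/*
 * If the raw mount data begins with an "sb=" option, consume it and
 * return the requested superblock location; otherwise return 1, the
 * default location.  This runs before parse_options() because the
 * superblock has to be read before the remaining options are checked.
 */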
 412static unsigned long get_sb_block(void **data)
 413{
 414        unsigned long   sb_block;
 415        char            *options = (char *) *data;
 416
 417        if (!options || strncmp(options, "sb=", 3) != 0)
 418                return 1;       /* Default location */
 419        options += 3;
 420        sb_block = simple_strtoul(options, &options, 0);
 421        if (*options && *options != ',') {
 422                printk("EXT2-fs: Invalid sb specification: %s\n",
 423                       (char *) *data);
 424                return 1;
 425        }
 426        if (*options == ',')
 427                options++;
 428        *data = (void *) options;
 429        return sb_block;
 430}
 431
 432enum {
 433        Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
 434        Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
 435        Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
 436        Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
 437        Opt_acl, Opt_noacl, Opt_xip, Opt_dax, Opt_ignore, Opt_err, Opt_quota,
 438        Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
 439};
 440
 441static const match_table_t tokens = {
 442        {Opt_bsd_df, "bsddf"},
 443        {Opt_minix_df, "minixdf"},
 444        {Opt_grpid, "grpid"},
 445        {Opt_grpid, "bsdgroups"},
 446        {Opt_nogrpid, "nogrpid"},
 447        {Opt_nogrpid, "sysvgroups"},
 448        {Opt_resgid, "resgid=%u"},
 449        {Opt_resuid, "resuid=%u"},
 450        {Opt_sb, "sb=%u"},
 451        {Opt_err_cont, "errors=continue"},
 452        {Opt_err_panic, "errors=panic"},
 453        {Opt_err_ro, "errors=remount-ro"},
 454        {Opt_nouid32, "nouid32"},
 455        {Opt_nocheck, "check=none"},
 456        {Opt_nocheck, "nocheck"},
 457        {Opt_debug, "debug"},
 458        {Opt_oldalloc, "oldalloc"},
 459        {Opt_orlov, "orlov"},
 460        {Opt_nobh, "nobh"},
 461        {Opt_user_xattr, "user_xattr"},
 462        {Opt_nouser_xattr, "nouser_xattr"},
 463        {Opt_acl, "acl"},
 464        {Opt_noacl, "noacl"},
 465        {Opt_xip, "xip"},
 466        {Opt_dax, "dax"},
 467        {Opt_grpquota, "grpquota"},
 468        {Opt_ignore, "noquota"},
 469        {Opt_quota, "quota"},
 470        {Opt_usrquota, "usrquota"},
 471        {Opt_reservation, "reservation"},
 472        {Opt_noreservation, "noreservation"},
 473        {Opt_err, NULL}
 474};
 475
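/*
 * Parse the comma-separated mount option string into *opts.  Follows the
 * old-style match_token() convention: returns 1 on success and 0 on an
 * unrecognized option or a bad value.
 */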
 476static int parse_options(char *options, struct super_block *sb,
 477                         struct ext2_mount_options *opts)
 478{
 479        char *p;
 480        substring_t args[MAX_OPT_ARGS];
 481        int option;
 482        kuid_t uid;
 483        kgid_t gid;
 484
 485        if (!options)
 486                return 1;
 487
 488        while ((p = strsep (&options, ",")) != NULL) {
 489                int token;
 490                if (!*p)
 491                        continue;
 492
 493                token = match_token(p, tokens, args);
 494                switch (token) {
 495                case Opt_bsd_df:
 496                        clear_opt (opts->s_mount_opt, MINIX_DF);
 497                        break;
 498                case Opt_minix_df:
 499                        set_opt (opts->s_mount_opt, MINIX_DF);
 500                        break;
 501                case Opt_grpid:
 502                        set_opt (opts->s_mount_opt, GRPID);
 503                        break;
 504                case Opt_nogrpid:
 505                        clear_opt (opts->s_mount_opt, GRPID);
 506                        break;
 507                case Opt_resuid:
 508                        if (match_int(&args[0], &option))
 509                                return 0;
 510                        uid = make_kuid(current_user_ns(), option);
 511                        if (!uid_valid(uid)) {
 512                                ext2_msg(sb, KERN_ERR, "Invalid uid value %d", option);
 513                                return 0;
 514
 515                        }
 516                        opts->s_resuid = uid;
 517                        break;
 518                case Opt_resgid:
 519                        if (match_int(&args[0], &option))
 520                                return 0;
 521                        gid = make_kgid(current_user_ns(), option);
 522                        if (!gid_valid(gid)) {
 523                                ext2_msg(sb, KERN_ERR, "Invalid gid value %d", option);
 524                                return 0;
 525                        }
 526                        opts->s_resgid = gid;
 527                        break;
 528                case Opt_sb:
 529                        /* handled by get_sb_block() instead of here */
 530                        /* *sb_block = match_int(&args[0]); */
 531                        break;
 532                case Opt_err_panic:
 533                        clear_opt (opts->s_mount_opt, ERRORS_CONT);
 534                        clear_opt (opts->s_mount_opt, ERRORS_RO);
 535                        set_opt (opts->s_mount_opt, ERRORS_PANIC);
 536                        break;
 537                case Opt_err_ro:
 538                        clear_opt (opts->s_mount_opt, ERRORS_CONT);
 539                        clear_opt (opts->s_mount_opt, ERRORS_PANIC);
 540                        set_opt (opts->s_mount_opt, ERRORS_RO);
 541                        break;
 542                case Opt_err_cont:
 543                        clear_opt (opts->s_mount_opt, ERRORS_RO);
 544                        clear_opt (opts->s_mount_opt, ERRORS_PANIC);
 545                        set_opt (opts->s_mount_opt, ERRORS_CONT);
 546                        break;
 547                case Opt_nouid32:
 548                        set_opt (opts->s_mount_opt, NO_UID32);
 549                        break;
 550                case Opt_nocheck:
 551                        ext2_msg(sb, KERN_WARNING,
 552                                "Option nocheck/check=none is deprecated and"
 553                                " will be removed in June 2020.");
 554                        clear_opt (opts->s_mount_opt, CHECK);
 555                        break;
 556                case Opt_debug:
 557                        set_opt (opts->s_mount_opt, DEBUG);
 558                        break;
 559                case Opt_oldalloc:
 560                        set_opt (opts->s_mount_opt, OLDALLOC);
 561                        break;
 562                case Opt_orlov:
 563                        clear_opt (opts->s_mount_opt, OLDALLOC);
 564                        break;
 565                case Opt_nobh:
 566                        set_opt (opts->s_mount_opt, NOBH);
 567                        break;
 568#ifdef CONFIG_EXT2_FS_XATTR
 569                case Opt_user_xattr:
 570                        set_opt (opts->s_mount_opt, XATTR_USER);
 571                        break;
 572                case Opt_nouser_xattr:
 573                        clear_opt (opts->s_mount_opt, XATTR_USER);
 574                        break;
 575#else
 576                case Opt_user_xattr:
 577                case Opt_nouser_xattr:
  578                        ext2_msg(sb, KERN_INFO, "(no)user_xattr options "
  579                                "not supported");
 580                        break;
 581#endif
 582#ifdef CONFIG_EXT2_FS_POSIX_ACL
 583                case Opt_acl:
 584                        set_opt(opts->s_mount_opt, POSIX_ACL);
 585                        break;
 586                case Opt_noacl:
 587                        clear_opt(opts->s_mount_opt, POSIX_ACL);
 588                        break;
 589#else
 590                case Opt_acl:
 591                case Opt_noacl:
 592                        ext2_msg(sb, KERN_INFO,
 593                                "(no)acl options not supported");
 594                        break;
 595#endif
 596                case Opt_xip:
 597                        ext2_msg(sb, KERN_INFO, "use dax instead of xip");
 598                        set_opt(opts->s_mount_opt, XIP);
 599                        /* Fall through */
 600                case Opt_dax:
 601#ifdef CONFIG_FS_DAX
 602                        ext2_msg(sb, KERN_WARNING,
 603                "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
 604                        set_opt(opts->s_mount_opt, DAX);
 605#else
 606                        ext2_msg(sb, KERN_INFO, "dax option not supported");
 607#endif
 608                        break;
 609
 610#if defined(CONFIG_QUOTA)
 611                case Opt_quota:
 612                case Opt_usrquota:
 613                        set_opt(opts->s_mount_opt, USRQUOTA);
 614                        break;
 615
 616                case Opt_grpquota:
 617                        set_opt(opts->s_mount_opt, GRPQUOTA);
 618                        break;
 619#else
 620                case Opt_quota:
 621                case Opt_usrquota:
 622                case Opt_grpquota:
 623                        ext2_msg(sb, KERN_INFO,
 624                                "quota operations not supported");
 625                        break;
 626#endif
 627
 628                case Opt_reservation:
 629                        set_opt(opts->s_mount_opt, RESERVATION);
 630                        ext2_msg(sb, KERN_INFO, "reservations ON");
 631                        break;
 632                case Opt_noreservation:
 633                        clear_opt(opts->s_mount_opt, RESERVATION);
 634                        ext2_msg(sb, KERN_INFO, "reservations OFF");
 635                        break;
 636                case Opt_ignore:
 637                        break;
 638                default:
 639                        return 0;
 640                }
 641        }
 642        return 1;
 643}
 644
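/*
 * Final setup at mount time: warn if the filesystem is unchecked, was
 * left with errors, or has exceeded its maximal mount count or check
 * interval, and bump the mount count (skipped on read-only mounts).
 * Returns SB_RDONLY if the revision level is too new to write to,
 * 0 otherwise.
 */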
 645static int ext2_setup_super (struct super_block * sb,
 646                              struct ext2_super_block * es,
 647                              int read_only)
 648{
 649        int res = 0;
 650        struct ext2_sb_info *sbi = EXT2_SB(sb);
 651
 652        if (le32_to_cpu(es->s_rev_level) > EXT2_MAX_SUPP_REV) {
 653                ext2_msg(sb, KERN_ERR,
 654                        "error: revision level too high, "
 655                        "forcing read-only mode");
 656                res = SB_RDONLY;
 657        }
 658        if (read_only)
 659                return res;
 660        if (!(sbi->s_mount_state & EXT2_VALID_FS))
 661                ext2_msg(sb, KERN_WARNING,
 662                        "warning: mounting unchecked fs, "
 663                        "running e2fsck is recommended");
 664        else if ((sbi->s_mount_state & EXT2_ERROR_FS))
 665                ext2_msg(sb, KERN_WARNING,
 666                        "warning: mounting fs with errors, "
 667                        "running e2fsck is recommended");
 668        else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
 669                 le16_to_cpu(es->s_mnt_count) >=
 670                 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
 671                ext2_msg(sb, KERN_WARNING,
 672                        "warning: maximal mount count reached, "
 673                        "running e2fsck is recommended");
 674        else if (le32_to_cpu(es->s_checkinterval) &&
 675                (le32_to_cpu(es->s_lastcheck) +
 676                        le32_to_cpu(es->s_checkinterval) <=
 677                        ktime_get_real_seconds()))
 678                ext2_msg(sb, KERN_WARNING,
 679                        "warning: checktime reached, "
 680                        "running e2fsck is recommended");
 681        if (!le16_to_cpu(es->s_max_mnt_count))
 682                es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
 683        le16_add_cpu(&es->s_mnt_count, 1);
 684        if (test_opt (sb, DEBUG))
  685                ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
  686                        "bpg=%lu, ipg=%lu, mo=%04lx",
 687                        EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
 688                        sbi->s_frag_size,
 689                        sbi->s_groups_count,
 690                        EXT2_BLOCKS_PER_GROUP(sb),
 691                        EXT2_INODES_PER_GROUP(sb),
 692                        sbi->s_mount_opt);
 693        return res;
 694}
 695
 696static int ext2_check_descriptors(struct super_block *sb)
 697{
 698        int i;
 699        struct ext2_sb_info *sbi = EXT2_SB(sb);
 700
 701        ext2_debug ("Checking group descriptors");
 702
 703        for (i = 0; i < sbi->s_groups_count; i++) {
 704                struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
 705                ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
 706                ext2_fsblk_t last_block;
 707
 708                if (i == sbi->s_groups_count - 1)
 709                        last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
 710                else
 711                        last_block = first_block +
 712                                (EXT2_BLOCKS_PER_GROUP(sb) - 1);
 713
 714                if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
 715                    le32_to_cpu(gdp->bg_block_bitmap) > last_block)
 716                {
 717                        ext2_error (sb, "ext2_check_descriptors",
 718                                    "Block bitmap for group %d"
 719                                    " not in group (block %lu)!",
 720                                    i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap));
 721                        return 0;
 722                }
 723                if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
 724                    le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
 725                {
 726                        ext2_error (sb, "ext2_check_descriptors",
 727                                    "Inode bitmap for group %d"
 728                                    " not in group (block %lu)!",
 729                                    i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap));
 730                        return 0;
 731                }
 732                if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
 733                    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
 734                    last_block)
 735                {
 736                        ext2_error (sb, "ext2_check_descriptors",
 737                                    "Inode table for group %d"
 738                                    " not in group (block %lu)!",
 739                                    i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
 740                        return 0;
 741                }
 742        }
 743        return 1;
 744}
 745
 746/*
 747 * Maximal file size.  There is a direct, and {,double-,triple-}indirect
 748 * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
 749 * We need to be 1 filesystem block less than the 2^32 sector limit.
 750 */
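/*
 * Illustrative arithmetic (added here for clarity, derived from the code
 * below): with 1 KiB blocks (bits == 10) each indirect block holds
 * 2^(10-2) = 256 pointers, so the block tree addresses
 * 12 + 256 + 256^2 + 256^3 = 16843020 blocks, i.e. roughly 16 GiB.
 * That is well below the ~2^31 block limit implied by the 32-bit,
 * 512-byte-sector i_blocks counter, so for small block sizes the block
 * tree is the binding limit.
 */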
 751static loff_t ext2_max_size(int bits)
 752{
 753        loff_t res = EXT2_NDIR_BLOCKS;
 754        int meta_blocks;
 755        unsigned int upper_limit;
 756        unsigned int ppb = 1 << (bits-2);
 757
  758        /* This is calculated to be the largest file size for a
  759         * dense file such that the total number of
  760         * sectors in the file, including data and all indirect blocks,
  761         * does not exceed 2^32 - 1, since the __u32 i_blocks
  762         * field represents the total number of
  763         * 512-byte sectors occupied by the file.
  764         */
 765        upper_limit = (1LL << 32) - 1;
 766
 767        /* total blocks in file system block size */
 768        upper_limit >>= (bits - 9);
 769
 770        /* Compute how many blocks we can address by block tree */
 771        res += 1LL << (bits-2);
 772        res += 1LL << (2*(bits-2));
 773        res += 1LL << (3*(bits-2));
 774        /* Does block tree limit file size? */
 775        if (res < upper_limit)
 776                goto check_lfs;
 777
 778        res = upper_limit;
 779        /* How many metadata blocks are needed for addressing upper_limit? */
 780        upper_limit -= EXT2_NDIR_BLOCKS;
 781        /* indirect blocks */
 782        meta_blocks = 1;
 783        upper_limit -= ppb;
 784        /* double indirect blocks */
 785        if (upper_limit < ppb * ppb) {
 786                meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb);
 787                res -= meta_blocks;
 788                goto check_lfs;
 789        }
 790        meta_blocks += 1 + ppb;
 791        upper_limit -= ppb * ppb;
  792        /* triple indirect blocks for the rest */
 793        meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb) +
 794                DIV_ROUND_UP(upper_limit, ppb*ppb);
 795        res -= meta_blocks;
 796check_lfs:
 797        res <<= bits;
 798        if (res > MAX_LFS_FILESIZE)
 799                res = MAX_LFS_FILESIZE;
 800
 801        return res;
 802}
 803
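/*
 * Return the disk block holding group-descriptor block 'nr'.  Without the
 * meta_bg feature the descriptor blocks simply follow the superblock;
 * with meta_bg, descriptor blocks at or beyond s_first_meta_bg are stored
 * at the start of the first block group of the meta block group they
 * describe, after any superblock backup in that group.
 */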
 804static unsigned long descriptor_loc(struct super_block *sb,
 805                                    unsigned long logic_sb_block,
 806                                    int nr)
 807{
 808        struct ext2_sb_info *sbi = EXT2_SB(sb);
 809        unsigned long bg, first_meta_bg;
 810        int has_super = 0;
 811        
 812        first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
 813
 814        if (!EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_META_BG) ||
 815            nr < first_meta_bg)
 816                return (logic_sb_block + nr + 1);
 817        bg = sbi->s_desc_per_block * nr;
 818        if (ext2_bg_has_super(sb, bg))
 819                has_super = 1;
 820
 821        return ext2_group_first_block_no(sb, bg) + has_super;
 822}
 823
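/*
 * Read and validate the on-disk superblock, apply default and
 * user-supplied mount options, check feature flags and filesystem
 * geometry, load the group descriptors, set up the reservation tree and
 * per-cpu counters, and finally read the root inode.  Errors unwind
 * through the failed_* labels at the bottom.
 */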
 824static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 825{
 826        struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
 827        struct buffer_head * bh;
 828        struct ext2_sb_info * sbi;
 829        struct ext2_super_block * es;
 830        struct inode *root;
 831        unsigned long block;
 832        unsigned long sb_block = get_sb_block(&data);
 833        unsigned long logic_sb_block;
 834        unsigned long offset = 0;
 835        unsigned long def_mount_opts;
 836        long ret = -ENOMEM;
 837        int blocksize = BLOCK_SIZE;
 838        int db_count;
 839        int i, j;
 840        __le32 features;
 841        int err;
 842        struct ext2_mount_options opts;
 843
 844        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 845        if (!sbi)
 846                goto failed;
 847
 848        sbi->s_blockgroup_lock =
 849                kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
 850        if (!sbi->s_blockgroup_lock) {
 851                kfree(sbi);
 852                goto failed;
 853        }
 854        sb->s_fs_info = sbi;
 855        sbi->s_sb_block = sb_block;
 856        sbi->s_daxdev = dax_dev;
 857
 858        spin_lock_init(&sbi->s_lock);
 859        ret = -EINVAL;
 860
  861        /*
  862         * Query the current blocksize for the device and use that as
  863         * the filesystem blocksize, unless it is smaller than the
  864         * default, in which case use the default.
  865         * This is important for devices that have a hardware
  866         * sectorsize that is larger than the default.
  867         */
 868        blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
 869        if (!blocksize) {
 870                ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
 871                goto failed_sbi;
 872        }
 873
 874        /*
 875         * If the superblock doesn't start on a hardware sector boundary,
 876         * calculate the offset.  
 877         */
 878        if (blocksize != BLOCK_SIZE) {
 879                logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
 880                offset = (sb_block*BLOCK_SIZE) % blocksize;
 881        } else {
 882                logic_sb_block = sb_block;
 883        }
 884
 885        if (!(bh = sb_bread(sb, logic_sb_block))) {
 886                ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
 887                goto failed_sbi;
 888        }
  889        /*
  890         * Note: s_es must be initialized as soon as possible because
  891         *       some of the ext2 macros depend on its value
  892         */
 893        es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
 894        sbi->s_es = es;
 895        sb->s_magic = le16_to_cpu(es->s_magic);
 896
 897        if (sb->s_magic != EXT2_SUPER_MAGIC)
 898                goto cantfind_ext2;
 899
 900        opts.s_mount_opt = 0;
 901        /* Set defaults before we parse the mount options */
 902        def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
 903        if (def_mount_opts & EXT2_DEFM_DEBUG)
 904                set_opt(opts.s_mount_opt, DEBUG);
 905        if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
 906                set_opt(opts.s_mount_opt, GRPID);
 907        if (def_mount_opts & EXT2_DEFM_UID16)
 908                set_opt(opts.s_mount_opt, NO_UID32);
 909#ifdef CONFIG_EXT2_FS_XATTR
 910        if (def_mount_opts & EXT2_DEFM_XATTR_USER)
 911                set_opt(opts.s_mount_opt, XATTR_USER);
 912#endif
 913#ifdef CONFIG_EXT2_FS_POSIX_ACL
 914        if (def_mount_opts & EXT2_DEFM_ACL)
 915                set_opt(opts.s_mount_opt, POSIX_ACL);
 916#endif
 917        
 918        if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
 919                set_opt(opts.s_mount_opt, ERRORS_PANIC);
 920        else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
 921                set_opt(opts.s_mount_opt, ERRORS_CONT);
 922        else
 923                set_opt(opts.s_mount_opt, ERRORS_RO);
 924
 925        opts.s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
 926        opts.s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
 927        
 928        set_opt(opts.s_mount_opt, RESERVATION);
 929
 930        if (!parse_options((char *) data, sb, &opts))
 931                goto failed_mount;
 932
 933        sbi->s_mount_opt = opts.s_mount_opt;
 934        sbi->s_resuid = opts.s_resuid;
 935        sbi->s_resgid = opts.s_resgid;
 936
 937        sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
 938                ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
 939                 SB_POSIXACL : 0);
 940        sb->s_iflags |= SB_I_CGROUPWB;
 941
 942        if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
 943            (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
 944             EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
 945             EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
 946                ext2_msg(sb, KERN_WARNING,
 947                        "warning: feature flags set on rev 0 fs, "
 948                        "running e2fsck is recommended");
 949        /*
 950         * Check feature flags regardless of the revision level, since we
 951         * previously didn't change the revision level when setting the flags,
 952         * so there is a chance incompat flags are set on a rev 0 filesystem.
 953         */
 954        features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
 955        if (features) {
 956                ext2_msg(sb, KERN_ERR,  "error: couldn't mount because of "
 957                       "unsupported optional features (%x)",
 958                        le32_to_cpu(features));
 959                goto failed_mount;
 960        }
 961        if (!sb_rdonly(sb) && (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
 962                ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
 963                       "unsupported optional features (%x)",
 964                       le32_to_cpu(features));
 965                goto failed_mount;
 966        }
 967
 968        blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
 969
 970        if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
 971                if (!bdev_dax_supported(sb->s_bdev, blocksize)) {
 972                        ext2_msg(sb, KERN_ERR,
 973                                "DAX unsupported by block device. Turning off DAX.");
 974                        sbi->s_mount_opt &= ~EXT2_MOUNT_DAX;
 975                }
 976        }
 977
 978        /* If the blocksize doesn't match, re-read the thing.. */
 979        if (sb->s_blocksize != blocksize) {
 980                brelse(bh);
 981
 982                if (!sb_set_blocksize(sb, blocksize)) {
 983                        ext2_msg(sb, KERN_ERR,
 984                                "error: bad blocksize %d", blocksize);
 985                        goto failed_sbi;
 986                }
 987
 988                logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
 989                offset = (sb_block*BLOCK_SIZE) % blocksize;
 990                bh = sb_bread(sb, logic_sb_block);
 991                if(!bh) {
  992                        ext2_msg(sb, KERN_ERR, "error: couldn't read "
  993                                "superblock on 2nd try");
 994                        goto failed_sbi;
 995                }
 996                es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
 997                sbi->s_es = es;
 998                if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
 999                        ext2_msg(sb, KERN_ERR, "error: magic mismatch");
1000                        goto failed_mount;
1001                }
1002        }
1003
1004        sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
1005        sb->s_max_links = EXT2_LINK_MAX;
1006
1007        if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
1008                sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
1009                sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
1010        } else {
1011                sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
1012                sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
1013                if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
1014                    !is_power_of_2(sbi->s_inode_size) ||
1015                    (sbi->s_inode_size > blocksize)) {
1016                        ext2_msg(sb, KERN_ERR,
1017                                "error: unsupported inode size: %d",
1018                                sbi->s_inode_size);
1019                        goto failed_mount;
1020                }
1021        }
1022
1023        sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
1024                                   le32_to_cpu(es->s_log_frag_size);
1025        if (sbi->s_frag_size == 0)
1026                goto cantfind_ext2;
1027        sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
1028
1029        sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
1030        sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
1031        sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
1032
1033        sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
1034        if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
1035                goto cantfind_ext2;
1036        sbi->s_itb_per_group = sbi->s_inodes_per_group /
1037                                        sbi->s_inodes_per_block;
1038        sbi->s_desc_per_block = sb->s_blocksize /
1039                                        sizeof (struct ext2_group_desc);
1040        sbi->s_sbh = bh;
1041        sbi->s_mount_state = le16_to_cpu(es->s_state);
1042        sbi->s_addr_per_block_bits =
1043                ilog2 (EXT2_ADDR_PER_BLOCK(sb));
1044        sbi->s_desc_per_block_bits =
1045                ilog2 (EXT2_DESC_PER_BLOCK(sb));
1046
1047        if (sb->s_magic != EXT2_SUPER_MAGIC)
1048                goto cantfind_ext2;
1049
1050        if (sb->s_blocksize != bh->b_size) {
1051                if (!silent)
1052                        ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
1053                goto failed_mount;
1054        }
1055
1056        if (sb->s_blocksize != sbi->s_frag_size) {
1057                ext2_msg(sb, KERN_ERR,
 1058                        "error: fragsize %lu != blocksize %lu "
 1059                        "(not supported yet)",
1060                        sbi->s_frag_size, sb->s_blocksize);
1061                goto failed_mount;
1062        }
1063
1064        if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
1065                ext2_msg(sb, KERN_ERR,
1066                        "error: #blocks per group too big: %lu",
1067                        sbi->s_blocks_per_group);
1068                goto failed_mount;
1069        }
1070        if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
1071                ext2_msg(sb, KERN_ERR,
1072                        "error: #fragments per group too big: %lu",
1073                        sbi->s_frags_per_group);
1074                goto failed_mount;
1075        }
1076        if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
1077                ext2_msg(sb, KERN_ERR,
1078                        "error: #inodes per group too big: %lu",
1079                        sbi->s_inodes_per_group);
1080                goto failed_mount;
1081        }
1082
1083        if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
1084                goto cantfind_ext2;
1085        sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
1086                                le32_to_cpu(es->s_first_data_block) - 1)
1087                                        / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
1088        db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
1089                   EXT2_DESC_PER_BLOCK(sb);
1090        sbi->s_group_desc = kmalloc_array (db_count,
1091                                           sizeof(struct buffer_head *),
1092                                           GFP_KERNEL);
1093        if (sbi->s_group_desc == NULL) {
1094                ret = -ENOMEM;
1095                ext2_msg(sb, KERN_ERR, "error: not enough memory");
1096                goto failed_mount;
1097        }
1098        bgl_lock_init(sbi->s_blockgroup_lock);
1099        sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
1100        if (!sbi->s_debts) {
1101                ret = -ENOMEM;
1102                ext2_msg(sb, KERN_ERR, "error: not enough memory");
1103                goto failed_mount_group_desc;
1104        }
1105        for (i = 0; i < db_count; i++) {
1106                block = descriptor_loc(sb, logic_sb_block, i);
1107                sbi->s_group_desc[i] = sb_bread(sb, block);
1108                if (!sbi->s_group_desc[i]) {
1109                        for (j = 0; j < i; j++)
1110                                brelse (sbi->s_group_desc[j]);
1111                        ext2_msg(sb, KERN_ERR,
1112                                "error: unable to read group descriptors");
1113                        goto failed_mount_group_desc;
1114                }
1115        }
1116        if (!ext2_check_descriptors (sb)) {
1117                ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
1118                goto failed_mount2;
1119        }
1120        sbi->s_gdb_count = db_count;
1121        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
1122        spin_lock_init(&sbi->s_next_gen_lock);
1123
 1124        /* per-filesystem reservation list head & lock */
1125        spin_lock_init(&sbi->s_rsv_window_lock);
1126        sbi->s_rsv_window_root = RB_ROOT;
1127        /*
1128         * Add a single, static dummy reservation to the start of the
1129         * reservation window list --- it gives us a placeholder for
1130         * append-at-start-of-list which makes the allocation logic
1131         * _much_ simpler.
1132         */
1133        sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
1134        sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
1135        sbi->s_rsv_window_head.rsv_alloc_hit = 0;
1136        sbi->s_rsv_window_head.rsv_goal_size = 0;
1137        ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);
1138
1139        err = percpu_counter_init(&sbi->s_freeblocks_counter,
1140                                ext2_count_free_blocks(sb), GFP_KERNEL);
1141        if (!err) {
1142                err = percpu_counter_init(&sbi->s_freeinodes_counter,
1143                                ext2_count_free_inodes(sb), GFP_KERNEL);
1144        }
1145        if (!err) {
1146                err = percpu_counter_init(&sbi->s_dirs_counter,
1147                                ext2_count_dirs(sb), GFP_KERNEL);
1148        }
1149        if (err) {
1150                ext2_msg(sb, KERN_ERR, "error: insufficient memory");
1151                goto failed_mount3;
1152        }
1153
1154#ifdef CONFIG_EXT2_FS_XATTR
1155        sbi->s_ea_block_cache = ext2_xattr_create_cache();
1156        if (!sbi->s_ea_block_cache) {
1157                ret = -ENOMEM;
1158                ext2_msg(sb, KERN_ERR, "Failed to create ea_block_cache");
1159                goto failed_mount3;
1160        }
1161#endif
1162        /*
1163         * set up enough so that it can read an inode
1164         */
1165        sb->s_op = &ext2_sops;
1166        sb->s_export_op = &ext2_export_ops;
1167        sb->s_xattr = ext2_xattr_handlers;
1168
1169#ifdef CONFIG_QUOTA
1170        sb->dq_op = &dquot_operations;
1171        sb->s_qcop = &ext2_quotactl_ops;
1172        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
1173#endif
1174
1175        root = ext2_iget(sb, EXT2_ROOT_INO);
1176        if (IS_ERR(root)) {
1177                ret = PTR_ERR(root);
1178                goto failed_mount3;
1179        }
1180        if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
1181                iput(root);
1182                ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
1183                goto failed_mount3;
1184        }
1185
1186        sb->s_root = d_make_root(root);
1187        if (!sb->s_root) {
1188                ext2_msg(sb, KERN_ERR, "error: get root inode failed");
1189                ret = -ENOMEM;
1190                goto failed_mount3;
1191        }
1192        if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
1193                ext2_msg(sb, KERN_WARNING,
1194                        "warning: mounting ext3 filesystem as ext2");
1195        if (ext2_setup_super (sb, es, sb_rdonly(sb)))
1196                sb->s_flags |= SB_RDONLY;
1197        ext2_write_super(sb);
1198        return 0;
1199
1200cantfind_ext2:
1201        if (!silent)
1202                ext2_msg(sb, KERN_ERR,
1203                        "error: can't find an ext2 filesystem on dev %s.",
1204                        sb->s_id);
1205        goto failed_mount;
1206failed_mount3:
1207        ext2_xattr_destroy_cache(sbi->s_ea_block_cache);
1208        percpu_counter_destroy(&sbi->s_freeblocks_counter);
1209        percpu_counter_destroy(&sbi->s_freeinodes_counter);
1210        percpu_counter_destroy(&sbi->s_dirs_counter);
1211failed_mount2:
1212        for (i = 0; i < db_count; i++)
1213                brelse(sbi->s_group_desc[i]);
1214failed_mount_group_desc:
1215        kfree(sbi->s_group_desc);
1216        kfree(sbi->s_debts);
1217failed_mount:
1218        brelse(bh);
1219failed_sbi:
1220        sb->s_fs_info = NULL;
1221        kfree(sbi->s_blockgroup_lock);
1222        kfree(sbi);
1223failed:
1224        fs_put_dax(dax_dev);
1225        return ret;
1226}
1227
1228static void ext2_clear_super_error(struct super_block *sb)
1229{
1230        struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
1231
1232        if (buffer_write_io_error(sbh)) {
1233                /*
1234                 * Oh, dear.  A previous attempt to write the
1235                 * superblock failed.  This could happen because the
1236                 * USB device was yanked out.  Or it could happen to
1237                 * be a transient write error and maybe the block will
1238                 * be remapped.  Nothing we can do but to retry the
1239                 * write and hope for the best.
1240                 */
1241                ext2_msg(sb, KERN_ERR,
1242                       "previous I/O error to superblock detected");
1243                clear_buffer_write_io_error(sbh);
1244                set_buffer_uptodate(sbh);
1245        }
1246}
1247
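/*
 * Push the in-memory superblock out to disk: clear any previous write
 * error on the superblock buffer, refresh the free block / free inode
 * counts and the write time, mark the buffer dirty and, if @wait is set,
 * wait for the write to complete.
 */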
1248void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
1249                     int wait)
1250{
1251        ext2_clear_super_error(sb);
1252        spin_lock(&EXT2_SB(sb)->s_lock);
1253        es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
1254        es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
1255        es->s_wtime = cpu_to_le32(ktime_get_real_seconds());
1256        /* unlock before we do IO */
1257        spin_unlock(&EXT2_SB(sb)->s_lock);
1258        mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
1259        if (wait)
1260                sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
1261}
1262
1263/*
1264 * In the second extended file system, it is not necessary to
1265 * write the super block since we use a mapping of the
1266 * disk super block in a buffer.
1267 *
1268 * However, this function is still used to set the fs valid
1269 * flags to 0.  We need to set this flag to 0 since the fs
1270 * may have been checked while mounted and e2fsck may have
1271 * set s_state to EXT2_VALID_FS after some corrections.
1272 */
1273static int ext2_sync_fs(struct super_block *sb, int wait)
1274{
1275        struct ext2_sb_info *sbi = EXT2_SB(sb);
1276        struct ext2_super_block *es = EXT2_SB(sb)->s_es;
1277
1278        /*
1279         * Write quota structures to quota file, sync_blockdev() will write
1280         * them to disk later
1281         */
1282        dquot_writeback_dquots(sb, -1);
1283
1284        spin_lock(&sbi->s_lock);
1285        if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
1286                ext2_debug("setting valid to 0\n");
1287                es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
1288        }
1289        spin_unlock(&sbi->s_lock);
1290        ext2_sync_super(sb, es, wait);
1291        return 0;
1292}
1293
1294static int ext2_freeze(struct super_block *sb)
1295{
1296        struct ext2_sb_info *sbi = EXT2_SB(sb);
1297
1298        /*
1299         * Open but unlinked files present? Keep EXT2_VALID_FS flag cleared
 1300         * because we have unattached inodes and thus the filesystem is not fully
1301         * consistent.
1302         */
1303        if (atomic_long_read(&sb->s_remove_count)) {
1304                ext2_sync_fs(sb, 1);
1305                return 0;
1306        }
 1307        /* Set EXT2_VALID_FS flag */
1308        spin_lock(&sbi->s_lock);
1309        sbi->s_es->s_state = cpu_to_le16(sbi->s_mount_state);
1310        spin_unlock(&sbi->s_lock);
1311        ext2_sync_super(sb, sbi->s_es, 1);
1312
1313        return 0;
1314}
1315
1316static int ext2_unfreeze(struct super_block *sb)
1317{
1318        /* Just write sb to clear EXT2_VALID_FS flag */
1319        ext2_write_super(sb);
1320
1321        return 0;
1322}
1323
1324static void ext2_write_super(struct super_block *sb)
1325{
1326        if (!sb_rdonly(sb))
1327                ext2_sync_fs(sb, 1);
1328}
1329
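/*
 * Handle mount -o remount: re-parse the options (refusing a change of
 * the dax flag on a live filesystem) and, when switching between
 * read-only and read-write, update the on-disk valid-fs state and
 * suspend or resume quotas accordingly.
 */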
1330static int ext2_remount (struct super_block * sb, int * flags, char * data)
1331{
1332        struct ext2_sb_info * sbi = EXT2_SB(sb);
1333        struct ext2_super_block * es;
1334        struct ext2_mount_options new_opts;
1335        int err;
1336
1337        sync_filesystem(sb);
1338
1339        spin_lock(&sbi->s_lock);
1340        new_opts.s_mount_opt = sbi->s_mount_opt;
1341        new_opts.s_resuid = sbi->s_resuid;
1342        new_opts.s_resgid = sbi->s_resgid;
1343        spin_unlock(&sbi->s_lock);
1344
1345        if (!parse_options(data, sb, &new_opts))
1346                return -EINVAL;
1347
1348        spin_lock(&sbi->s_lock);
1349        es = sbi->s_es;
1350        if ((sbi->s_mount_opt ^ new_opts.s_mount_opt) & EXT2_MOUNT_DAX) {
1351                ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
1352                         "dax flag with busy inodes while remounting");
1353                new_opts.s_mount_opt ^= EXT2_MOUNT_DAX;
1354        }
1355        if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
1356                goto out_set;
1357        if (*flags & SB_RDONLY) {
1358                if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
1359                    !(sbi->s_mount_state & EXT2_VALID_FS))
1360                        goto out_set;
1361
1362                /*
1363                 * OK, we are remounting a valid rw partition rdonly, so set
1364                 * the rdonly flag and then mark the partition as valid again.
1365                 */
1366                es->s_state = cpu_to_le16(sbi->s_mount_state);
1367                es->s_mtime = cpu_to_le32(ktime_get_real_seconds());
1368                spin_unlock(&sbi->s_lock);
1369
1370                err = dquot_suspend(sb, -1);
1371                if (err < 0)
1372                        return err;
1373
1374                ext2_sync_super(sb, es, 1);
1375        } else {
1376                __le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
1377                                               ~EXT2_FEATURE_RO_COMPAT_SUPP);
1378                if (ret) {
1379                        spin_unlock(&sbi->s_lock);
1380                        ext2_msg(sb, KERN_WARNING,
1381                                "warning: couldn't remount RDWR because of "
1382                                "unsupported optional features (%x).",
1383                                le32_to_cpu(ret));
1384                        return -EROFS;
1385                }
1386                /*
1387                 * Mounting a RDONLY partition read-write, so reread and
1388                 * store the current valid flag.  (It may have been changed
1389                 * by e2fsck since we originally mounted the partition.)
1390                 */
1391                sbi->s_mount_state = le16_to_cpu(es->s_state);
1392                if (!ext2_setup_super (sb, es, 0))
1393                        sb->s_flags &= ~SB_RDONLY;
1394                spin_unlock(&sbi->s_lock);
1395
1396                ext2_write_super(sb);
1397
1398                dquot_resume(sb, -1);
1399        }
1400
1401        spin_lock(&sbi->s_lock);
1402out_set:
1403        sbi->s_mount_opt = new_opts.s_mount_opt;
1404        sbi->s_resuid = new_opts.s_resuid;
1405        sbi->s_resgid = new_opts.s_resgid;
1406        sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
1407                ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
1408        spin_unlock(&sbi->s_lock);
1409
1410        return 0;
1411}
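/*
 * Editor's note: a hedged userspace sketch (not part of this file) of what
 * ends up in ext2_remount().  A mount(2) call with MS_REMOUNT makes the VFS
 * re-evaluate the flags and call the filesystem's ->remount_fs with the new
 * option string; here an already-mounted filesystem is flipped to read-only.
 * The mount point is hypothetical.
 */
#if 0	/* illustration only -- userspace code */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Roughly "mount -o remount,ro /mnt/ext2". */
	if (mount("none", "/mnt/ext2", NULL, MS_REMOUNT | MS_RDONLY, NULL)) {
		perror("remount");
		return 1;
	}
	return 0;
}
#endif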
1412
1413static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
1414{
1415        struct super_block *sb = dentry->d_sb;
1416        struct ext2_sb_info *sbi = EXT2_SB(sb);
1417        struct ext2_super_block *es = sbi->s_es;
1418        u64 fsid;
1419
1420        spin_lock(&sbi->s_lock);
1421
1422        if (test_opt (sb, MINIX_DF))
1423                sbi->s_overhead_last = 0;
1424        else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
1425                unsigned long i, overhead = 0;
1426                smp_rmb();
1427
1428                /*
1429                 * Compute the overhead (FS structures). This is constant
1430                 * for a given filesystem unless the number of block groups
1431                 * changes, so we cache the previous value until it does.
1432                 */
1433
1434                /*
1435                 * All of the blocks before first_data_block are
1436                 * overhead
1437                 */
1438                overhead = le32_to_cpu(es->s_first_data_block);
1439
1440                /*
1441                 * Add the overhead attributed to the superblock and
1442                 * block group descriptors.  If the sparse superblocks
1443                 * feature is turned on, then not all groups have this.
1444                 */
1445                for (i = 0; i < sbi->s_groups_count; i++)
1446                        overhead += ext2_bg_has_super(sb, i) +
1447                                ext2_bg_num_gdb(sb, i);
1448
1449                /*
1450                 * Every block group has an inode bitmap, a block
1451                 * bitmap, and an inode table.
1452                 */
1453                overhead += (sbi->s_groups_count *
1454                             (2 + sbi->s_itb_per_group));
1455                sbi->s_overhead_last = overhead;
1456                smp_wmb();
1457                sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
1458        }
1459
1460        buf->f_type = EXT2_SUPER_MAGIC;
1461        buf->f_bsize = sb->s_blocksize;
1462        buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
1463        buf->f_bfree = ext2_count_free_blocks(sb);
1464        es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
1465        buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
1466        if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
1467                buf->f_bavail = 0;
1468        buf->f_files = le32_to_cpu(es->s_inodes_count);
1469        buf->f_ffree = ext2_count_free_inodes(sb);
1470        es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
1471        buf->f_namelen = EXT2_NAME_LEN;
1472        fsid = le64_to_cpup((void *)es->s_uuid) ^
1473               le64_to_cpup((void *)es->s_uuid + sizeof(u64));
1474        buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
1475        buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
1476        spin_unlock(&sbi->s_lock);
1477        return 0;
1478}
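/*
 * Editor's note: a worked example of the overhead computation above, using
 * a small hypothetical geometry -- 1 KiB blocks (so s_first_data_block is 1
 * and block 0 counts as overhead), 8 block groups, sparse_super disabled so
 * every group carries a superblock copy plus one group-descriptor block
 * (8 descriptors * 32 bytes fit in a single block), and 256 inode-table
 * blocks per group.  All of these numbers are illustrative only.
 */
#if 0	/* illustration only */
#include <stdio.h>

int main(void)
{
	unsigned long first_data_block = 1;	/* blocks before the first data block */
	unsigned long groups = 8;
	unsigned long gdb_per_group = 1;	/* group-descriptor blocks per group */
	unsigned long itb_per_group = 256;	/* inode-table blocks per group */
	unsigned long overhead;

	/* The same three components summed in ext2_statfs(). */
	overhead  = first_data_block;			/*    1 */
	overhead += groups * (1 + gdb_per_group);	/* + 16: sb copies + descriptors */
	overhead += groups * (2 + itb_per_group);	/* + 2064: bitmaps + inode tables */

	printf("overhead = %lu blocks\n", overhead);	/* prints 2081 */
	return 0;
}
#endif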
1479
1480static struct dentry *ext2_mount(struct file_system_type *fs_type,
1481        int flags, const char *dev_name, void *data)
1482{
1483        return mount_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
1484}
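/*
 * Editor's note: mount_bdev() is the generic helper for block-device-backed
 * filesystems -- it opens the device named by dev_name, finds or allocates
 * the corresponding superblock, and, when the superblock is freshly
 * allocated, calls ext2_fill_super() to read the on-disk superblock and set
 * up this sb's operations.
 */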
1485
1486#ifdef CONFIG_QUOTA
1487
1488/* Read data from the quota file, bypassing the page cache because we cannot
1489 * afford to acquire its locks here.  Quota files are never truncated and the
1490 * quota code itself serializes the operations (and nothing else should touch
1491 * the files), so there is no risk of races. */
1492static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
1493                               size_t len, loff_t off)
1494{
1495        struct inode *inode = sb_dqopt(sb)->files[type];
1496        sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
1497        int err = 0;
1498        int offset = off & (sb->s_blocksize - 1);
1499        int tocopy;
1500        size_t toread;
1501        struct buffer_head tmp_bh;
1502        struct buffer_head *bh;
1503        loff_t i_size = i_size_read(inode);
1504
1505        if (off > i_size)
1506                return 0;
1507        if (off+len > i_size)
1508                len = i_size-off;
1509        toread = len;
1510        while (toread > 0) {
1511                tocopy = sb->s_blocksize - offset < toread ?
1512                                sb->s_blocksize - offset : toread;
1513
1514                tmp_bh.b_state = 0;
1515                tmp_bh.b_size = sb->s_blocksize;
1516                err = ext2_get_block(inode, blk, &tmp_bh, 0);
1517                if (err < 0)
1518                        return err;
1519                if (!buffer_mapped(&tmp_bh))    /* A hole? */
1520                        memset(data, 0, tocopy);
1521                else {
1522                        bh = sb_bread(sb, tmp_bh.b_blocknr);
1523                        if (!bh)
1524                                return -EIO;
1525                        memcpy(data, bh->b_data+offset, tocopy);
1526                        brelse(bh);
1527                }
1528                offset = 0;
1529                toread -= tocopy;
1530                data += tocopy;
1531                blk++;
1532        }
1533        return len;
1534}
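/*
 * Editor's note: a worked example of the blk/offset arithmetic in
 * ext2_quota_read(), assuming a hypothetical 1 KiB block size
 * (EXT2_BLOCK_SIZE_BITS(sb) == 10).  Reading len = 300 bytes at off = 5000:
 *
 *	blk    = 5000 >> 10        = 4
 *	offset = 5000 & (1024 - 1) = 904
 *	1st iteration: tocopy = min(1024 - 904, 300) = 120 bytes from block 4
 *	2nd iteration: tocopy = min(1024, 180)       = 180 bytes from block 5
 *
 * i.e. the loop walks whole filesystem blocks and copies only the requested
 * byte range out of each block's buffer_head, zero-filling over holes.
 */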
1535
1536/* Write to the quota file */
1537static ssize_t ext2_quota_write(struct super_block *sb, int type,
1538                                const char *data, size_t len, loff_t off)
1539{
1540        struct inode *inode = sb_dqopt(sb)->files[type];
1541        sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
1542        int err = 0;
1543        int offset = off & (sb->s_blocksize - 1);
1544        int tocopy;
1545        size_t towrite = len;
1546        struct buffer_head tmp_bh;
1547        struct buffer_head *bh;
1548
1549        while (towrite > 0) {
1550                tocopy = sb->s_blocksize - offset < towrite ?
1551                                sb->s_blocksize - offset : towrite;
1552
1553                tmp_bh.b_state = 0;
1554                tmp_bh.b_size = sb->s_blocksize;
1555                err = ext2_get_block(inode, blk, &tmp_bh, 1);
1556                if (err < 0)
1557                        goto out;
1558                if (offset || tocopy != EXT2_BLOCK_SIZE(sb))
1559                        bh = sb_bread(sb, tmp_bh.b_blocknr);
1560                else
1561                        bh = sb_getblk(sb, tmp_bh.b_blocknr);
1562                if (unlikely(!bh)) {
1563                        err = -EIO;
1564                        goto out;
1565                }
1566                lock_buffer(bh);
1567                memcpy(bh->b_data+offset, data, tocopy);
1568                flush_dcache_page(bh->b_page);
1569                set_buffer_uptodate(bh);
1570                mark_buffer_dirty(bh);
1571                unlock_buffer(bh);
1572                brelse(bh);
1573                offset = 0;
1574                towrite -= tocopy;
1575                data += tocopy;
1576                blk++;
1577        }
1578out:
1579        if (len == towrite)
1580                return err;
1581        if (inode->i_size < off+len-towrite)
1582                i_size_write(inode, off+len-towrite);
1583        inode_inc_iversion(inode);
1584        inode->i_mtime = inode->i_ctime = current_time(inode);
1585        mark_inode_dirty(inode);
1586        return len - towrite;
1587}
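/*
 * Editor's note: the sb_bread()/sb_getblk() choice above is a
 * read-modify-write optimisation.  When the write covers only part of a
 * block (offset != 0, or tocopy is less than the block size), the old
 * contents must first be read from disk; when a whole block is being
 * overwritten, sb_getblk() just obtains a buffer without any read I/O,
 * because every byte of it is about to be rewritten anyway.
 */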
1588
1589static int ext2_quota_on(struct super_block *sb, int type, int format_id,
1590                         const struct path *path)
1591{
1592        int err;
1593        struct inode *inode;
1594
1595        err = dquot_quota_on(sb, type, format_id, path);
1596        if (err)
1597                return err;
1598
1599        inode = d_inode(path->dentry);
1600        inode_lock(inode);
1601        EXT2_I(inode)->i_flags |= EXT2_NOATIME_FL | EXT2_IMMUTABLE_FL;
1602        inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
1603                        S_NOATIME | S_IMMUTABLE);
1604        inode_unlock(inode);
1605        mark_inode_dirty(inode);
1606
1607        return 0;
1608}
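/*
 * Editor's note: a hedged userspace sketch (not part of this file) of how
 * ext2_quota_on() is reached.  quotactl(2) with Q_QUOTAON names the quota
 * file; the generic quota code then calls ->quota_on, which here marks the
 * quota-file inode NOATIME and IMMUTABLE so it cannot be modified behind
 * the quota code's back.  The device, mount point, quota-file path and
 * format constant below are all hypothetical/illustrative; QFMT_VFS_V0 is
 * assumed to come from <linux/quota.h> via a modern <sys/quota.h>.
 */
#if 0	/* illustration only -- userspace code */
#include <stdio.h>
#include <sys/types.h>
#include <sys/quota.h>

int main(void)
{
	/* Roughly what "quotaon -u /mnt/ext2" does for a vfsv0 quota file. */
	if (quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sdb1", QFMT_VFS_V0,
		     (caddr_t)"/mnt/ext2/aquota.user")) {
		perror("Q_QUOTAON");
		return 1;
	}
	return 0;
}
#endif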
1609
1610static int ext2_quota_off(struct super_block *sb, int type)
1611{
1612        struct inode *inode = sb_dqopt(sb)->files[type];
1613        int err;
1614
1615        if (!inode || !igrab(inode))
1616                goto out;
1617
1618        err = dquot_quota_off(sb, type);
1619        if (err)
1620                goto out_put;
1621
1622        inode_lock(inode);
1623        EXT2_I(inode)->i_flags &= ~(EXT2_NOATIME_FL | EXT2_IMMUTABLE_FL);
1624        inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
1625        inode_unlock(inode);
1626        mark_inode_dirty(inode);
1627out_put:
1628        iput(inode);
1629        return err;
1630out:
1631        return dquot_quota_off(sb, type);
1632}
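/*
 * Editor's note: the igrab() above pins the quota-file inode for the
 * duration of the shutdown -- dquot_quota_off() drops the quota code's own
 * reference to the file, so without the extra hold the inode could go away
 * before the NOATIME/IMMUTABLE flags are cleared and the inode is marked
 * dirty.  If the inode is already gone, a plain dquot_quota_off() suffices.
 */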
1633
1634#endif
1635
1636static struct file_system_type ext2_fs_type = {
1637        .owner          = THIS_MODULE,
1638        .name           = "ext2",
1639        .mount          = ext2_mount,
1640        .kill_sb        = kill_block_super,
1641        .fs_flags       = FS_REQUIRES_DEV,
1642};
1643MODULE_ALIAS_FS("ext2");
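/*
 * Editor's note: MODULE_ALIAS_FS("ext2") expands to an "fs-ext2" module
 * alias, so when get_fs_type() cannot find a registered "ext2" filesystem
 * type it can request_module("fs-ext2") to autoload this driver and then
 * retry the lookup.
 */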
1644
1645static int __init init_ext2_fs(void)
1646{
1647        int err;
1648
1649        err = init_inodecache();
1650        if (err)
1651                return err;
1652        err = register_filesystem(&ext2_fs_type);
1653        if (err)
1654                goto out;
1655        return 0;
1656out:
1657        destroy_inodecache();
1658        return err;
1659}
1660
1661static void __exit exit_ext2_fs(void)
1662{
1663        unregister_filesystem(&ext2_fs_type);
1664        destroy_inodecache();
1665}
1666
1667MODULE_AUTHOR("Remy Card and others");
1668MODULE_DESCRIPTION("Second Extended Filesystem");
1669MODULE_LICENSE("GPL");
1670module_init(init_ext2_fs)
1671module_exit(exit_ext2_fs)
1672