linux/fs/gfs2/ops_fstype.c
<<
>>
Prefs
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11
  12#include <linux/sched.h>
  13#include <linux/slab.h>
  14#include <linux/spinlock.h>
  15#include <linux/completion.h>
  16#include <linux/buffer_head.h>
  17#include <linux/blkdev.h>
  18#include <linux/kthread.h>
  19#include <linux/export.h>
  20#include <linux/namei.h>
  21#include <linux/mount.h>
  22#include <linux/gfs2_ondisk.h>
  23#include <linux/quotaops.h>
  24#include <linux/lockdep.h>
  25#include <linux/module.h>
  26
  27#include "gfs2.h"
  28#include "incore.h"
  29#include "bmap.h"
  30#include "glock.h"
  31#include "glops.h"
  32#include "inode.h"
  33#include "recovery.h"
  34#include "rgrp.h"
  35#include "super.h"
  36#include "sys.h"
  37#include "util.h"
  38#include "log.h"
  39#include "quota.h"
  40#include "dir.h"
  41#include "meta_io.h"
  42#include "trace_gfs2.h"
  43
  44#define DO 0
  45#define UNDO 1
  46
  47/**
  48 * gfs2_tune_init - Fill a gfs2_tune structure with default values
  49 * @gt: tune
  50 *
  51 */
  52
  53static void gfs2_tune_init(struct gfs2_tune *gt)
  54{
  55        spin_lock_init(&gt->gt_spin);
  56
  57        gt->gt_quota_warn_period = 10;
  58        gt->gt_quota_scale_num = 1;
  59        gt->gt_quota_scale_den = 1;
  60        gt->gt_new_files_jdata = 0;
  61        gt->gt_max_readahead = 1 << 18;
  62        gt->gt_complain_secs = 10;
  63}
  64
/**
 * init_sbd - Allocate and initialize an in-core superblock (gfs2_sbd)
 * @sb: the VFS superblock this gfs2_sbd belongs to
 *
 * Allocates the gfs2_sbd, links it to @sb via s_fs_info, and initializes
 * all of its locks, lists, wait queues and the rgrp metadata address space.
 *
 * Returns: the new gfs2_sbd, or NULL on allocation failure
 */
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
	struct gfs2_sbd *sdp;
	struct address_space *mapping;

	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
	if (!sdp)
		return NULL;

	sb->s_fs_info = sdp;
	sdp->sd_vfs = sb;
	/* Per-cpu glock statistics; freed with the sbd on failure below */
	sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
	if (!sdp->sd_lkstats) {
		kfree(sdp);
		return NULL;
	}

	/* No journal id has been assigned yet */
	set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	gfs2_tune_init(&sdp->sd_tune);

	init_waitqueue_head(&sdp->sd_glock_wait);
	atomic_set(&sdp->sd_glock_disposal, 0);
	init_completion(&sdp->sd_locking_init);
	init_completion(&sdp->sd_wdack);
	spin_lock_init(&sdp->sd_statfs_spin);

	/* Resource group index */
	spin_lock_init(&sdp->sd_rindex_spin);
	sdp->sd_rindex_tree.rb_node = NULL;

	/* Journal index */
	INIT_LIST_HEAD(&sdp->sd_jindex_list);
	spin_lock_init(&sdp->sd_jindex_spin);
	mutex_init(&sdp->sd_jindex_mutex);
	init_completion(&sdp->sd_journal_ready);

	/* Quota state */
	INIT_LIST_HEAD(&sdp->sd_quota_list);
	mutex_init(&sdp->sd_quota_mutex);
	mutex_init(&sdp->sd_quota_sync_mutex);
	init_waitqueue_head(&sdp->sd_quota_wait);
	INIT_LIST_HEAD(&sdp->sd_trunc_list);
	spin_lock_init(&sdp->sd_trunc_lock);
	spin_lock_init(&sdp->sd_bitmap_lock);

	/*
	 * Private address space for rgrp metadata, backed by the block
	 * device inode rather than a file inode.
	 */
	mapping = &sdp->sd_aspace;

	address_space_init_once(mapping);
	mapping->a_ops = &gfs2_rgrp_aops;
	mapping->host = sb->s_bdev->bd_inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);	/* avoid fs recursion */
	mapping->private_data = NULL;
	mapping->writeback_index = 0;

	/* Log / journal state */
	spin_lock_init(&sdp->sd_log_lock);
	atomic_set(&sdp->sd_log_pinned, 0);
	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
	INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
	spin_lock_init(&sdp->sd_ordered_lock);

	init_waitqueue_head(&sdp->sd_log_waitq);
	init_waitqueue_head(&sdp->sd_logd_waitq);
	spin_lock_init(&sdp->sd_ail_lock);
	INIT_LIST_HEAD(&sdp->sd_ail1_list);
	INIT_LIST_HEAD(&sdp->sd_ail2_list);

	init_rwsem(&sdp->sd_log_flush_lock);
	atomic_set(&sdp->sd_log_in_flight, 0);
	atomic_set(&sdp->sd_reserving_log, 0);
	init_waitqueue_head(&sdp->sd_reserving_log_wait);
	init_waitqueue_head(&sdp->sd_log_flush_wait);
	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
	mutex_init(&sdp->sd_freeze_mutex);

	return sdp;
}
 139
 140
 141/**
 142 * gfs2_check_sb - Check superblock
 143 * @sdp: the filesystem
 144 * @sb: The superblock
 145 * @silent: Don't print a message if the check fails
 146 *
 147 * Checks the version code of the FS is one that we understand how to
 148 * read and that the sizes of the various on-disk structures have not
 149 * changed.
 150 */
 151
 152static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
 153{
 154        struct gfs2_sb_host *sb = &sdp->sd_sb;
 155
 156        if (sb->sb_magic != GFS2_MAGIC ||
 157            sb->sb_type != GFS2_METATYPE_SB) {
 158                if (!silent)
 159                        pr_warn("not a GFS2 filesystem\n");
 160                return -EINVAL;
 161        }
 162
 163        /*  If format numbers match exactly, we're done.  */
 164
 165        if (sb->sb_fs_format == GFS2_FORMAT_FS &&
 166            sb->sb_multihost_format == GFS2_FORMAT_MULTI)
 167                return 0;
 168
 169        fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
 170
 171        return -EINVAL;
 172}
 173
 174static void end_bio_io_page(struct bio *bio)
 175{
 176        struct page *page = bio->bi_private;
 177
 178        if (!bio->bi_error)
 179                SetPageUptodate(page);
 180        else
 181                pr_warn("error %d reading superblock\n", bio->bi_error);
 182        unlock_page(page);
 183}
 184
 185static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
 186{
 187        struct gfs2_sb_host *sb = &sdp->sd_sb;
 188        struct super_block *s = sdp->sd_vfs;
 189        const struct gfs2_sb *str = buf;
 190
 191        sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
 192        sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
 193        sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
 194        sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
 195        sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
 196        sb->sb_bsize = be32_to_cpu(str->sb_bsize);
 197        sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
 198        sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
 199        sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
 200        sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
 201        sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);
 202
 203        memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
 204        memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
 205        memcpy(s->s_uuid, str->sb_uuid, 16);
 206}
 207
 208/**
 209 * gfs2_read_super - Read the gfs2 super block from disk
 210 * @sdp: The GFS2 super block
 211 * @sector: The location of the super block
 212 * @error: The error code to return
 213 *
 214 * This uses the bio functions to read the super block from disk
 215 * because we want to be 100% sure that we never read cached data.
 216 * A super block is read twice only during each GFS2 mount and is
 217 * never written to by the filesystem. The first time its read no
 218 * locks are held, and the only details which are looked at are those
 219 * relating to the locking protocol. Once locking is up and working,
 220 * the sb is read again under the lock to establish the location of
 221 * the master directory (contains pointers to journals etc) and the
 222 * root directory.
 223 *
 224 * Returns: 0 on success or error
 225 */
 226
 227static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 228{
 229        struct super_block *sb = sdp->sd_vfs;
 230        struct gfs2_sb *p;
 231        struct page *page;
 232        struct bio *bio;
 233
 234        page = alloc_page(GFP_NOFS);
 235        if (unlikely(!page))
 236                return -ENOMEM;
 237
 238        ClearPageUptodate(page);
 239        ClearPageDirty(page);
 240        lock_page(page);
 241
 242        bio = bio_alloc(GFP_NOFS, 1);
 243        bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
 244        bio->bi_bdev = sb->s_bdev;
 245        bio_add_page(bio, page, PAGE_SIZE, 0);
 246
 247        bio->bi_end_io = end_bio_io_page;
 248        bio->bi_private = page;
 249        bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META);
 250        submit_bio(bio);
 251        wait_on_page_locked(page);
 252        bio_put(bio);
 253        if (!PageUptodate(page)) {
 254                __free_page(page);
 255                return -EIO;
 256        }
 257        p = kmap(page);
 258        gfs2_sb_in(sdp, p);
 259        kunmap(page);
 260        __free_page(page);
 261        return gfs2_check_sb(sdp, silent);
 262}
 263
 264/**
 265 * gfs2_read_sb - Read super block
 266 * @sdp: The GFS2 superblock
 267 * @silent: Don't print message if mount fails
 268 *
 269 */
 270
 271static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
 272{
 273        u32 hash_blocks, ind_blocks, leaf_blocks;
 274        u32 tmp_blocks;
 275        unsigned int x;
 276        int error;
 277
 278        error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
 279        if (error) {
 280                if (!silent)
 281                        fs_err(sdp, "can't read superblock\n");
 282                return error;
 283        }
 284
 285        sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
 286                               GFS2_BASIC_BLOCK_SHIFT;
 287        sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
 288        sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
 289                          sizeof(struct gfs2_dinode)) / sizeof(u64);
 290        sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
 291                          sizeof(struct gfs2_meta_header)) / sizeof(u64);
 292        sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
 293        sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
 294        sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
 295        sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
 296        sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
 297                                sizeof(struct gfs2_meta_header)) /
 298                                sizeof(struct gfs2_quota_change);
 299        sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
 300                                     sizeof(struct gfs2_meta_header))
 301                * GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
 302
 303        /* Compute maximum reservation required to add a entry to a directory */
 304
 305        hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH),
 306                             sdp->sd_jbsize);
 307
 308        ind_blocks = 0;
 309        for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
 310                tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
 311                ind_blocks += tmp_blocks;
 312        }
 313
 314        leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;
 315
 316        sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;
 317
 318        sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
 319                                sizeof(struct gfs2_dinode);
 320        sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
 321        for (x = 2;; x++) {
 322                u64 space, d;
 323                u32 m;
 324
 325                space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
 326                d = space;
 327                m = do_div(d, sdp->sd_inptrs);
 328
 329                if (d != sdp->sd_heightsize[x - 1] || m)
 330                        break;
 331                sdp->sd_heightsize[x] = space;
 332        }
 333        sdp->sd_max_height = x;
 334        sdp->sd_heightsize[x] = ~0;
 335        gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
 336
 337        sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
 338                                 sizeof(struct gfs2_dinode);
 339        sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
 340        for (x = 2;; x++) {
 341                u64 space, d;
 342                u32 m;
 343
 344                space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
 345                d = space;
 346                m = do_div(d, sdp->sd_inptrs);
 347
 348                if (d != sdp->sd_jheightsize[x - 1] || m)
 349                        break;
 350                sdp->sd_jheightsize[x] = space;
 351        }
 352        sdp->sd_max_jheight = x;
 353        sdp->sd_jheightsize[x] = ~0;
 354        gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);
 355
 356        sdp->sd_max_dents_per_leaf = (sdp->sd_sb.sb_bsize -
 357                                      sizeof(struct gfs2_leaf)) /
 358                                     GFS2_MIN_DIRENT_SIZE;
 359        return 0;
 360}
 361
 362static int init_names(struct gfs2_sbd *sdp, int silent)
 363{
 364        char *proto, *table;
 365        int error = 0;
 366
 367        proto = sdp->sd_args.ar_lockproto;
 368        table = sdp->sd_args.ar_locktable;
 369
 370        /*  Try to autodetect  */
 371
 372        if (!proto[0] || !table[0]) {
 373                error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
 374                if (error)
 375                        return error;
 376
 377                if (!proto[0])
 378                        proto = sdp->sd_sb.sb_lockproto;
 379                if (!table[0])
 380                        table = sdp->sd_sb.sb_locktable;
 381        }
 382
 383        if (!table[0])
 384                table = sdp->sd_vfs->s_id;
 385
 386        strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
 387        strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
 388
 389        table = sdp->sd_table_name;
 390        while ((table = strchr(table, '/')))
 391                *table = '_';
 392
 393        return error;
 394}
 395
/*
 * init_locking - Acquire the mount-time glocks
 * @sdp: The GFS2 superblock
 * @mount_gh: holder for the mount glock (filled in here)
 * @undo: if set, release everything a previous successful call acquired
 *
 * Acquires the mount and "live" glocks and creates the rename and
 * freeze glocks.  The error/undo paths form a reverse-order ladder;
 * @undo enters the ladder at the top so the whole set is torn down.
 *
 * Returns: errno
 */
static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
			int undo)
{
	int error = 0;

	if (undo)
		goto fail_trans;

	/* Exclusive, uncached: held only while mounting */
	error = gfs2_glock_nq_num(sdp,
				  GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
				  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
				  mount_gh);
	if (error) {
		fs_err(sdp, "can't acquire mount glock: %d\n", error);
		goto fail;
	}

	/* Shared "live" lock, held for the life of the mount */
	error = gfs2_glock_nq_num(sdp,
				  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
				  LM_ST_SHARED,
				  LM_FLAG_NOEXP | GL_EXACT,
				  &sdp->sd_live_gh);
	if (error) {
		fs_err(sdp, "can't acquire live glock: %d\n", error);
		goto fail_mount;
	}

	/* Created but not enqueued; used to serialize cross-dir renames */
	error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
			       CREATE, &sdp->sd_rename_gl);
	if (error) {
		fs_err(sdp, "can't create rename glock: %d\n", error);
		goto fail_live;
	}

	/* Created but not enqueued; used for freeze/thaw coordination */
	error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
			       CREATE, &sdp->sd_freeze_gl);
	if (error) {
		fs_err(sdp, "can't create transaction glock: %d\n", error);
		goto fail_rename;
	}

	return 0;

fail_trans:
	gfs2_glock_put(sdp->sd_freeze_gl);
fail_rename:
	gfs2_glock_put(sdp->sd_rename_gl);
fail_live:
	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
fail_mount:
	gfs2_glock_dq_uninit(mount_gh);
fail:
	return error;
}
 450
 451static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
 452                            u64 no_addr, const char *name)
 453{
 454        struct gfs2_sbd *sdp = sb->s_fs_info;
 455        struct dentry *dentry;
 456        struct inode *inode;
 457
 458        inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
 459                                  GFS2_BLKST_FREE /* ignore */);
 460        if (IS_ERR(inode)) {
 461                fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
 462                return PTR_ERR(inode);
 463        }
 464        dentry = d_make_root(inode);
 465        if (!dentry) {
 466                fs_err(sdp, "can't alloc %s dentry\n", name);
 467                return -ENOMEM;
 468        }
 469        *dptr = dentry;
 470        return 0;
 471}
 472
/*
 * init_sb - Read the superblock under its glock and set up the VFS sb
 * @sdp: The GFS2 superblock
 * @silent: Don't print a message if the read fails
 *
 * Holds the superblock glock (shared) across the read, validates the
 * block size against the device and page size, sets the VFS block
 * size, and looks up the root and master directories.
 *
 * Returns: errno
 */
static int init_sb(struct gfs2_sbd *sdp, int silent)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_holder sb_gh;
	u64 no_addr;
	int ret;

	ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
				LM_ST_SHARED, 0, &sb_gh);
	if (ret) {
		fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
		return ret;
	}

	ret = gfs2_read_sb(sdp, silent);
	if (ret) {
		fs_err(sdp, "can't read superblock: %d\n", ret);
		goto out;
	}

	/* Set up the buffer cache and SB for real */
	if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
		ret = -EINVAL;
		fs_err(sdp, "FS block size (%u) is too small for device "
		       "block size (%u)\n",
		       sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
		goto out;
	}
	if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
		ret = -EINVAL;
		fs_err(sdp, "FS block size (%u) is too big for machine "
		       "page size (%u)\n",
		       sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
		goto out;
	}
	sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);

	/* Get the root inode */
	no_addr = sdp->sd_sb.sb_root_dir.no_addr;
	ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
	if (ret)
		goto out;

	/* Get the master inode */
	no_addr = sdp->sd_sb.sb_master_dir.no_addr;
	ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
	if (ret) {
		dput(sdp->sd_root_dir);	/* undo the root lookup above */
		goto out;
	}
	/* A meta-fs mount presents the master dir as the visible root */
	sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);
out:
	gfs2_glock_dq_uninit(&sb_gh);
	return ret;
}
 528
/*
 * gfs2_others_may_mount - Signal that first-mount work is complete
 * @sdp: The GFS2 superblock
 *
 * Notifies the lock module (if it cares) and emits a FIRSTMOUNT=Done
 * uevent so other nodes/userspace know they may now mount.
 */
static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
{
	char *message = "FIRSTMOUNT=Done";
	char *envp[] = { message, NULL };

	fs_info(sdp, "first mount done, others may mount\n");

	/* lm_first_done is optional in the lock module ops */
	if (sdp->sd_lockstruct.ls_ops->lm_first_done)
		sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);

	kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
}
 541
 542/**
 543 * gfs2_jindex_hold - Grab a lock on the jindex
 544 * @sdp: The GFS2 superblock
 545 * @ji_gh: the holder for the jindex glock
 546 *
 547 * Returns: errno
 548 */
 549
 550static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
 551{
 552        struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
 553        struct qstr name;
 554        char buf[20];
 555        struct gfs2_jdesc *jd;
 556        int error;
 557
 558        name.name = buf;
 559
 560        mutex_lock(&sdp->sd_jindex_mutex);
 561
 562        for (;;) {
 563                error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
 564                if (error)
 565                        break;
 566
 567                name.len = sprintf(buf, "journal%u", sdp->sd_journals);
 568                name.hash = gfs2_disk_hash(name.name, name.len);
 569
 570                error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
 571                if (error == -ENOENT) {
 572                        error = 0;
 573                        break;
 574                }
 575
 576                gfs2_glock_dq_uninit(ji_gh);
 577
 578                if (error)
 579                        break;
 580
 581                error = -ENOMEM;
 582                jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
 583                if (!jd)
 584                        break;
 585
 586                INIT_LIST_HEAD(&jd->extent_list);
 587                INIT_LIST_HEAD(&jd->jd_revoke_list);
 588
 589                INIT_WORK(&jd->jd_work, gfs2_recover_func);
 590                jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
 591                if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
 592                        if (!jd->jd_inode)
 593                                error = -ENOENT;
 594                        else
 595                                error = PTR_ERR(jd->jd_inode);
 596                        kfree(jd);
 597                        break;
 598                }
 599
 600                spin_lock(&sdp->sd_jindex_spin);
 601                jd->jd_jid = sdp->sd_journals++;
 602                list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
 603                spin_unlock(&sdp->sd_jindex_spin);
 604        }
 605
 606        mutex_unlock(&sdp->sd_jindex_mutex);
 607
 608        return error;
 609}
 610
 611/**
 612 * check_journal_clean - Make sure a journal is clean for a spectator mount
 613 * @sdp: The GFS2 superblock
 614 * @jd: The journal descriptor
 615 *
 616 * Returns: 0 if the journal is clean or locked, else an error
 617 */
 618static int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
 619{
 620        int error;
 621        struct gfs2_holder j_gh;
 622        struct gfs2_log_header_host head;
 623        struct gfs2_inode *ip;
 624
 625        ip = GFS2_I(jd->jd_inode);
 626        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
 627                                   GL_EXACT | GL_NOCACHE, &j_gh);
 628        if (error) {
 629                fs_err(sdp, "Error locking journal for spectator mount.\n");
 630                return -EPERM;
 631        }
 632        error = gfs2_jdesc_check(jd);
 633        if (error) {
 634                fs_err(sdp, "Error checking journal for spectator mount.\n");
 635                goto out_unlock;
 636        }
 637        error = gfs2_find_jhead(jd, &head);
 638        if (error) {
 639                fs_err(sdp, "Error parsing journal for spectator mount.\n");
 640                goto out_unlock;
 641        }
 642        if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
 643                error = -EPERM;
 644                fs_err(sdp, "jid=%u: Journal is dirty, so the first mounter "
 645                       "must not be a spectator.\n", jd->jd_jid);
 646        }
 647
 648out_unlock:
 649        gfs2_glock_dq_uninit(&j_gh);
 650        return error;
 651}
 652
 653static int init_journal(struct gfs2_sbd *sdp, int undo)
 654{
 655        struct inode *master = d_inode(sdp->sd_master_dir);
 656        struct gfs2_holder ji_gh;
 657        struct gfs2_inode *ip;
 658        int jindex = 1;
 659        int error = 0;
 660
 661        if (undo) {
 662                jindex = 0;
 663                goto fail_jinode_gh;
 664        }
 665
 666        sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
 667        if (IS_ERR(sdp->sd_jindex)) {
 668                fs_err(sdp, "can't lookup journal index: %d\n", error);
 669                return PTR_ERR(sdp->sd_jindex);
 670        }
 671
 672        /* Load in the journal index special file */
 673
 674        error = gfs2_jindex_hold(sdp, &ji_gh);
 675        if (error) {
 676                fs_err(sdp, "can't read journal index: %d\n", error);
 677                goto fail;
 678        }
 679
 680        error = -EUSERS;
 681        if (!gfs2_jindex_size(sdp)) {
 682                fs_err(sdp, "no journals!\n");
 683                goto fail_jindex;
 684        }
 685
 686        if (sdp->sd_args.ar_spectator) {
 687                sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
 688                atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
 689                atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
 690                atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
 691        } else {
 692                if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
 693                        fs_err(sdp, "can't mount journal #%u\n",
 694                               sdp->sd_lockstruct.ls_jid);
 695                        fs_err(sdp, "there are only %u journals (0 - %u)\n",
 696                               gfs2_jindex_size(sdp),
 697                               gfs2_jindex_size(sdp) - 1);
 698                        goto fail_jindex;
 699                }
 700                sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);
 701
 702                error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
 703                                          &gfs2_journal_glops,
 704                                          LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
 705                                          &sdp->sd_journal_gh);
 706                if (error) {
 707                        fs_err(sdp, "can't acquire journal glock: %d\n", error);
 708                        goto fail_jindex;
 709                }
 710
 711                ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 712                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
 713                                           LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
 714                                           &sdp->sd_jinode_gh);
 715                if (error) {
 716                        fs_err(sdp, "can't acquire journal inode glock: %d\n",
 717                               error);
 718                        goto fail_journal_gh;
 719                }
 720
 721                error = gfs2_jdesc_check(sdp->sd_jdesc);
 722                if (error) {
 723                        fs_err(sdp, "my journal (%u) is bad: %d\n",
 724                               sdp->sd_jdesc->jd_jid, error);
 725                        goto fail_jinode_gh;
 726                }
 727                atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
 728                atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
 729                atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
 730
 731                /* Map the extents for this journal's blocks */
 732                gfs2_map_journal_extents(sdp, sdp->sd_jdesc);
 733        }
 734        trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));
 735
 736        if (sdp->sd_lockstruct.ls_first) {
 737                unsigned int x;
 738                for (x = 0; x < sdp->sd_journals; x++) {
 739                        struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);
 740
 741                        if (sdp->sd_args.ar_spectator) {
 742                                error = check_journal_clean(sdp, jd);
 743                                if (error)
 744                                        goto fail_jinode_gh;
 745                                continue;
 746                        }
 747                        error = gfs2_recover_journal(jd, true);
 748                        if (error) {
 749                                fs_err(sdp, "error recovering journal %u: %d\n",
 750                                       x, error);
 751                                goto fail_jinode_gh;
 752                        }
 753                }
 754
 755                gfs2_others_may_mount(sdp);
 756        } else if (!sdp->sd_args.ar_spectator) {
 757                error = gfs2_recover_journal(sdp->sd_jdesc, true);
 758                if (error) {
 759                        fs_err(sdp, "error recovering my journal: %d\n", error);
 760                        goto fail_jinode_gh;
 761                }
 762        }
 763
 764        sdp->sd_log_idle = 1;
 765        set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
 766        gfs2_glock_dq_uninit(&ji_gh);
 767        jindex = 0;
 768        INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
 769        return 0;
 770
 771fail_jinode_gh:
 772        if (!sdp->sd_args.ar_spectator)
 773                gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
 774fail_journal_gh:
 775        if (!sdp->sd_args.ar_spectator)
 776                gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
 777fail_jindex:
 778        gfs2_jindex_free(sdp);
 779        if (jindex)
 780                gfs2_glock_dq_uninit(&ji_gh);
 781fail:
 782        iput(sdp->sd_jindex);
 783        return error;
 784}
 785
/* Separate lockdep class for the hidden quota inode's i_rwsem */
static struct lock_class_key gfs2_quota_imutex_key;

/*
 * init_inodes - Look up the system inodes (journal, statfs, rindex, quota)
 * @sdp: The GFS2 superblock
 * @undo: If set, release everything a previous successful call set up
 *
 * The error paths form a reverse-order goto ladder; @undo enters the
 * ladder at the top so the full set is torn down.
 *
 * Returns: errno
 */
static int init_inodes(struct gfs2_sbd *sdp, int undo)
{
	int error = 0;
	struct inode *master = d_inode(sdp->sd_master_dir);

	if (undo)
		goto fail_qinode;

	error = init_journal(sdp, undo);
	/* Wake anyone waiting for journal setup, success or not */
	complete_all(&sdp->sd_journal_ready);
	if (error)
		goto fail;

	/* Read in the master statfs inode */
	sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
	if (IS_ERR(sdp->sd_statfs_inode)) {
		error = PTR_ERR(sdp->sd_statfs_inode);
		fs_err(sdp, "can't read in statfs inode: %d\n", error);
		goto fail_journal;
	}

	/* Read in the resource index inode */
	sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
	if (IS_ERR(sdp->sd_rindex)) {
		error = PTR_ERR(sdp->sd_rindex);
		fs_err(sdp, "can't get resource index inode: %d\n", error);
		goto fail_statfs;
	}
	sdp->sd_rindex_uptodate = 0;

	/* Read in the quota inode */
	sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
	if (IS_ERR(sdp->sd_quota_inode)) {
		error = PTR_ERR(sdp->sd_quota_inode);
		fs_err(sdp, "can't get quota file inode: %d\n", error);
		goto fail_rindex;
	}
	/*
	 * i_mutex on quota files is special. Since this inode is hidden system
	 * file, we are safe to define locking ourselves.
	 */
	lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
			  &gfs2_quota_imutex_key);

	error = gfs2_rindex_update(sdp);
	if (error)
		goto fail_qinode;

	return 0;

fail_qinode:
	iput(sdp->sd_quota_inode);
fail_rindex:
	gfs2_clear_rgrpd(sdp);
	iput(sdp->sd_rindex);
fail_statfs:
	iput(sdp->sd_statfs_inode);
fail_journal:
	init_journal(sdp, UNDO);
fail:
	return error;
}
 850
/**
 * init_per_node - Set up this node's per-journal system files
 * @sdp: The GFS2 superblock
 * @undo: If non-zero, release everything a previous DO pass acquired
 *
 * Looks up the "statfs_change<jid>" and "quota_change<jid>" inodes in the
 * hidden per_node directory (named after this node's journal id) and takes
 * an exclusive glock on each.  Spectator mounts have no journal of their
 * own, so they skip all of this.
 *
 * Returns: errno
 */
static int init_per_node(struct gfs2_sbd *sdp, int undo)
{
        struct inode *pn = NULL;
        char buf[30];
        int error = 0;
        struct gfs2_inode *ip;
        struct inode *master = d_inode(sdp->sd_master_dir);

        if (sdp->sd_args.ar_spectator)
                return 0;

        /* UNDO: fall into the error unwind from the top */
        if (undo)
                goto fail_qc_gh;

        pn = gfs2_lookup_simple(master, "per_node");
        if (IS_ERR(pn)) {
                error = PTR_ERR(pn);
                fs_err(sdp, "can't find per_node directory: %d\n", error);
                return error;
        }

        /* Per-journal statfs change file, keyed by our journal id */
        sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
        sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
        if (IS_ERR(sdp->sd_sc_inode)) {
                error = PTR_ERR(sdp->sd_sc_inode);
                fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
                goto fail;
        }

        /* Per-journal quota change file, keyed by our journal id */
        sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
        sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
        if (IS_ERR(sdp->sd_qc_inode)) {
                error = PTR_ERR(sdp->sd_qc_inode);
                fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
                goto fail_ut_i;
        }

        /* Directory no longer needed once both files are looked up */
        iput(pn);
        pn = NULL;

        ip = GFS2_I(sdp->sd_sc_inode);
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
                                   &sdp->sd_sc_gh);
        if (error) {
                fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
                goto fail_qc_i;
        }

        ip = GFS2_I(sdp->sd_qc_inode);
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
                                   &sdp->sd_qc_gh);
        if (error) {
                fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
                goto fail_ut_gh;
        }

        return 0;

        /*
         * Unwind in reverse order of acquisition.  NOTE(review): the "ut"
         * label names look historical; they actually unwind the "sc"
         * (statfs change) resources.
         */
fail_qc_gh:
        gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
fail_ut_gh:
        gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
fail_qc_i:
        iput(sdp->sd_qc_inode);
fail_ut_i:
        iput(sdp->sd_sc_inode);
fail:
        iput(pn);       /* pn is NULL on later paths; iput(NULL) is a no-op */
        return error;
}
 921
 922static const match_table_t nolock_tokens = {
 923        { Opt_jid, "jid=%d\n", },
 924        { Opt_err, NULL },
 925};
 926
/*
 * Lock operations for single-node (lock_nolock) mounts.  There is no
 * lm_mount hook, which gfs2_lm_mount() treats as "no cluster to join",
 * and wait_on_journal() treats as "no journal id to wait for".
 */
static const struct lm_lockops nolock_ops = {
        .lm_proto_name = "lock_nolock",
        .lm_put_lock = gfs2_glock_free,
        .lm_tokens = &nolock_tokens,
};
 932
 933/**
 934 * gfs2_lm_mount - mount a locking protocol
 935 * @sdp: the filesystem
 936 * @args: mount arguments
 937 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 938 *
 939 * Returns: errno
 940 */
 941
 942static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
 943{
 944        const struct lm_lockops *lm;
 945        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 946        struct gfs2_args *args = &sdp->sd_args;
 947        const char *proto = sdp->sd_proto_name;
 948        const char *table = sdp->sd_table_name;
 949        char *o, *options;
 950        int ret;
 951
 952        if (!strcmp("lock_nolock", proto)) {
 953                lm = &nolock_ops;
 954                sdp->sd_args.ar_localflocks = 1;
 955#ifdef CONFIG_GFS2_FS_LOCKING_DLM
 956        } else if (!strcmp("lock_dlm", proto)) {
 957                lm = &gfs2_dlm_ops;
 958#endif
 959        } else {
 960                pr_info("can't find protocol %s\n", proto);
 961                return -ENOENT;
 962        }
 963
 964        fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
 965
 966        ls->ls_ops = lm;
 967        ls->ls_first = 1;
 968
 969        for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
 970                substring_t tmp[MAX_OPT_ARGS];
 971                int token, option;
 972
 973                if (!o || !*o)
 974                        continue;
 975
 976                token = match_token(o, *lm->lm_tokens, tmp);
 977                switch (token) {
 978                case Opt_jid:
 979                        ret = match_int(&tmp[0], &option);
 980                        if (ret || option < 0) 
 981                                goto hostdata_error;
 982                        if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
 983                                ls->ls_jid = option;
 984                        break;
 985                case Opt_id:
 986                case Opt_nodir:
 987                        /* Obsolete, but left for backward compat purposes */
 988                        break;
 989                case Opt_first:
 990                        ret = match_int(&tmp[0], &option);
 991                        if (ret || (option != 0 && option != 1))
 992                                goto hostdata_error;
 993                        ls->ls_first = option;
 994                        break;
 995                case Opt_err:
 996                default:
 997hostdata_error:
 998                        fs_info(sdp, "unknown hostdata (%s)\n", o);
 999                        return -EINVAL;
1000                }
1001        }
1002
1003        if (lm->lm_mount == NULL) {
1004                fs_info(sdp, "Now mounting FS...\n");
1005                complete_all(&sdp->sd_locking_init);
1006                return 0;
1007        }
1008        ret = lm->lm_mount(sdp, table);
1009        if (ret == 0)
1010                fs_info(sdp, "Joined cluster. Now mounting FS...\n");
1011        complete_all(&sdp->sd_locking_init);
1012        return ret;
1013}
1014
1015void gfs2_lm_unmount(struct gfs2_sbd *sdp)
1016{
1017        const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
1018        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
1019            lm->lm_unmount)
1020                lm->lm_unmount(sdp);
1021}
1022
1023static int wait_on_journal(struct gfs2_sbd *sdp)
1024{
1025        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
1026                return 0;
1027
1028        return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
1029                ? -EINTR : 0;
1030}
1031
1032void gfs2_online_uevent(struct gfs2_sbd *sdp)
1033{
1034        struct super_block *sb = sdp->sd_vfs;
1035        char ro[20];
1036        char spectator[20];
1037        char *envp[] = { ro, spectator, NULL };
1038        sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0);
1039        sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
1040        kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
1041}
1042
1043/**
1044 * fill_super - Read in superblock
1045 * @sb: The VFS superblock
1046 * @data: Mount options
1047 * @silent: Don't complain if it's not a GFS2 filesystem
1048 *
1049 * Returns: errno
1050 */
1051
1052static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
1053{
1054        struct gfs2_sbd *sdp;
1055        struct gfs2_holder mount_gh;
1056        int error;
1057
1058        sdp = init_sbd(sb);
1059        if (!sdp) {
1060                pr_warn("can't alloc struct gfs2_sbd\n");
1061                return -ENOMEM;
1062        }
1063        sdp->sd_args = *args;
1064
1065        if (sdp->sd_args.ar_spectator) {
1066                sb->s_flags |= MS_RDONLY;
1067                set_bit(SDF_RORECOVERY, &sdp->sd_flags);
1068        }
1069        if (sdp->sd_args.ar_posix_acl)
1070                sb->s_flags |= MS_POSIXACL;
1071        if (sdp->sd_args.ar_nobarrier)
1072                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1073
1074        sb->s_flags |= MS_NOSEC;
1075        sb->s_magic = GFS2_MAGIC;
1076        sb->s_op = &gfs2_super_ops;
1077        sb->s_d_op = &gfs2_dops;
1078        sb->s_export_op = &gfs2_export_ops;
1079        sb->s_xattr = gfs2_xattr_handlers;
1080        sb->s_qcop = &gfs2_quotactl_ops;
1081        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
1082        sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
1083        sb->s_time_gran = 1;
1084        sb->s_maxbytes = MAX_LFS_FILESIZE;
1085
1086        /* Set up the buffer cache and fill in some fake block size values
1087           to allow us to read-in the on-disk superblock. */
1088        sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
1089        sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
1090        sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
1091                               GFS2_BASIC_BLOCK_SHIFT;
1092        sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
1093
1094        sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
1095        sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
1096        if (sdp->sd_args.ar_statfs_quantum) {
1097                sdp->sd_tune.gt_statfs_slow = 0;
1098                sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
1099        } else {
1100                sdp->sd_tune.gt_statfs_slow = 1;
1101                sdp->sd_tune.gt_statfs_quantum = 30;
1102        }
1103
1104        error = init_names(sdp, silent);
1105        if (error) {
1106                /* In this case, we haven't initialized sysfs, so we have to
1107                   manually free the sdp. */
1108                free_percpu(sdp->sd_lkstats);
1109                kfree(sdp);
1110                sb->s_fs_info = NULL;
1111                return error;
1112        }
1113
1114        snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s", sdp->sd_table_name);
1115
1116        error = gfs2_sys_fs_add(sdp);
1117        /*
1118         * If we hit an error here, gfs2_sys_fs_add will have called function
1119         * kobject_put which causes the sysfs usage count to go to zero, which
1120         * causes sysfs to call function gfs2_sbd_release, which frees sdp.
1121         * Subsequent error paths here will call gfs2_sys_fs_del, which also
1122         * kobject_put to free sdp.
1123         */
1124        if (error)
1125                return error;
1126
1127        gfs2_create_debugfs_file(sdp);
1128
1129        error = gfs2_lm_mount(sdp, silent);
1130        if (error)
1131                goto fail_debug;
1132
1133        error = init_locking(sdp, &mount_gh, DO);
1134        if (error)
1135                goto fail_lm;
1136
1137        error = init_sb(sdp, silent);
1138        if (error)
1139                goto fail_locking;
1140
1141        error = wait_on_journal(sdp);
1142        if (error)
1143                goto fail_sb;
1144
1145        /*
1146         * If user space has failed to join the cluster or some similar
1147         * failure has occurred, then the journal id will contain a
1148         * negative (error) number. This will then be returned to the
1149         * caller (of the mount syscall). We do this even for spectator
1150         * mounts (which just write a jid of 0 to indicate "ok" even though
1151         * the jid is unused in the spectator case)
1152         */
1153        if (sdp->sd_lockstruct.ls_jid < 0) {
1154                error = sdp->sd_lockstruct.ls_jid;
1155                sdp->sd_lockstruct.ls_jid = 0;
1156                goto fail_sb;
1157        }
1158
1159        if (sdp->sd_args.ar_spectator)
1160                snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s",
1161                         sdp->sd_table_name);
1162        else
1163                snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u",
1164                         sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);
1165
1166        error = init_inodes(sdp, DO);
1167        if (error)
1168                goto fail_sb;
1169
1170        error = init_per_node(sdp, DO);
1171        if (error)
1172                goto fail_inodes;
1173
1174        error = gfs2_statfs_init(sdp);
1175        if (error) {
1176                fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
1177                goto fail_per_node;
1178        }
1179
1180        if (!(sb->s_flags & MS_RDONLY)) {
1181                error = gfs2_make_fs_rw(sdp);
1182                if (error) {
1183                        fs_err(sdp, "can't make FS RW: %d\n", error);
1184                        goto fail_per_node;
1185                }
1186        }
1187
1188        gfs2_glock_dq_uninit(&mount_gh);
1189        gfs2_online_uevent(sdp);
1190        return 0;
1191
1192fail_per_node:
1193        init_per_node(sdp, UNDO);
1194fail_inodes:
1195        init_inodes(sdp, UNDO);
1196fail_sb:
1197        if (sdp->sd_root_dir)
1198                dput(sdp->sd_root_dir);
1199        if (sdp->sd_master_dir)
1200                dput(sdp->sd_master_dir);
1201        if (sb->s_root)
1202                dput(sb->s_root);
1203        sb->s_root = NULL;
1204fail_locking:
1205        init_locking(sdp, &mount_gh, UNDO);
1206fail_lm:
1207        complete_all(&sdp->sd_journal_ready);
1208        gfs2_gl_hash_clear(sdp);
1209        gfs2_lm_unmount(sdp);
1210fail_debug:
1211        gfs2_delete_debugfs_file(sdp);
1212        free_percpu(sdp->sd_lkstats);
1213        /* gfs2_sys_fs_del must be the last thing we do, since it causes
1214         * sysfs to call function gfs2_sbd_release, which frees sdp. */
1215        gfs2_sys_fs_del(sdp);
1216        sb->s_fs_info = NULL;
1217        return error;
1218}
1219
/*
 * set_gfs2_super - sget() callback to initialise a newly allocated super
 * @s: the new superblock
 * @data: the block device this super is being created for
 */
static int set_gfs2_super(struct super_block *s, void *data)
{
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;

        /*
         * We set the bdi here to the queue backing, file systems can
         * overwrite this in ->fill_super()
         */
        s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
        return 0;
}
1232
1233static int test_gfs2_super(struct super_block *s, void *ptr)
1234{
1235        struct block_device *bdev = ptr;
1236        return (bdev == s->s_bdev);
1237}
1238
1239/**
1240 * gfs2_mount - Get the GFS2 superblock
1241 * @fs_type: The GFS2 filesystem type
1242 * @flags: Mount flags
1243 * @dev_name: The name of the device
1244 * @data: The mount arguments
1245 *
1246 * Q. Why not use get_sb_bdev() ?
1247 * A. We need to select one of two root directories to mount, independent
1248 *    of whether this is the initial, or subsequent, mount of this sb
1249 *
1250 * Returns: 0 or -ve on error
1251 */
1252
1253static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
1254                       const char *dev_name, void *data)
1255{
1256        struct block_device *bdev;
1257        struct super_block *s;
1258        fmode_t mode = FMODE_READ | FMODE_EXCL;
1259        int error;
1260        struct gfs2_args args;
1261        struct gfs2_sbd *sdp;
1262
1263        if (!(flags & MS_RDONLY))
1264                mode |= FMODE_WRITE;
1265
1266        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
1267        if (IS_ERR(bdev))
1268                return ERR_CAST(bdev);
1269
1270        /*
1271         * once the super is inserted into the list by sget, s_umount
1272         * will protect the lockfs code from trying to start a snapshot
1273         * while we are mounting
1274         */
1275        mutex_lock(&bdev->bd_fsfreeze_mutex);
1276        if (bdev->bd_fsfreeze_count > 0) {
1277                mutex_unlock(&bdev->bd_fsfreeze_mutex);
1278                error = -EBUSY;
1279                goto error_bdev;
1280        }
1281        s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev);
1282        mutex_unlock(&bdev->bd_fsfreeze_mutex);
1283        error = PTR_ERR(s);
1284        if (IS_ERR(s))
1285                goto error_bdev;
1286
1287        if (s->s_root) {
1288                /*
1289                 * s_umount nests inside bd_mutex during
1290                 * __invalidate_device().  blkdev_put() acquires
1291                 * bd_mutex and can't be called under s_umount.  Drop
1292                 * s_umount temporarily.  This is safe as we're
1293                 * holding an active reference.
1294                 */
1295                up_write(&s->s_umount);
1296                blkdev_put(bdev, mode);
1297                down_write(&s->s_umount);
1298        } else {
1299                /* s_mode must be set before deactivate_locked_super calls */
1300                s->s_mode = mode;
1301        }
1302
1303        memset(&args, 0, sizeof(args));
1304        args.ar_quota = GFS2_QUOTA_DEFAULT;
1305        args.ar_data = GFS2_DATA_DEFAULT;
1306        args.ar_commit = 30;
1307        args.ar_statfs_quantum = 30;
1308        args.ar_quota_quantum = 60;
1309        args.ar_errors = GFS2_ERRORS_DEFAULT;
1310
1311        error = gfs2_mount_args(&args, data);
1312        if (error) {
1313                pr_warn("can't parse mount arguments\n");
1314                goto error_super;
1315        }
1316
1317        if (s->s_root) {
1318                error = -EBUSY;
1319                if ((flags ^ s->s_flags) & MS_RDONLY)
1320                        goto error_super;
1321        } else {
1322                snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1323                sb_set_blocksize(s, block_size(bdev));
1324                error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
1325                if (error)
1326                        goto error_super;
1327                s->s_flags |= MS_ACTIVE;
1328                bdev->bd_super = s;
1329        }
1330
1331        sdp = s->s_fs_info;
1332        if (args.ar_meta)
1333                return dget(sdp->sd_master_dir);
1334        else
1335                return dget(sdp->sd_root_dir);
1336
1337error_super:
1338        deactivate_locked_super(s);
1339        return ERR_PTR(error);
1340error_bdev:
1341        blkdev_put(bdev, mode);
1342        return ERR_PTR(error);
1343}
1344
/*
 * set_meta_super - sget() "create" callback for gfs2meta mounts
 *
 * A gfs2meta mount may only attach to an already-mounted gfs2 filesystem,
 * so creating a fresh superblock is always refused.
 */
static int set_meta_super(struct super_block *s, void *ptr)
{
        return -EINVAL;
}
1349
/*
 * gfs2_mount_meta - mount the hidden metadata root of an existing gfs2 fs
 * @fs_type: the gfs2meta filesystem type
 * @flags: mount flags
 * @dev_name: path to any file on the already-mounted gfs2 filesystem
 * @data: mount arguments (unused here)
 *
 * Resolves @dev_name to its superblock's block device and uses sget()
 * with set_meta_super (which always refuses creation) so that only an
 * existing gfs2 super can match.  Returns the master directory dentry,
 * or ERR_PTR on error.
 */
static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
                        int flags, const char *dev_name, void *data)
{
        struct super_block *s;
        struct gfs2_sbd *sdp;
        struct path path;
        int error;

        error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
        if (error) {
                pr_warn("path_lookup on %s returned error %d\n",
                        dev_name, error);
                return ERR_PTR(error);
        }
        s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
                 path.dentry->d_sb->s_bdev);
        path_put(&path);
        if (IS_ERR(s)) {
                pr_warn("gfs2 mount does not exist\n");
                return ERR_CAST(s);
        }
        /* RO/RW mode of the meta mount must match the underlying mount */
        if ((flags ^ s->s_flags) & MS_RDONLY) {
                deactivate_locked_super(s);
                return ERR_PTR(-EBUSY);
        }
        sdp = s->s_fs_info;
        return dget(sdp->sd_master_dir);
}
1378
/*
 * gfs2_kill_sb - tear down a gfs2 superblock
 * @sb: the superblock being unmounted
 *
 * Flushes the log and drops the cached root/master dentries before
 * handing off to kill_block_super().  sdp can be NULL if fill_super()
 * failed early enough that s_fs_info was cleared.
 */
static void gfs2_kill_sb(struct super_block *sb)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;

        if (sdp == NULL) {
                kill_block_super(sb);
                return;
        }

        gfs2_log_flush(sdp, NULL, SYNC_FLUSH);
        /* Drop our long-lived dentry refs before shrinking the dcache */
        dput(sdp->sd_root_dir);
        dput(sdp->sd_master_dir);
        sdp->sd_root_dir = NULL;
        sdp->sd_master_dir = NULL;
        shrink_dcache_sb(sb);
        gfs2_delete_debugfs_file(sdp);
        free_percpu(sdp->sd_lkstats);
        kill_block_super(sb);
}
1398
/* The regular gfs2 filesystem type: mounts the visible root directory */
struct file_system_type gfs2_fs_type = {
        .name = "gfs2",
        .fs_flags = FS_REQUIRES_DEV,
        .mount = gfs2_mount,
        .kill_sb = gfs2_kill_sb,
        .owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2");
1407
/*
 * The gfs2meta type exposes the hidden metadata root of an existing gfs2
 * mount; it never creates a superblock of its own (see set_meta_super),
 * hence no .kill_sb here.
 */
struct file_system_type gfs2meta_fs_type = {
        .name = "gfs2meta",
        .fs_flags = FS_REQUIRES_DEV,
        .mount = gfs2_mount_meta,
        .owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2meta");
1415