linux/fs/gfs2/ops_fstype.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/quotaops.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "sys.h"
#include "util.h"
#include "log.h"
#include "quota.h"
#include "dir.h"
#include "meta_io.h"
#include "trace_gfs2.h"

#define DO 0
#define UNDO 1
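/*
 * The init_* helpers below take a DO/UNDO flag: called with DO they set
 * things up, and called with UNDO they jump straight to their unwind
 * labels so the mount error path can tear everything down in reverse
 * order.
 */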

/**
 * gfs2_tune_init - Fill a gfs2_tune structure with default values
 * @gt: tune
 *
 */

static void gfs2_tune_init(struct gfs2_tune *gt)
{
        spin_lock_init(&gt->gt_spin);

        gt->gt_quota_warn_period = 10;
        gt->gt_quota_scale_num = 1;
        gt->gt_quota_scale_den = 1;
        gt->gt_new_files_jdata = 0;
        gt->gt_max_readahead = BIT(18);
        gt->gt_complain_secs = 10;
}

static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
        struct gfs2_sbd *sdp;
        struct address_space *mapping;

        sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
        if (!sdp)
                return NULL;

        sb->s_fs_info = sdp;
        sdp->sd_vfs = sb;
        sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
        if (!sdp->sd_lkstats) {
                kfree(sdp);
                return NULL;
        }

        set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
        gfs2_tune_init(&sdp->sd_tune);

        init_waitqueue_head(&sdp->sd_glock_wait);
        atomic_set(&sdp->sd_glock_disposal, 0);
        init_completion(&sdp->sd_locking_init);
        init_completion(&sdp->sd_wdack);
        spin_lock_init(&sdp->sd_statfs_spin);

        spin_lock_init(&sdp->sd_rindex_spin);
        sdp->sd_rindex_tree.rb_node = NULL;

        INIT_LIST_HEAD(&sdp->sd_jindex_list);
        spin_lock_init(&sdp->sd_jindex_spin);
        mutex_init(&sdp->sd_jindex_mutex);
        init_completion(&sdp->sd_journal_ready);

        INIT_LIST_HEAD(&sdp->sd_quota_list);
        mutex_init(&sdp->sd_quota_mutex);
        mutex_init(&sdp->sd_quota_sync_mutex);
        init_waitqueue_head(&sdp->sd_quota_wait);
        INIT_LIST_HEAD(&sdp->sd_trunc_list);
        spin_lock_init(&sdp->sd_trunc_lock);
        spin_lock_init(&sdp->sd_bitmap_lock);

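        /*
         * sd_aspace is a bare address_space with no inode of its own; it is
         * used for caching resource group metadata, borrows the block
         * device's inode as its host, and uses GFP_NOFS so that allocating
         * its pages can never recurse back into the filesystem.
         */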
        mapping = &sdp->sd_aspace;

        address_space_init_once(mapping);
        mapping->a_ops = &gfs2_rgrp_aops;
        mapping->host = sb->s_bdev->bd_inode;
        mapping->flags = 0;
        mapping_set_gfp_mask(mapping, GFP_NOFS);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;

        spin_lock_init(&sdp->sd_log_lock);
        atomic_set(&sdp->sd_log_pinned, 0);
        INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
        INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
        spin_lock_init(&sdp->sd_ordered_lock);

        init_waitqueue_head(&sdp->sd_log_waitq);
        init_waitqueue_head(&sdp->sd_logd_waitq);
        spin_lock_init(&sdp->sd_ail_lock);
        INIT_LIST_HEAD(&sdp->sd_ail1_list);
        INIT_LIST_HEAD(&sdp->sd_ail2_list);

        init_rwsem(&sdp->sd_log_flush_lock);
        atomic_set(&sdp->sd_log_in_flight, 0);
        atomic_set(&sdp->sd_reserving_log, 0);
        init_waitqueue_head(&sdp->sd_reserving_log_wait);
        init_waitqueue_head(&sdp->sd_log_flush_wait);
        atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
        mutex_init(&sdp->sd_freeze_mutex);

        return sdp;
}


/**
 * gfs2_check_sb - Check superblock
 * @sdp: the filesystem
 * @silent: Don't print a message if the check fails
 *
 * Checks that the version code of the FS is one that we understand how to
 * read and that the sizes of the various on-disk structures have not
 * changed.
 */

static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
{
        struct gfs2_sb_host *sb = &sdp->sd_sb;

        if (sb->sb_magic != GFS2_MAGIC ||
            sb->sb_type != GFS2_METATYPE_SB) {
                if (!silent)
                        pr_warn("not a GFS2 filesystem\n");
                return -EINVAL;
        }

        /*  If the format numbers match exactly, we're done.  */

        if (sb->sb_fs_format == GFS2_FORMAT_FS &&
            sb->sb_multihost_format == GFS2_FORMAT_MULTI)
                return 0;

        fs_warn(sdp, "Unknown on-disk format, unable to mount\n");

        return -EINVAL;
}

static void end_bio_io_page(struct bio *bio)
{
        struct page *page = bio->bi_private;

        if (!bio->bi_status)
                SetPageUptodate(page);
        else
                pr_warn("error %d reading superblock\n", bio->bi_status);
        unlock_page(page);
}

static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
{
        struct gfs2_sb_host *sb = &sdp->sd_sb;
        struct super_block *s = sdp->sd_vfs;
        const struct gfs2_sb *str = buf;

        sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
        sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
        sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
        sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
        sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
        sb->sb_bsize = be32_to_cpu(str->sb_bsize);
        sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
        sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
        sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
        sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
        sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);

        memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
        memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
        memcpy(&s->s_uuid, str->sb_uuid, 16);
}

/**
 * gfs2_read_super - Read the gfs2 super block from disk
 * @sdp: The GFS2 super block
 * @sector: The location of the super block
 * @silent: Don't print a message if the superblock check fails
 *
 * This uses the bio functions to read the super block from disk
 * because we want to be 100% sure that we never read cached data.
 * A super block is read only twice during each GFS2 mount and is
 * never written to by the filesystem. The first time it's read, no
 * locks are held, and the only details which are looked at are those
 * relating to the locking protocol. Once locking is up and working,
 * the sb is read again under the lock to establish the location of
 * the master directory (contains pointers to journals etc) and the
 * root directory.
 *
 * Returns: 0 on success or error
 */

static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
        struct super_block *sb = sdp->sd_vfs;
        struct gfs2_sb *p;
        struct page *page;
        struct bio *bio;

        page = alloc_page(GFP_NOFS);
        if (unlikely(!page))
                return -ENOMEM;

        ClearPageUptodate(page);
        ClearPageDirty(page);
        lock_page(page);

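        /*
         * The sector argument is in units of the current filesystem block
         * size; bi_sector is always in 512-byte units, hence the
         * (s_blocksize >> 9) scaling below.
         */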
        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
        bio_set_dev(bio, sb->s_bdev);
        bio_add_page(bio, page, PAGE_SIZE, 0);

        bio->bi_end_io = end_bio_io_page;
        bio->bi_private = page;
        bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
        submit_bio(bio);
        wait_on_page_locked(page);
        bio_put(bio);
        if (!PageUptodate(page)) {
                __free_page(page);
                return -EIO;
        }
        p = kmap(page);
        gfs2_sb_in(sdp, p);
        kunmap(page);
        __free_page(page);
        return gfs2_check_sb(sdp, silent);
}

/**
 * gfs2_read_sb - Read super block
 * @sdp: The GFS2 superblock
 * @silent: Don't print a message if the mount fails
 *
 */

static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
{
        u32 hash_blocks, ind_blocks, leaf_blocks;
        u32 tmp_blocks;
        unsigned int x;
        int error;

        error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
        if (error) {
                if (!silent)
                        fs_err(sdp, "can't read superblock\n");
                return error;
        }

        sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
                               GFS2_BASIC_BLOCK_SHIFT;
        sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
        sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_dinode)) / sizeof(u64);
        sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / sizeof(u64);
        sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
        sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
        sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
        sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
        sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_meta_header)) /
                                sizeof(struct gfs2_quota_change);
        sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
                                     sizeof(struct gfs2_meta_header))
                * GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */

        /* Compute maximum reservation required to add an entry to a directory */

        hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
                             sdp->sd_jbsize);

        ind_blocks = 0;
        for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
                tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
                ind_blocks += tmp_blocks;
        }

        leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;

        sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;

        sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_dinode);
        sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
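        /*
         * Walk up the metadata tree one level of indirection at a time; the
         * loop stops when multiplying by sd_inptrs overflows 64 bits
         * (detected when dividing back down no longer round-trips), and
         * that height becomes sd_max_height.
         */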
        for (x = 2;; x++) {
                u64 space, d;
                u32 m;

                space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
                d = space;
                m = do_div(d, sdp->sd_inptrs);

                if (d != sdp->sd_heightsize[x - 1] || m)
                        break;
                sdp->sd_heightsize[x] = space;
        }
        sdp->sd_max_height = x;
        sdp->sd_heightsize[x] = ~0;
        gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);

        sdp->sd_max_dents_per_leaf = (sdp->sd_sb.sb_bsize -
                                      sizeof(struct gfs2_leaf)) /
                                     GFS2_MIN_DIRENT_SIZE;
        return 0;
}

static int init_names(struct gfs2_sbd *sdp, int silent)
{
        char *proto, *table;
        int error = 0;

        proto = sdp->sd_args.ar_lockproto;
        table = sdp->sd_args.ar_locktable;

        /*  Try to autodetect  */

        if (!proto[0] || !table[0]) {
                error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
                if (error)
                        return error;

                if (!proto[0])
                        proto = sdp->sd_sb.sb_lockproto;
                if (!table[0])
                        table = sdp->sd_sb.sb_locktable;
        }

        if (!table[0])
                table = sdp->sd_vfs->s_id;

        strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
        strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);

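        /*
         * The table name may have defaulted to the device's s_id above and
         * is reused later for sd_fsname and the sysfs/debugfs directory
         * names, where '/' would be invalid, so rewrite any '/' to '_'.
         */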
        table = sdp->sd_table_name;
        while ((table = strchr(table, '/')))
                *table = '_';

        return error;
}

static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
                        int undo)
{
        int error = 0;

        if (undo)
                goto fail_trans;

        error = gfs2_glock_nq_num(sdp,
                                  GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
                                  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
                                  mount_gh);
        if (error) {
                fs_err(sdp, "can't acquire mount glock: %d\n", error);
                goto fail;
        }

        error = gfs2_glock_nq_num(sdp,
                                  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
                                  LM_ST_SHARED,
                                  LM_FLAG_NOEXP | GL_EXACT,
                                  &sdp->sd_live_gh);
        if (error) {
                fs_err(sdp, "can't acquire live glock: %d\n", error);
                goto fail_mount;
        }

        error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
                               CREATE, &sdp->sd_rename_gl);
        if (error) {
                fs_err(sdp, "can't create rename glock: %d\n", error);
                goto fail_live;
        }

        error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
                               CREATE, &sdp->sd_freeze_gl);
        if (error) {
                fs_err(sdp, "can't create transaction glock: %d\n", error);
                goto fail_rename;
        }

        return 0;

fail_trans:
        gfs2_glock_put(sdp->sd_freeze_gl);
fail_rename:
        gfs2_glock_put(sdp->sd_rename_gl);
fail_live:
        gfs2_glock_dq_uninit(&sdp->sd_live_gh);
fail_mount:
        gfs2_glock_dq_uninit(mount_gh);
fail:
        return error;
}

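/*
 * Note that d_make_root() consumes the inode reference on both success and
 * failure, so the ENOMEM path below needs no explicit iput().
 */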
static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
                            u64 no_addr, const char *name)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct dentry *dentry;
        struct inode *inode;

        inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
                                  GFS2_BLKST_FREE /* ignore */);
        if (IS_ERR(inode)) {
                fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
                return PTR_ERR(inode);
        }
        dentry = d_make_root(inode);
        if (!dentry) {
                fs_err(sdp, "can't alloc %s dentry\n", name);
                return -ENOMEM;
        }
        *dptr = dentry;
        return 0;
}

static int init_sb(struct gfs2_sbd *sdp, int silent)
{
        struct super_block *sb = sdp->sd_vfs;
        struct gfs2_holder sb_gh;
        u64 no_addr;
        int ret;

        ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
                                LM_ST_SHARED, 0, &sb_gh);
        if (ret) {
                fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
                return ret;
        }

        ret = gfs2_read_sb(sdp, silent);
        if (ret) {
                fs_err(sdp, "can't read superblock: %d\n", ret);
                goto out;
        }

        /* Set up the buffer cache and SB for real */
        if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
                ret = -EINVAL;
                fs_err(sdp, "FS block size (%u) is too small for device "
                       "block size (%u)\n",
                       sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
                goto out;
        }
        if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
                ret = -EINVAL;
                fs_err(sdp, "FS block size (%u) is too big for machine "
                       "page size (%u)\n",
                       sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
                goto out;
        }
        sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);

        /* Get the root inode */
        no_addr = sdp->sd_sb.sb_root_dir.no_addr;
        ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
        if (ret)
                goto out;

        /* Get the master inode */
        no_addr = sdp->sd_sb.sb_master_dir.no_addr;
        ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
        if (ret) {
                dput(sdp->sd_root_dir);
                goto out;
        }
        sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);
out:
        gfs2_glock_dq_uninit(&sb_gh);
        return ret;
}

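/*
 * Called once the first mounter has finished checking or recovering every
 * journal: tell the lock module (lm_first_done) and user space (via the
 * FIRSTMOUNT=Done uevent) that other nodes may now mount this filesystem.
 */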
static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
{
        char *message = "FIRSTMOUNT=Done";
        char *envp[] = { message, NULL };

        fs_info(sdp, "first mount done, others may mount\n");

        if (sdp->sd_lockstruct.ls_ops->lm_first_done)
                sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);

        kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
}

/**
 * gfs2_jindex_hold - Grab a lock on the jindex
 * @sdp: The GFS2 superblock
 * @ji_gh: the holder for the jindex glock
 *
 * Returns: errno
 */

static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
{
        struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
        struct qstr name;
        char buf[20];
        struct gfs2_jdesc *jd;
        int error;

        name.name = buf;

        mutex_lock(&sdp->sd_jindex_mutex);

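        /*
         * Enumerate "journal0", "journal1", ... in the jindex directory and
         * build a gfs2_jdesc for each, stopping at the first name that does
         * not exist; the glock is dropped and retaken around each pass and,
         * on success, is left held in @ji_gh.
         */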
        for (;;) {
                error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
                if (error)
                        break;

                name.len = sprintf(buf, "journal%u", sdp->sd_journals);
                name.hash = gfs2_disk_hash(name.name, name.len);

                error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
                if (error == -ENOENT) {
                        error = 0;
                        break;
                }

                gfs2_glock_dq_uninit(ji_gh);

                if (error)
                        break;

                error = -ENOMEM;
                jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
                if (!jd)
                        break;

                INIT_LIST_HEAD(&jd->extent_list);
                INIT_LIST_HEAD(&jd->jd_revoke_list);

                INIT_WORK(&jd->jd_work, gfs2_recover_func);
                jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
                if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
                        if (!jd->jd_inode)
                                error = -ENOENT;
                        else
                                error = PTR_ERR(jd->jd_inode);
                        kfree(jd);
                        break;
                }

                spin_lock(&sdp->sd_jindex_spin);
                jd->jd_jid = sdp->sd_journals++;
                list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
                spin_unlock(&sdp->sd_jindex_spin);
        }

        mutex_unlock(&sdp->sd_jindex_mutex);

        return error;
}

/**
 * check_journal_clean - Make sure a journal is clean for a spectator mount
 * @sdp: The GFS2 superblock
 * @jd: The journal descriptor
 *
 * Returns: 0 if the journal is clean or locked, else an error
 */
static int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
        int error;
        struct gfs2_holder j_gh;
        struct gfs2_log_header_host head;
        struct gfs2_inode *ip;

        ip = GFS2_I(jd->jd_inode);
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
                                   GL_EXACT | GL_NOCACHE, &j_gh);
        if (error) {
                fs_err(sdp, "Error locking journal for spectator mount.\n");
                return -EPERM;
        }
        error = gfs2_jdesc_check(jd);
        if (error) {
                fs_err(sdp, "Error checking journal for spectator mount.\n");
                goto out_unlock;
        }
        error = gfs2_find_jhead(jd, &head);
        if (error) {
                fs_err(sdp, "Error parsing journal for spectator mount.\n");
                goto out_unlock;
        }
        if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                error = -EPERM;
                fs_err(sdp, "jid=%u: Journal is dirty, so the first mounter "
                       "must not be a spectator.\n", jd->jd_jid);
        }

out_unlock:
        gfs2_glock_dq_uninit(&j_gh);
        return error;
}

static int init_journal(struct gfs2_sbd *sdp, int undo)
{
        struct inode *master = d_inode(sdp->sd_master_dir);
        struct gfs2_holder ji_gh;
        struct gfs2_inode *ip;
        int jindex = 1;
        int error = 0;

        if (undo) {
                jindex = 0;
                goto fail_jinode_gh;
        }

        sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
        if (IS_ERR(sdp->sd_jindex)) {
                fs_err(sdp, "can't lookup journal index: %ld\n",
                       PTR_ERR(sdp->sd_jindex));
                return PTR_ERR(sdp->sd_jindex);
        }

        /* Load in the journal index special file */

        error = gfs2_jindex_hold(sdp, &ji_gh);
        if (error) {
                fs_err(sdp, "can't read journal index: %d\n", error);
                goto fail;
        }

        error = -EUSERS;
        if (!gfs2_jindex_size(sdp)) {
                fs_err(sdp, "no journals!\n");
                goto fail_jindex;
        }

        atomic_set(&sdp->sd_log_blks_needed, 0);
        if (sdp->sd_args.ar_spectator) {
                sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
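                /*
                 * A spectator has no journal of its own; borrow journal 0's
                 * descriptor purely to size the in-memory log accounting.
                 * thresh1/thresh2 (2/5 and 4/5 of the journal) are the
                 * thresholds the logd daemon compares against when deciding
                 * to flush.
                 */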
                atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
                atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
                atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
        } else {
                if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
                        fs_err(sdp, "can't mount journal #%u\n",
                               sdp->sd_lockstruct.ls_jid);
                        fs_err(sdp, "there are only %u journals (0 - %u)\n",
                               gfs2_jindex_size(sdp),
                               gfs2_jindex_size(sdp) - 1);
                        goto fail_jindex;
                }
                sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);

                error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
                                          &gfs2_journal_glops,
                                          LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
                                          &sdp->sd_journal_gh);
                if (error) {
                        fs_err(sdp, "can't acquire journal glock: %d\n", error);
                        goto fail_jindex;
                }

                ip = GFS2_I(sdp->sd_jdesc->jd_inode);
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
                                           LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
                                           &sdp->sd_jinode_gh);
                if (error) {
                        fs_err(sdp, "can't acquire journal inode glock: %d\n",
                               error);
                        goto fail_journal_gh;
                }

                error = gfs2_jdesc_check(sdp->sd_jdesc);
                if (error) {
                        fs_err(sdp, "my journal (%u) is bad: %d\n",
                               sdp->sd_jdesc->jd_jid, error);
                        goto fail_jinode_gh;
                }
                atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
                atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
                atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);

                /* Map the extents for this journal's blocks */
                gfs2_map_journal_extents(sdp, sdp->sd_jdesc);
        }
        trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));

        if (sdp->sd_lockstruct.ls_first) {
                unsigned int x;
                for (x = 0; x < sdp->sd_journals; x++) {
                        struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);

                        if (sdp->sd_args.ar_spectator) {
                                error = check_journal_clean(sdp, jd);
                                if (error)
                                        goto fail_jinode_gh;
                                continue;
                        }
                        error = gfs2_recover_journal(jd, true);
                        if (error) {
                                fs_err(sdp, "error recovering journal %u: %d\n",
                                       x, error);
                                goto fail_jinode_gh;
                        }
                }

                gfs2_others_may_mount(sdp);
        } else if (!sdp->sd_args.ar_spectator) {
                error = gfs2_recover_journal(sdp->sd_jdesc, true);
                if (error) {
                        fs_err(sdp, "error recovering my journal: %d\n", error);
                        goto fail_jinode_gh;
                }
        }

        sdp->sd_log_idle = 1;
        set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
        gfs2_glock_dq_uninit(&ji_gh);
        jindex = 0;
        INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
        return 0;

fail_jinode_gh:
        if (!sdp->sd_args.ar_spectator)
                gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
fail_journal_gh:
        if (!sdp->sd_args.ar_spectator)
                gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
fail_jindex:
        gfs2_jindex_free(sdp);
        if (jindex)
                gfs2_glock_dq_uninit(&ji_gh);
fail:
        iput(sdp->sd_jindex);
        return error;
}

static struct lock_class_key gfs2_quota_imutex_key;

static int init_inodes(struct gfs2_sbd *sdp, int undo)
{
        int error = 0;
        struct inode *master = d_inode(sdp->sd_master_dir);

        if (undo)
                goto fail_qinode;

        error = init_journal(sdp, undo);
        complete_all(&sdp->sd_journal_ready);
        if (error)
                goto fail;

        /* Read in the master statfs inode */
        sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
        if (IS_ERR(sdp->sd_statfs_inode)) {
                error = PTR_ERR(sdp->sd_statfs_inode);
                fs_err(sdp, "can't read in statfs inode: %d\n", error);
                goto fail_journal;
        }

        /* Read in the resource index inode */
        sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
        if (IS_ERR(sdp->sd_rindex)) {
                error = PTR_ERR(sdp->sd_rindex);
                fs_err(sdp, "can't get resource index inode: %d\n", error);
                goto fail_statfs;
        }
        sdp->sd_rindex_uptodate = 0;

        /* Read in the quota inode */
        sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
        if (IS_ERR(sdp->sd_quota_inode)) {
                error = PTR_ERR(sdp->sd_quota_inode);
                fs_err(sdp, "can't get quota file inode: %d\n", error);
                goto fail_rindex;
        }
        /*
         * i_rwsem on quota files is special. Since this inode is a hidden
         * system file, we are safe to define the locking ourselves.
         */
        lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
                          &gfs2_quota_imutex_key);

        error = gfs2_rindex_update(sdp);
        if (error)
                goto fail_qinode;

        return 0;

fail_qinode:
        iput(sdp->sd_quota_inode);
fail_rindex:
        gfs2_clear_rgrpd(sdp);
        iput(sdp->sd_rindex);
fail_statfs:
        iput(sdp->sd_statfs_inode);
fail_journal:
        init_journal(sdp, UNDO);
fail:
        return error;
}

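/*
 * Each node gets its own "statfs_change%u" and "quota_change%u" files in the
 * per_node directory, indexed by its journal id; spectator mounts own no
 * journal and therefore skip this entirely.
 */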
static int init_per_node(struct gfs2_sbd *sdp, int undo)
{
        struct inode *pn = NULL;
        char buf[30];
        int error = 0;
        struct gfs2_inode *ip;
        struct inode *master = d_inode(sdp->sd_master_dir);

        if (sdp->sd_args.ar_spectator)
                return 0;

        if (undo)
                goto fail_qc_gh;

        pn = gfs2_lookup_simple(master, "per_node");
        if (IS_ERR(pn)) {
                error = PTR_ERR(pn);
                fs_err(sdp, "can't find per_node directory: %d\n", error);
                return error;
        }

        sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
        sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
        if (IS_ERR(sdp->sd_sc_inode)) {
                error = PTR_ERR(sdp->sd_sc_inode);
                fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
                goto fail;
        }

        sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
        sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
        if (IS_ERR(sdp->sd_qc_inode)) {
                error = PTR_ERR(sdp->sd_qc_inode);
                fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
                goto fail_ut_i;
        }

        iput(pn);
        pn = NULL;

        ip = GFS2_I(sdp->sd_sc_inode);
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
                                   &sdp->sd_sc_gh);
        if (error) {
                fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
                goto fail_qc_i;
        }

        ip = GFS2_I(sdp->sd_qc_inode);
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
                                   &sdp->sd_qc_gh);
        if (error) {
                fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
                goto fail_ut_gh;
        }

        return 0;

fail_qc_gh:
        gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
fail_ut_gh:
        gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
fail_qc_i:
        iput(sdp->sd_qc_inode);
fail_ut_i:
        iput(sdp->sd_sc_inode);
fail:
        iput(pn);
        return error;
}

static const match_table_t nolock_tokens = {
        { Opt_jid, "jid=%d\n", },
        { Opt_err, NULL },
};

static const struct lm_lockops nolock_ops = {
        .lm_proto_name = "lock_nolock",
        .lm_put_lock = gfs2_glock_free,
        .lm_tokens = &nolock_tokens,
};

/**
 * gfs2_lm_mount - mount a locking protocol
 * @sdp: the filesystem
 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 *
 * Returns: errno
 */

static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
{
        const struct lm_lockops *lm;
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        struct gfs2_args *args = &sdp->sd_args;
        const char *proto = sdp->sd_proto_name;
        const char *table = sdp->sd_table_name;
        char *o, *options;
        int ret;

        if (!strcmp("lock_nolock", proto)) {
                lm = &nolock_ops;
                sdp->sd_args.ar_localflocks = 1;
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
        } else if (!strcmp("lock_dlm", proto)) {
                lm = &gfs2_dlm_ops;
#endif
        } else {
                pr_info("can't find protocol %s\n", proto);
                return -ENOENT;
        }

        fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);

        ls->ls_ops = lm;
        ls->ls_first = 1;

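        /*
         * ar_hostdata is a colon-separated option string that is normally
         * supplied by the cluster mount infrastructure rather than typed by
         * the user; for lock_dlm it can carry the journal id (jid=) and
         * whether this node is the first mounter (first=).
         */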
        for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
                substring_t tmp[MAX_OPT_ARGS];
                int token, option;

                if (!o || !*o)
                        continue;

                token = match_token(o, *lm->lm_tokens, tmp);
                switch (token) {
                case Opt_jid:
                        ret = match_int(&tmp[0], &option);
                        if (ret || option < 0)
                                goto hostdata_error;
                        if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
                                ls->ls_jid = option;
                        break;
                case Opt_id:
                case Opt_nodir:
                        /* Obsolete, but left for backward compat purposes */
                        break;
                case Opt_first:
                        ret = match_int(&tmp[0], &option);
                        if (ret || (option != 0 && option != 1))
                                goto hostdata_error;
                        ls->ls_first = option;
                        break;
                case Opt_err:
                default:
hostdata_error:
                        fs_info(sdp, "unknown hostdata (%s)\n", o);
                        return -EINVAL;
                }
        }

        if (lm->lm_mount == NULL) {
                fs_info(sdp, "Now mounting FS...\n");
                complete_all(&sdp->sd_locking_init);
                return 0;
        }
        ret = lm->lm_mount(sdp, table);
        if (ret == 0)
                fs_info(sdp, "Joined cluster. Now mounting FS...\n");
        complete_all(&sdp->sd_locking_init);
        return ret;
}

void gfs2_lm_unmount(struct gfs2_sbd *sdp)
{
        const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
            lm->lm_unmount)
                lm->lm_unmount(sdp);
}

static int wait_on_journal(struct gfs2_sbd *sdp)
{
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                return 0;

        return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
                ? -EINTR : 0;
}

void gfs2_online_uevent(struct gfs2_sbd *sdp)
{
        struct super_block *sb = sdp->sd_vfs;
        char ro[20];
        char spectator[20];
        char *envp[] = { ro, spectator, NULL };
        sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
        sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
        kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
}

/**
 * fill_super - Read in superblock
 * @sb: The VFS superblock
 * @args: Mount options
 * @silent: Don't complain if it's not a GFS2 filesystem
 *
 * Returns: errno
 */

static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
{
        struct gfs2_sbd *sdp;
        struct gfs2_holder mount_gh;
        int error;

        sdp = init_sbd(sb);
        if (!sdp) {
                pr_warn("can't alloc struct gfs2_sbd\n");
                return -ENOMEM;
        }
        sdp->sd_args = *args;

        if (sdp->sd_args.ar_spectator) {
                sb->s_flags |= SB_RDONLY;
                set_bit(SDF_RORECOVERY, &sdp->sd_flags);
        }
        if (sdp->sd_args.ar_posix_acl)
                sb->s_flags |= SB_POSIXACL;
        if (sdp->sd_args.ar_nobarrier)
                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);

        sb->s_flags |= SB_NOSEC;
        sb->s_magic = GFS2_MAGIC;
        sb->s_op = &gfs2_super_ops;
        sb->s_d_op = &gfs2_dops;
        sb->s_export_op = &gfs2_export_ops;
        sb->s_xattr = gfs2_xattr_handlers;
        sb->s_qcop = &gfs2_quotactl_ops;
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
        sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
        sb->s_time_gran = 1;
        sb->s_maxbytes = MAX_LFS_FILESIZE;

        /* Set up the buffer cache and fill in some fake block size values
           to allow us to read in the on-disk superblock. */
        sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
        sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
        sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
                               GFS2_BASIC_BLOCK_SHIFT;
        sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);

        sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
        sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
        if (sdp->sd_args.ar_statfs_quantum) {
                sdp->sd_tune.gt_statfs_slow = 0;
                sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
        } else {
                sdp->sd_tune.gt_statfs_slow = 1;
                sdp->sd_tune.gt_statfs_quantum = 30;
        }

        error = init_names(sdp, silent);
        if (error) {
                /* In this case, we haven't initialized sysfs, so we have to
                   manually free the sdp. */
                free_percpu(sdp->sd_lkstats);
                kfree(sdp);
                sb->s_fs_info = NULL;
                return error;
        }

        snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);

        error = gfs2_sys_fs_add(sdp);
        /*
         * If we hit an error here, gfs2_sys_fs_add will have called
         * kobject_put, which causes the sysfs usage count to go to zero,
         * which causes sysfs to call gfs2_sbd_release, which frees sdp.
         * Subsequent error paths here will call gfs2_sys_fs_del, which also
         * calls kobject_put to free sdp.
         */
        if (error)
                return error;

        gfs2_create_debugfs_file(sdp);

        error = gfs2_lm_mount(sdp, silent);
        if (error)
                goto fail_debug;

        error = init_locking(sdp, &mount_gh, DO);
        if (error)
                goto fail_lm;

        error = init_sb(sdp, silent);
        if (error)
                goto fail_locking;

        error = wait_on_journal(sdp);
        if (error)
                goto fail_sb;

        /*
         * If user space has failed to join the cluster or some similar
         * failure has occurred, then the journal id will contain a
         * negative (error) number. This will then be returned to the
         * caller (of the mount syscall). We do this even for spectator
         * mounts (which just write a jid of 0 to indicate "ok" even though
         * the jid is unused in the spectator case).
         */
        if (sdp->sd_lockstruct.ls_jid < 0) {
                error = sdp->sd_lockstruct.ls_jid;
                sdp->sd_lockstruct.ls_jid = 0;
                goto fail_sb;
        }

        if (sdp->sd_args.ar_spectator)
                snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.s",
                         sdp->sd_table_name);
        else
                snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.%u",
                         sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);

        error = init_inodes(sdp, DO);
        if (error)
                goto fail_sb;

        error = init_per_node(sdp, DO);
        if (error)
                goto fail_inodes;

        error = gfs2_statfs_init(sdp);
        if (error) {
                fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
                goto fail_per_node;
        }

        if (!sb_rdonly(sb)) {
                error = gfs2_make_fs_rw(sdp);
                if (error) {
                        fs_err(sdp, "can't make FS RW: %d\n", error);
                        goto fail_per_node;
                }
        }

        gfs2_glock_dq_uninit(&mount_gh);
        gfs2_online_uevent(sdp);
        return 0;

fail_per_node:
        init_per_node(sdp, UNDO);
fail_inodes:
        init_inodes(sdp, UNDO);
fail_sb:
        if (sdp->sd_root_dir)
                dput(sdp->sd_root_dir);
        if (sdp->sd_master_dir)
                dput(sdp->sd_master_dir);
        if (sb->s_root)
                dput(sb->s_root);
        sb->s_root = NULL;
fail_locking:
        init_locking(sdp, &mount_gh, UNDO);
fail_lm:
        complete_all(&sdp->sd_journal_ready);
        gfs2_gl_hash_clear(sdp);
        gfs2_lm_unmount(sdp);
fail_debug:
        gfs2_delete_debugfs_file(sdp);
        free_percpu(sdp->sd_lkstats);
        /* gfs2_sys_fs_del must be the last thing we do, since it causes
         * sysfs to call gfs2_sbd_release, which frees sdp. */
        gfs2_sys_fs_del(sdp);
        sb->s_fs_info = NULL;
        return error;
}

static int set_gfs2_super(struct super_block *s, void *data)
{
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;
        s->s_bdi = bdi_get(s->s_bdev->bd_bdi);
        return 0;
}

static int test_gfs2_super(struct super_block *s, void *ptr)
{
        struct block_device *bdev = ptr;
        return (bdev == s->s_bdev);
}

/**
 * gfs2_mount - Get the GFS2 superblock
 * @fs_type: The GFS2 filesystem type
 * @flags: Mount flags
 * @dev_name: The name of the device
 * @data: The mount arguments
 *
 * Q. Why not use get_sb_bdev() ?
 * A. We need to select one of two root directories to mount, independent
 *    of whether this is the initial, or subsequent, mount of this sb
 *
 * Returns: the root dentry on success, or an ERR_PTR on error
 */

static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
                       const char *dev_name, void *data)
{
        struct block_device *bdev;
        struct super_block *s;
        fmode_t mode = FMODE_READ | FMODE_EXCL;
        int error;
        struct gfs2_args args;
        struct gfs2_sbd *sdp;

        if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;

        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        /*
         * once the super is inserted into the list by sget, s_umount
         * will protect the lockfs code from trying to start a snapshot
         * while we are mounting
         */
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                error = -EBUSY;
                goto error_bdev;
        }
        s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        error = PTR_ERR(s);
        if (IS_ERR(s))
                goto error_bdev;

        if (s->s_root) {
                /*
                 * s_umount nests inside bd_mutex during
                 * __invalidate_device().  blkdev_put() acquires
                 * bd_mutex and can't be called under s_umount.  Drop
                 * s_umount temporarily.  This is safe as we're
                 * holding an active reference.
                 */
                up_write(&s->s_umount);
                blkdev_put(bdev, mode);
                down_write(&s->s_umount);
        } else {
                /* s_mode must be set before deactivate_locked_super calls */
                s->s_mode = mode;
        }

        memset(&args, 0, sizeof(args));
        args.ar_quota = GFS2_QUOTA_DEFAULT;
        args.ar_data = GFS2_DATA_DEFAULT;
        args.ar_commit = 30;
        args.ar_statfs_quantum = 30;
        args.ar_quota_quantum = 60;
        args.ar_errors = GFS2_ERRORS_DEFAULT;

        error = gfs2_mount_args(&args, data);
        if (error) {
                pr_warn("can't parse mount arguments\n");
                goto error_super;
        }

        if (s->s_root) {
                error = -EBUSY;
                if ((flags ^ s->s_flags) & SB_RDONLY)
                        goto error_super;
        } else {
                snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
                sb_set_blocksize(s, block_size(bdev));
                error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0);
                if (error)
                        goto error_super;
                s->s_flags |= SB_ACTIVE;
                bdev->bd_super = s;
        }

        sdp = s->s_fs_info;
        if (args.ar_meta)
                return dget(sdp->sd_master_dir);
        else
                return dget(sdp->sd_root_dir);

error_super:
        deactivate_locked_super(s);
        return ERR_PTR(error);
error_bdev:
        blkdev_put(bdev, mode);
        return ERR_PTR(error);
}

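/*
 * The gfs2meta filesystem never creates a superblock of its own: sget()
 * either finds the already-mounted gfs2 superblock on the same block device,
 * or fails because set_meta_super() refuses to initialize a new one.
 */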
static int set_meta_super(struct super_block *s, void *ptr)
{
        return -EINVAL;
}

static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
                        int flags, const char *dev_name, void *data)
{
        struct super_block *s;
        struct gfs2_sbd *sdp;
        struct path path;
        int error;

        error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
        if (error) {
                pr_warn("path_lookup on %s returned error %d\n",
                        dev_name, error);
                return ERR_PTR(error);
        }
        s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
                 path.dentry->d_sb->s_bdev);
        path_put(&path);
        if (IS_ERR(s)) {
                pr_warn("gfs2 mount does not exist\n");
                return ERR_CAST(s);
        }
        if ((flags ^ s->s_flags) & SB_RDONLY) {
                deactivate_locked_super(s);
                return ERR_PTR(-EBUSY);
        }
        sdp = s->s_fs_info;
        return dget(sdp->sd_master_dir);
}

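/*
 * Drop the dentry references that init_sb() stashed in sd_root_dir and
 * sd_master_dir (and clear the pointers) before shrinking the dcache and
 * handing the superblock to kill_block_super() for the normal teardown.
 */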
static void gfs2_kill_sb(struct super_block *sb)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;

        if (sdp == NULL) {
                kill_block_super(sb);
                return;
        }

        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SYNC | GFS2_LFC_KILL_SB);
        dput(sdp->sd_root_dir);
        dput(sdp->sd_master_dir);
        sdp->sd_root_dir = NULL;
        sdp->sd_master_dir = NULL;
        shrink_dcache_sb(sb);
        free_percpu(sdp->sd_lkstats);
        kill_block_super(sb);
}

struct file_system_type gfs2_fs_type = {
        .name = "gfs2",
        .fs_flags = FS_REQUIRES_DEV,
        .mount = gfs2_mount,
        .kill_sb = gfs2_kill_sb,
        .owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2");

struct file_system_type gfs2meta_fs_type = {
        .name = "gfs2meta",
        .fs_flags = FS_REQUIRES_DEV,
        .mount = gfs2_mount_meta,
        .owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2meta");