linux/fs/gfs2/super.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/bio.h>
  10#include <linux/sched/signal.h>
  11#include <linux/slab.h>
  12#include <linux/spinlock.h>
  13#include <linux/completion.h>
  14#include <linux/buffer_head.h>
  15#include <linux/statfs.h>
  16#include <linux/seq_file.h>
  17#include <linux/mount.h>
  18#include <linux/kthread.h>
  19#include <linux/delay.h>
  20#include <linux/gfs2_ondisk.h>
  21#include <linux/crc32.h>
  22#include <linux/time.h>
  23#include <linux/wait.h>
  24#include <linux/writeback.h>
  25#include <linux/backing-dev.h>
  26#include <linux/kernel.h>
  27
  28#include "gfs2.h"
  29#include "incore.h"
  30#include "bmap.h"
  31#include "dir.h"
  32#include "glock.h"
  33#include "glops.h"
  34#include "inode.h"
  35#include "log.h"
  36#include "meta_io.h"
  37#include "quota.h"
  38#include "recovery.h"
  39#include "rgrp.h"
  40#include "super.h"
  41#include "trans.h"
  42#include "util.h"
  43#include "sys.h"
  44#include "xattr.h"
  45#include "lops.h"
  46
  47#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
  48
  49enum {
  50        Opt_lockproto,
  51        Opt_locktable,
  52        Opt_hostdata,
  53        Opt_spectator,
  54        Opt_ignore_local_fs,
  55        Opt_localflocks,
  56        Opt_localcaching,
  57        Opt_debug,
  58        Opt_nodebug,
  59        Opt_upgrade,
  60        Opt_acl,
  61        Opt_noacl,
  62        Opt_quota_off,
  63        Opt_quota_account,
  64        Opt_quota_on,
  65        Opt_quota,
  66        Opt_noquota,
  67        Opt_suiddir,
  68        Opt_nosuiddir,
  69        Opt_data_writeback,
  70        Opt_data_ordered,
  71        Opt_meta,
  72        Opt_discard,
  73        Opt_nodiscard,
  74        Opt_commit,
  75        Opt_err_withdraw,
  76        Opt_err_panic,
  77        Opt_statfs_quantum,
  78        Opt_statfs_percent,
  79        Opt_quota_quantum,
  80        Opt_barrier,
  81        Opt_nobarrier,
  82        Opt_rgrplvb,
  83        Opt_norgrplvb,
  84        Opt_loccookie,
  85        Opt_noloccookie,
  86        Opt_error,
  87};
  88
  89static const match_table_t tokens = {
  90        {Opt_lockproto, "lockproto=%s"},
  91        {Opt_locktable, "locktable=%s"},
  92        {Opt_hostdata, "hostdata=%s"},
  93        {Opt_spectator, "spectator"},
  94        {Opt_spectator, "norecovery"},
  95        {Opt_ignore_local_fs, "ignore_local_fs"},
  96        {Opt_localflocks, "localflocks"},
  97        {Opt_localcaching, "localcaching"},
  98        {Opt_debug, "debug"},
  99        {Opt_nodebug, "nodebug"},
 100        {Opt_upgrade, "upgrade"},
 101        {Opt_acl, "acl"},
 102        {Opt_noacl, "noacl"},
 103        {Opt_quota_off, "quota=off"},
 104        {Opt_quota_account, "quota=account"},
 105        {Opt_quota_on, "quota=on"},
 106        {Opt_quota, "quota"},
 107        {Opt_noquota, "noquota"},
 108        {Opt_suiddir, "suiddir"},
 109        {Opt_nosuiddir, "nosuiddir"},
 110        {Opt_data_writeback, "data=writeback"},
 111        {Opt_data_ordered, "data=ordered"},
 112        {Opt_meta, "meta"},
 113        {Opt_discard, "discard"},
 114        {Opt_nodiscard, "nodiscard"},
 115        {Opt_commit, "commit=%d"},
 116        {Opt_err_withdraw, "errors=withdraw"},
 117        {Opt_err_panic, "errors=panic"},
 118        {Opt_statfs_quantum, "statfs_quantum=%d"},
 119        {Opt_statfs_percent, "statfs_percent=%d"},
 120        {Opt_quota_quantum, "quota_quantum=%d"},
 121        {Opt_barrier, "barrier"},
 122        {Opt_nobarrier, "nobarrier"},
 123        {Opt_rgrplvb, "rgrplvb"},
 124        {Opt_norgrplvb, "norgrplvb"},
 125        {Opt_loccookie, "loccookie"},
 126        {Opt_noloccookie, "noloccookie"},
 127        {Opt_error, NULL}
 128};
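
/*
 * How the table is consumed: match_token() (from <linux/parser.h>) scans the
 * entries in order, so "spectator" and its alias "norecovery" both map to
 * Opt_spectator, and patterns with %s/%d capture their value into a
 * substring_t slot.  A minimal sketch, assuming a writable option string:
 *
 *	substring_t tmp[MAX_OPT_ARGS];
 *	char opt[] = "commit=30";
 *	int val, token = match_token(opt, tokens, tmp);
 *	// token == Opt_commit; match_int(&tmp[0], &val) then sets val to 30
 */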
 129
 130/**
 131 * gfs2_mount_args - Parse mount options
 132 * @args: The structure into which the parsed options will be written
 133 * @options: The options to parse
 134 *
 135 * Return: errno
 136 */
 137
 138int gfs2_mount_args(struct gfs2_args *args, char *options)
 139{
 140        char *o;
 141        int token;
 142        substring_t tmp[MAX_OPT_ARGS];
 143        int rv;
 144
 145        /* Split the options into tokens with the "," character and
 146           process them */
 147
 148        while (1) {
 149                o = strsep(&options, ",");
 150                if (o == NULL)
 151                        break;
 152                if (*o == '\0')
 153                        continue;
 154
 155                token = match_token(o, tokens, tmp);
 156                switch (token) {
 157                case Opt_lockproto:
 158                        match_strlcpy(args->ar_lockproto, &tmp[0],
 159                                      GFS2_LOCKNAME_LEN);
 160                        break;
 161                case Opt_locktable:
 162                        match_strlcpy(args->ar_locktable, &tmp[0],
 163                                      GFS2_LOCKNAME_LEN);
 164                        break;
 165                case Opt_hostdata:
 166                        match_strlcpy(args->ar_hostdata, &tmp[0],
 167                                      GFS2_LOCKNAME_LEN);
 168                        break;
 169                case Opt_spectator:
 170                        args->ar_spectator = 1;
 171                        break;
 172                case Opt_ignore_local_fs:
 173                        /* Retained for backwards compat only */
 174                        break;
 175                case Opt_localflocks:
 176                        args->ar_localflocks = 1;
 177                        break;
 178                case Opt_localcaching:
 179                        /* Retained for backwards compat only */
 180                        break;
 181                case Opt_debug:
 182                        if (args->ar_errors == GFS2_ERRORS_PANIC) {
 183                                pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
 184                                return -EINVAL;
 185                        }
 186                        args->ar_debug = 1;
 187                        break;
 188                case Opt_nodebug:
 189                        args->ar_debug = 0;
 190                        break;
 191                case Opt_upgrade:
 192                        /* Retained for backwards compat only */
 193                        break;
 194                case Opt_acl:
 195                        args->ar_posix_acl = 1;
 196                        break;
 197                case Opt_noacl:
 198                        args->ar_posix_acl = 0;
 199                        break;
 200                case Opt_quota_off:
 201                case Opt_noquota:
 202                        args->ar_quota = GFS2_QUOTA_OFF;
 203                        break;
 204                case Opt_quota_account:
 205                        args->ar_quota = GFS2_QUOTA_ACCOUNT;
 206                        break;
 207                case Opt_quota_on:
 208                case Opt_quota:
 209                        args->ar_quota = GFS2_QUOTA_ON;
 210                        break;
 211                case Opt_suiddir:
 212                        args->ar_suiddir = 1;
 213                        break;
 214                case Opt_nosuiddir:
 215                        args->ar_suiddir = 0;
 216                        break;
 217                case Opt_data_writeback:
 218                        args->ar_data = GFS2_DATA_WRITEBACK;
 219                        break;
 220                case Opt_data_ordered:
 221                        args->ar_data = GFS2_DATA_ORDERED;
 222                        break;
 223                case Opt_meta:
 224                        args->ar_meta = 1;
 225                        break;
 226                case Opt_discard:
 227                        args->ar_discard = 1;
 228                        break;
 229                case Opt_nodiscard:
 230                        args->ar_discard = 0;
 231                        break;
 232                case Opt_commit:
 233                        rv = match_int(&tmp[0], &args->ar_commit);
 234                        if (rv || args->ar_commit <= 0) {
 235                                pr_warn("commit mount option requires a positive numeric argument\n");
 236                                return rv ? rv : -EINVAL;
 237                        }
 238                        break;
 239                case Opt_statfs_quantum:
 240                        rv = match_int(&tmp[0], &args->ar_statfs_quantum);
 241                        if (rv || args->ar_statfs_quantum < 0) {
 242                                pr_warn("statfs_quantum mount option requires a non-negative numeric argument\n");
 243                                return rv ? rv : -EINVAL;
 244                        }
 245                        break;
 246                case Opt_quota_quantum:
 247                        rv = match_int(&tmp[0], &args->ar_quota_quantum);
 248                        if (rv || args->ar_quota_quantum <= 0) {
 249                                pr_warn("quota_quantum mount option requires a positive numeric argument\n");
 250                                return rv ? rv : -EINVAL;
 251                        }
 252                        break;
 253                case Opt_statfs_percent:
 254                        rv = match_int(&tmp[0], &args->ar_statfs_percent);
 255                        if (rv || args->ar_statfs_percent < 0 ||
 256                            args->ar_statfs_percent > 100) {
 257                                pr_warn("statfs_percent mount option requires a numeric argument between 0 and 100\n");
 258                                return rv ? rv : -EINVAL;
 259                        }
 260                        break;
 261                case Opt_err_withdraw:
 262                        args->ar_errors = GFS2_ERRORS_WITHDRAW;
 263                        break;
 264                case Opt_err_panic:
 265                        if (args->ar_debug) {
 266                                pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
 267                                return -EINVAL;
 268                        }
 269                        args->ar_errors = GFS2_ERRORS_PANIC;
 270                        break;
 271                case Opt_barrier:
 272                        args->ar_nobarrier = 0;
 273                        break;
 274                case Opt_nobarrier:
 275                        args->ar_nobarrier = 1;
 276                        break;
 277                case Opt_rgrplvb:
 278                        args->ar_rgrplvb = 1;
 279                        break;
 280                case Opt_norgrplvb:
 281                        args->ar_rgrplvb = 0;
 282                        break;
 283                case Opt_loccookie:
 284                        args->ar_loccookie = 1;
 285                        break;
 286                case Opt_noloccookie:
 287                        args->ar_loccookie = 0;
 288                        break;
 289                case Opt_error:
 290                default:
 291                        pr_warn("invalid mount option: %s\n", o);
 292                        return -EINVAL;
 293                }
 294        }
 295
 296        return 0;
 297}
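
/*
 * A minimal usage sketch for gfs2_mount_args(), assuming a writable option
 * string (strsep() modifies its argument in place); this mirrors what the
 * remount path later in this file does, starting from the current settings:
 *
 *	struct gfs2_args args = sdp->sd_args;
 *	char opts[] = "quota=on,commit=30,discard";
 *	int err = gfs2_mount_args(&args, opts);
 *	// err == 0; args.ar_quota == GFS2_QUOTA_ON, args.ar_commit == 30,
 *	// args.ar_discard == 1.  An unrecognised option would instead make
 *	// the function log it and return -EINVAL.
 */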
 298
 299/**
 300 * gfs2_jindex_free - Clear all the journal index information
 301 * @sdp: The GFS2 superblock
 302 *
 303 */
 304
 305void gfs2_jindex_free(struct gfs2_sbd *sdp)
 306{
 307        struct list_head list;
 308        struct gfs2_jdesc *jd;
 309
 310        spin_lock(&sdp->sd_jindex_spin);
 311        list_add(&list, &sdp->sd_jindex_list);
 312        list_del_init(&sdp->sd_jindex_list);
 313        sdp->sd_journals = 0;
 314        spin_unlock(&sdp->sd_jindex_spin);
 315
 316        while (!list_empty(&list)) {
 317                jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
 318                gfs2_free_journal_extents(jd);
 319                list_del(&jd->jd_list);
 320                iput(jd->jd_inode);
 321                kfree(jd);
 322        }
 323}
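
/*
 * Note on the list handling above: the list_add()/list_del_init() pair
 * transplants the whole sd_jindex_list chain onto the local "list" head
 * while sd_jindex_spin is held (the same effect as list_splice_init()), so
 * the journal descriptors can then be freed without holding the spinlock.
 */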
 324
 325static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
 326{
 327        struct gfs2_jdesc *jd;
 328        int found = 0;
 329
 330        list_for_each_entry(jd, head, jd_list) {
 331                if (jd->jd_jid == jid) {
 332                        found = 1;
 333                        break;
 334                }
 335        }
 336
 337        if (!found)
 338                jd = NULL;
 339
 340        return jd;
 341}
 342
 343struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
 344{
 345        struct gfs2_jdesc *jd;
 346
 347        spin_lock(&sdp->sd_jindex_spin);
 348        jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
 349        spin_unlock(&sdp->sd_jindex_spin);
 350
 351        return jd;
 352}
 353
 354int gfs2_jdesc_check(struct gfs2_jdesc *jd)
 355{
 356        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
 357        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 358        u64 size = i_size_read(jd->jd_inode);
 359
 360        if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
 361                return -EIO;
 362
 363        jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
 364
 365        if (gfs2_write_alloc_required(ip, 0, size)) {
 366                gfs2_consist_inode(ip);
 367                return -EIO;
 368        }
 369
 370        return 0;
 371}
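
/*
 * Worked example for the checks above: with a 4KiB block size
 * (sb_bsize_shift == 12), a 128MiB journal falls inside the 8MiB (8 << 20)
 * to 1GiB (BIT(30)) window enforced via gfs2_check_internal_file_size(),
 * and jd_blocks becomes 134217728 >> 12 == 32768 blocks.
 */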
 372
 373static int init_threads(struct gfs2_sbd *sdp)
 374{
 375        struct task_struct *p;
 376        int error = 0;
 377
 378        p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
 379        if (IS_ERR(p)) {
 380                error = PTR_ERR(p);
 381                fs_err(sdp, "can't start logd thread: %d\n", error);
 382                return error;
 383        }
 384        sdp->sd_logd_process = p;
 385
 386        p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
 387        if (IS_ERR(p)) {
 388                error = PTR_ERR(p);
 389                fs_err(sdp, "can't start quotad thread: %d\n", error);
 390                goto fail;
 391        }
 392        sdp->sd_quotad_process = p;
 393        return 0;
 394
 395fail:
 396        kthread_stop(sdp->sd_logd_process);
 397        sdp->sd_logd_process = NULL;
 398        return error;
 399}
 400
 401/**
 402 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 403 * @sdp: the filesystem
 404 *
 405 * Returns: errno
 406 */
 407
 408int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
 409{
 410        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 411        struct gfs2_glock *j_gl = ip->i_gl;
 412        struct gfs2_holder freeze_gh;
 413        struct gfs2_log_header_host head;
 414        int error;
 415
 416        error = init_threads(sdp);
 417        if (error)
 418                return error;
 419
 420        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
 421                                   &freeze_gh);
 422        if (error)
 423                goto fail_threads;
 424
 425        j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 426
 427        error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
 428        if (error)
 429                goto fail;
 430
 431        if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
 432                gfs2_consist(sdp);
 433                error = -EIO;
 434                goto fail;
 435        }
 436
  437        /*  Initialize the log sequence number and pointers from the log head  */
 438        sdp->sd_log_sequence = head.lh_sequence + 1;
 439        gfs2_log_pointers_init(sdp, head.lh_blkno);
 440
 441        error = gfs2_quota_init(sdp);
 442        if (error)
 443                goto fail;
 444
 445        set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
 446
 447        gfs2_glock_dq_uninit(&freeze_gh);
 448
 449        return 0;
 450
 451fail:
 452        freeze_gh.gh_flags |= GL_NOCACHE;
 453        gfs2_glock_dq_uninit(&freeze_gh);
 454fail_threads:
 455        if (sdp->sd_quotad_process)
 456                kthread_stop(sdp->sd_quotad_process);
 457        sdp->sd_quotad_process = NULL;
 458        if (sdp->sd_logd_process)
 459                kthread_stop(sdp->sd_logd_process);
 460        sdp->sd_logd_process = NULL;
 461        return error;
 462}
 463
 464void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
 465{
 466        const struct gfs2_statfs_change *str = buf;
 467
 468        sc->sc_total = be64_to_cpu(str->sc_total);
 469        sc->sc_free = be64_to_cpu(str->sc_free);
 470        sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
 471}
 472
 473static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
 474{
 475        struct gfs2_statfs_change *str = buf;
 476
 477        str->sc_total = cpu_to_be64(sc->sc_total);
 478        str->sc_free = cpu_to_be64(sc->sc_free);
 479        str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
 480}
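
/*
 * The two helpers above convert the three statfs counters between CPU and
 * on-disk (big-endian) byte order.  On disk they sit immediately after the
 * dinode header, which is why every caller in this file passes
 * b_data + sizeof(struct gfs2_dinode) as the buffer.
 */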
 481
 482int gfs2_statfs_init(struct gfs2_sbd *sdp)
 483{
 484        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 485        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
 486        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
 487        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 488        struct buffer_head *m_bh, *l_bh;
 489        struct gfs2_holder gh;
 490        int error;
 491
 492        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
 493                                   &gh);
 494        if (error)
 495                return error;
 496
 497        error = gfs2_meta_inode_buffer(m_ip, &m_bh);
 498        if (error)
 499                goto out;
 500
 501        if (sdp->sd_args.ar_spectator) {
 502                spin_lock(&sdp->sd_statfs_spin);
 503                gfs2_statfs_change_in(m_sc, m_bh->b_data +
 504                                      sizeof(struct gfs2_dinode));
 505                spin_unlock(&sdp->sd_statfs_spin);
 506        } else {
 507                error = gfs2_meta_inode_buffer(l_ip, &l_bh);
 508                if (error)
 509                        goto out_m_bh;
 510
 511                spin_lock(&sdp->sd_statfs_spin);
 512                gfs2_statfs_change_in(m_sc, m_bh->b_data +
 513                                      sizeof(struct gfs2_dinode));
 514                gfs2_statfs_change_in(l_sc, l_bh->b_data +
 515                                      sizeof(struct gfs2_dinode));
 516                spin_unlock(&sdp->sd_statfs_spin);
 517
 518                brelse(l_bh);
 519        }
 520
 521out_m_bh:
 522        brelse(m_bh);
 523out:
 524        gfs2_glock_dq_uninit(&gh);
  525        return error;
 526}
 527
 528void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
 529                        s64 dinodes)
 530{
 531        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
 532        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 533        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
 534        struct buffer_head *l_bh;
 535        s64 x, y;
 536        int need_sync = 0;
 537        int error;
 538
 539        error = gfs2_meta_inode_buffer(l_ip, &l_bh);
 540        if (error)
 541                return;
 542
 543        gfs2_trans_add_meta(l_ip->i_gl, l_bh);
 544
 545        spin_lock(&sdp->sd_statfs_spin);
 546        l_sc->sc_total += total;
 547        l_sc->sc_free += free;
 548        l_sc->sc_dinodes += dinodes;
 549        gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
 550        if (sdp->sd_args.ar_statfs_percent) {
 551                x = 100 * l_sc->sc_free;
 552                y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
 553                if (x >= y || x <= -y)
 554                        need_sync = 1;
 555        }
 556        spin_unlock(&sdp->sd_statfs_spin);
 557
 558        brelse(l_bh);
 559        if (need_sync)
 560                gfs2_wake_up_statfs(sdp);
 561}
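
/*
 * Worked example for the need_sync test above: with statfs_percent=1 and a
 * master free count of 1,000,000 blocks, y is 1,000,000, so a sync is
 * requested once 100 * |l_sc->sc_free| reaches that value, i.e. once the
 * local free-space delta passes about 10,000 blocks (1% of the master
 * figure) in either direction.
 */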
 562
 563void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
 564                   struct buffer_head *l_bh)
 565{
 566        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 567        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
 568        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
 569        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 570
 571        gfs2_trans_add_meta(l_ip->i_gl, l_bh);
 572        gfs2_trans_add_meta(m_ip->i_gl, m_bh);
 573
 574        spin_lock(&sdp->sd_statfs_spin);
 575        m_sc->sc_total += l_sc->sc_total;
 576        m_sc->sc_free += l_sc->sc_free;
 577        m_sc->sc_dinodes += l_sc->sc_dinodes;
 578        memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
 579        memset(l_bh->b_data + sizeof(struct gfs2_dinode),
 580               0, sizeof(struct gfs2_statfs_change));
 581        gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
 582        spin_unlock(&sdp->sd_statfs_spin);
 583}
 584
 585int gfs2_statfs_sync(struct super_block *sb, int type)
 586{
 587        struct gfs2_sbd *sdp = sb->s_fs_info;
 588        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 589        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
 590        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
 591        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 592        struct gfs2_holder gh;
 593        struct buffer_head *m_bh, *l_bh;
 594        int error;
 595
 596        sb_start_write(sb);
 597        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
 598                                   &gh);
 599        if (error)
 600                goto out;
 601
 602        error = gfs2_meta_inode_buffer(m_ip, &m_bh);
 603        if (error)
 604                goto out_unlock;
 605
 606        spin_lock(&sdp->sd_statfs_spin);
 607        gfs2_statfs_change_in(m_sc, m_bh->b_data +
 608                              sizeof(struct gfs2_dinode));
 609        if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
 610                spin_unlock(&sdp->sd_statfs_spin);
 611                goto out_bh;
 612        }
 613        spin_unlock(&sdp->sd_statfs_spin);
 614
 615        error = gfs2_meta_inode_buffer(l_ip, &l_bh);
 616        if (error)
 617                goto out_bh;
 618
 619        error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
 620        if (error)
 621                goto out_bh2;
 622
 623        update_statfs(sdp, m_bh, l_bh);
 624        sdp->sd_statfs_force_sync = 0;
 625
 626        gfs2_trans_end(sdp);
 627
 628out_bh2:
 629        brelse(l_bh);
 630out_bh:
 631        brelse(m_bh);
 632out_unlock:
 633        gfs2_glock_dq_uninit(&gh);
 634out:
 635        sb_end_write(sb);
 636        return error;
 637}
 638
 639struct lfcc {
 640        struct list_head list;
 641        struct gfs2_holder gh;
 642};
 643
 644/**
 645 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 646 *                            journals are clean
 647 * @sdp: the file system
  648 * @freeze_gh: holder in which the exclusive freeze glock is returned
 650 *
 651 * Returns: errno
 652 */
 653
 654static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
 655                                    struct gfs2_holder *freeze_gh)
 656{
 657        struct gfs2_inode *ip;
 658        struct gfs2_jdesc *jd;
 659        struct lfcc *lfcc;
 660        LIST_HEAD(list);
 661        struct gfs2_log_header_host lh;
 662        int error;
 663
 664        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 665                lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
 666                if (!lfcc) {
 667                        error = -ENOMEM;
 668                        goto out;
 669                }
 670                ip = GFS2_I(jd->jd_inode);
 671                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
 672                if (error) {
 673                        kfree(lfcc);
 674                        goto out;
 675                }
 676                list_add(&lfcc->list, &list);
 677        }
 678
 679        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
  680                                   GL_NOCACHE, freeze_gh);
             if (error)
                     goto out;
  681
 682        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 683                error = gfs2_jdesc_check(jd);
 684                if (error)
 685                        break;
 686                error = gfs2_find_jhead(jd, &lh, false);
 687                if (error)
 688                        break;
 689                if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
 690                        error = -EBUSY;
 691                        break;
 692                }
 693        }
 694
 695        if (error)
 696                gfs2_glock_dq_uninit(freeze_gh);
 697
 698out:
 699        while (!list_empty(&list)) {
 700                lfcc = list_entry(list.next, struct lfcc, list);
 701                list_del(&lfcc->list);
 702                gfs2_glock_dq_uninit(&lfcc->gh);
 703                kfree(lfcc);
 704        }
 705        return error;
 706}
 707
 708void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
 709{
 710        struct gfs2_dinode *str = buf;
 711
 712        str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
 713        str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
 714        str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
 715        str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
 716        str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
 717        str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
 718        str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
 719        str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
 720        str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
 721        str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
 722        str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
 723        str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
 724        str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
 725        str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
 726
 727        str->di_goal_meta = cpu_to_be64(ip->i_goal);
 728        str->di_goal_data = cpu_to_be64(ip->i_goal);
 729        str->di_generation = cpu_to_be64(ip->i_generation);
 730
 731        str->di_flags = cpu_to_be32(ip->i_diskflags);
 732        str->di_height = cpu_to_be16(ip->i_height);
 733        str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
 734                                             !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
 735                                             GFS2_FORMAT_DE : 0);
 736        str->di_depth = cpu_to_be16(ip->i_depth);
 737        str->di_entries = cpu_to_be32(ip->i_entries);
 738
 739        str->di_eattr = cpu_to_be64(ip->i_eattr);
 740        str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
 741        str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
 742        str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
 743}
 744
 745/**
 746 * gfs2_write_inode - Make sure the inode is stable on the disk
 747 * @inode: The inode
 748 * @wbc: The writeback control structure
 749 *
 750 * Returns: errno
 751 */
 752
 753static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
 754{
 755        struct gfs2_inode *ip = GFS2_I(inode);
 756        struct gfs2_sbd *sdp = GFS2_SB(inode);
 757        struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
 758        struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
 759        int ret = 0;
 760        bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));
 761
 762        if (flush_all)
 763                gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
 764                               GFS2_LOG_HEAD_FLUSH_NORMAL |
 765                               GFS2_LFC_WRITE_INODE);
 766        if (bdi->wb.dirty_exceeded)
 767                gfs2_ail1_flush(sdp, wbc);
 768        else
 769                filemap_fdatawrite(metamapping);
 770        if (flush_all)
 771                ret = filemap_fdatawait(metamapping);
 772        if (ret)
 773                mark_inode_dirty_sync(inode);
 774        else {
 775                spin_lock(&inode->i_lock);
 776                if (!(inode->i_flags & I_DIRTY))
 777                        gfs2_ordered_del_inode(ip);
 778                spin_unlock(&inode->i_lock);
 779        }
 780        return ret;
 781}
 782
 783/**
 784 * gfs2_dirty_inode - check for atime updates
 785 * @inode: The inode in question
 786 * @flags: The type of dirty
 787 *
 788 * Unfortunately it can be called under any combination of inode
 789 * glock and transaction lock, so we have to check carefully.
 790 *
 791 * At the moment this deals only with atime - it should be possible
 792 * to expand that role in future, once a review of the locking has
 793 * been carried out.
 794 */
 795
 796static void gfs2_dirty_inode(struct inode *inode, int flags)
 797{
 798        struct gfs2_inode *ip = GFS2_I(inode);
 799        struct gfs2_sbd *sdp = GFS2_SB(inode);
 800        struct buffer_head *bh;
 801        struct gfs2_holder gh;
 802        int need_unlock = 0;
 803        int need_endtrans = 0;
 804        int ret;
 805
 806        if (!(flags & I_DIRTY_INODE))
 807                return;
 808        if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
 809                return;
 810        if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
 811                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 812                if (ret) {
 813                        fs_err(sdp, "dirty_inode: glock %d\n", ret);
 814                        return;
 815                }
 816                need_unlock = 1;
 817        } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
 818                return;
 819
 820        if (current->journal_info == NULL) {
 821                ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
 822                if (ret) {
 823                        fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
 824                        goto out;
 825                }
 826                need_endtrans = 1;
 827        }
 828
 829        ret = gfs2_meta_inode_buffer(ip, &bh);
 830        if (ret == 0) {
 831                gfs2_trans_add_meta(ip->i_gl, bh);
 832                gfs2_dinode_out(ip, bh->b_data);
 833                brelse(bh);
 834        }
 835
 836        if (need_endtrans)
 837                gfs2_trans_end(sdp);
 838out:
 839        if (need_unlock)
 840                gfs2_glock_dq_uninit(&gh);
 841}
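
/*
 * Summary of the cases handled above: the inode glock is acquired here only
 * if the caller does not already hold it (and an already-held glock must be
 * exclusive, or the update is skipped), and a transaction is opened only
 * when current->journal_info shows none is active.  The function therefore
 * copes with every combination of "glock held or not" and "transaction open
 * or not" that gfs2_dirty_inode() can be called under.
 */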
 842
 843/**
 844 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 845 * @sdp: the filesystem
 846 *
 847 * Returns: errno
 848 */
 849
 850static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 851{
 852        struct gfs2_holder freeze_gh;
 853        int error;
 854
 855        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, GL_NOCACHE,
 856                                   &freeze_gh);
 857        if (error && !test_bit(SDF_WITHDRAWN, &sdp->sd_flags))
 858                return error;
 859
 860        flush_workqueue(gfs2_delete_workqueue);
 861        if (sdp->sd_quotad_process)
 862                kthread_stop(sdp->sd_quotad_process);
 863        sdp->sd_quotad_process = NULL;
 864        if (sdp->sd_logd_process)
 865                kthread_stop(sdp->sd_logd_process);
 866        sdp->sd_logd_process = NULL;
 867
 868        gfs2_quota_sync(sdp->sd_vfs, 0);
 869        gfs2_statfs_sync(sdp->sd_vfs, 0);
 870
 871        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
 872                       GFS2_LFC_MAKE_FS_RO);
 873        wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
 874        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
 875
 876        if (gfs2_holder_initialized(&freeze_gh))
 877                gfs2_glock_dq_uninit(&freeze_gh);
 878
 879        gfs2_quota_cleanup(sdp);
 880
 881        return error;
 882}
 883
 884/**
 885 * gfs2_put_super - Unmount the filesystem
 886 * @sb: The VFS superblock
 887 *
 888 */
 889
 890static void gfs2_put_super(struct super_block *sb)
 891{
 892        struct gfs2_sbd *sdp = sb->s_fs_info;
 893        int error;
 894        struct gfs2_jdesc *jd;
 895
 896        /* No more recovery requests */
 897        set_bit(SDF_NORECOVERY, &sdp->sd_flags);
 898        smp_mb();
 899
 900        /* Wait on outstanding recovery */
 901restart:
 902        spin_lock(&sdp->sd_jindex_spin);
 903        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
 904                if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
 905                        continue;
 906                spin_unlock(&sdp->sd_jindex_spin);
 907                wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
 908                            TASK_UNINTERRUPTIBLE);
 909                goto restart;
 910        }
 911        spin_unlock(&sdp->sd_jindex_spin);
 912
 913        if (!sb_rdonly(sb)) {
 914                error = gfs2_make_fs_ro(sdp);
 915                if (error)
 916                        gfs2_io_error(sdp);
 917        }
 918        /*  At this point, we're through modifying the disk  */
 919
 920        /*  Release stuff  */
 921
 922        iput(sdp->sd_jindex);
 923        iput(sdp->sd_statfs_inode);
 924        iput(sdp->sd_rindex);
 925        iput(sdp->sd_quota_inode);
 926
 927        gfs2_glock_put(sdp->sd_rename_gl);
 928        gfs2_glock_put(sdp->sd_freeze_gl);
 929
 930        if (!sdp->sd_args.ar_spectator) {
 931                gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
 932                gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
 933                gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
 934                gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
 935                iput(sdp->sd_sc_inode);
 936                iput(sdp->sd_qc_inode);
 937        }
 938
 939        gfs2_glock_dq_uninit(&sdp->sd_live_gh);
 940        gfs2_clear_rgrpd(sdp);
 941        gfs2_jindex_free(sdp);
 942        /*  Take apart glock structures and buffer lists  */
 943        gfs2_gl_hash_clear(sdp);
 944        gfs2_delete_debugfs_file(sdp);
 945        /*  Unmount the locking protocol  */
 946        gfs2_lm_unmount(sdp);
 947
 948        /*  At this point, we're through participating in the lockspace  */
 949        gfs2_sys_fs_del(sdp);
 950}
 951
 952/**
 953 * gfs2_sync_fs - sync the filesystem
  954 * @sb: the superblock
      * @wait: non-zero to wait for the log flush to complete
 955 *
 956 * Flushes the log to disk.
 957 */
 958
 959static int gfs2_sync_fs(struct super_block *sb, int wait)
 960{
 961        struct gfs2_sbd *sdp = sb->s_fs_info;
 962
 963        gfs2_quota_sync(sb, -1);
 964        if (wait)
 965                gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
 966                               GFS2_LFC_SYNC_FS);
 967        return sdp->sd_log_error;
 968}
 969
 970void gfs2_freeze_func(struct work_struct *work)
 971{
 972        int error;
 973        struct gfs2_holder freeze_gh;
 974        struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
 975        struct super_block *sb = sdp->sd_vfs;
 976
 977        atomic_inc(&sb->s_active);
 978        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
 979                                   &freeze_gh);
 980        if (error) {
  981                fs_info(sdp, "GFS2: couldn't get freeze lock: %d\n", error);
 982                gfs2_assert_withdraw(sdp, 0);
 983        } else {
 984                atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
 985                error = thaw_super(sb);
 986                if (error) {
 987                        fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
 988                                error);
 989                        gfs2_assert_withdraw(sdp, 0);
 990                }
 991                if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
 992                        freeze_gh.gh_flags |= GL_NOCACHE;
 993                gfs2_glock_dq_uninit(&freeze_gh);
 994        }
 995        deactivate_super(sb);
 996        clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
 997        wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
 998        return;
 999}
1000
1001/**
1002 * gfs2_freeze - prevent further writes to the filesystem
1003 * @sb: the VFS structure for the filesystem
1004 *
1005 */
1006
1007static int gfs2_freeze(struct super_block *sb)
1008{
1009        struct gfs2_sbd *sdp = sb->s_fs_info;
1010        int error = 0;
1011
1012        mutex_lock(&sdp->sd_freeze_mutex);
1013        if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
1014                goto out;
1015
1016        if (test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
1017                error = -EINVAL;
1018                goto out;
1019        }
1020
1021        for (;;) {
1022                error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
1023                if (!error)
1024                        break;
1025
1026                if (error == -EBUSY)
1027                        fs_err(sdp, "waiting for recovery before freeze\n");
1028                else
1029                        fs_err(sdp, "error freezing FS: %d\n", error);
1030
1031                fs_err(sdp, "retrying...\n");
1032                msleep(1000);
1033        }
1034        set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
1035out:
1036        mutex_unlock(&sdp->sd_freeze_mutex);
1037        return error;
1038}
1039
1040/**
1041 * gfs2_unfreeze - reallow writes to the filesystem
1042 * @sb: the VFS structure for the filesystem
1043 *
1044 */
1045
1046static int gfs2_unfreeze(struct super_block *sb)
1047{
1048        struct gfs2_sbd *sdp = sb->s_fs_info;
1049
1050        mutex_lock(&sdp->sd_freeze_mutex);
1051        if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
1052            !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
1053                mutex_unlock(&sdp->sd_freeze_mutex);
1054                return 0;
1055        }
1056
1057        gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
1058        mutex_unlock(&sdp->sd_freeze_mutex);
1059        return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
1060}
1061
1062/**
 1063 * statfs_slow_fill - fill in the sc for a given RG
1064 * @rgd: the RG
1065 * @sc: the sc structure
1066 *
1067 * Returns: 0 on success, -ESTALE if the LVB is invalid
1068 */
1069
1070static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
1071                            struct gfs2_statfs_change_host *sc)
1072{
1073        gfs2_rgrp_verify(rgd);
1074        sc->sc_total += rgd->rd_data;
1075        sc->sc_free += rgd->rd_free;
1076        sc->sc_dinodes += rgd->rd_dinodes;
1077        return 0;
1078}
1079
1080/**
1081 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
1082 * @sdp: the filesystem
1083 * @sc: the sc info that will be returned
1084 *
1085 * Any error (other than a signal) will cause this routine to fall back
1086 * to the synchronous version.
1087 *
1088 * FIXME: This really shouldn't busy wait like this.
1089 *
1090 * Returns: errno
1091 */
1092
1093static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
1094{
1095        struct gfs2_rgrpd *rgd_next;
1096        struct gfs2_holder *gha, *gh;
1097        unsigned int slots = 64;
1098        unsigned int x;
1099        int done;
1100        int error = 0, err;
1101
1102        memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
1103        gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
1104        if (!gha)
1105                return -ENOMEM;
1106        for (x = 0; x < slots; x++)
1107                gfs2_holder_mark_uninitialized(gha + x);
1108
1109        rgd_next = gfs2_rgrpd_get_first(sdp);
1110
1111        for (;;) {
1112                done = 1;
1113
1114                for (x = 0; x < slots; x++) {
1115                        gh = gha + x;
1116
1117                        if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
1118                                err = gfs2_glock_wait(gh);
1119                                if (err) {
1120                                        gfs2_holder_uninit(gh);
1121                                        error = err;
1122                                } else {
1123                                        if (!error) {
1124                                                struct gfs2_rgrpd *rgd =
1125                                                        gfs2_glock2rgrp(gh->gh_gl);
1126
1127                                                error = statfs_slow_fill(rgd, sc);
1128                                        }
1129                                        gfs2_glock_dq_uninit(gh);
1130                                }
1131                        }
1132
1133                        if (gfs2_holder_initialized(gh))
1134                                done = 0;
1135                        else if (rgd_next && !error) {
1136                                error = gfs2_glock_nq_init(rgd_next->rd_gl,
1137                                                           LM_ST_SHARED,
1138                                                           GL_ASYNC,
1139                                                           gh);
1140                                rgd_next = gfs2_rgrpd_get_next(rgd_next);
1141                                done = 0;
1142                        }
1143
1144                        if (signal_pending(current))
1145                                error = -ERESTARTSYS;
1146                }
1147
1148                if (done)
1149                        break;
1150
1151                yield();
1152        }
1153
1154        kfree(gha);
1155        return error;
1156}
1157
1158/**
1159 * gfs2_statfs_i - Do a statfs
1160 * @sdp: the filesystem
 1161 * @sc: the sc structure that is filled in
1162 *
1163 * Returns: errno
1164 */
1165
1166static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
1167{
1168        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
1169        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
1170
1171        spin_lock(&sdp->sd_statfs_spin);
1172
1173        *sc = *m_sc;
1174        sc->sc_total += l_sc->sc_total;
1175        sc->sc_free += l_sc->sc_free;
1176        sc->sc_dinodes += l_sc->sc_dinodes;
1177
1178        spin_unlock(&sdp->sd_statfs_spin);
1179
1180        if (sc->sc_free < 0)
1181                sc->sc_free = 0;
1182        if (sc->sc_free > sc->sc_total)
1183                sc->sc_free = sc->sc_total;
1184        if (sc->sc_dinodes < 0)
1185                sc->sc_dinodes = 0;
1186
1187        return 0;
1188}
1189
1190/**
1191 * gfs2_statfs - Gather and return stats about the filesystem
 1192 * @dentry: The dentry whose superblock is being queried
 1193 * @buf: The struct kstatfs buffer to fill in
1194 *
1195 * Returns: 0 on success or error code
1196 */
1197
1198static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
1199{
1200        struct super_block *sb = dentry->d_sb;
1201        struct gfs2_sbd *sdp = sb->s_fs_info;
1202        struct gfs2_statfs_change_host sc;
1203        int error;
1204
1205        error = gfs2_rindex_update(sdp);
1206        if (error)
1207                return error;
1208
1209        if (gfs2_tune_get(sdp, gt_statfs_slow))
1210                error = gfs2_statfs_slow(sdp, &sc);
1211        else
1212                error = gfs2_statfs_i(sdp, &sc);
1213
1214        if (error)
1215                return error;
1216
1217        buf->f_type = GFS2_MAGIC;
1218        buf->f_bsize = sdp->sd_sb.sb_bsize;
1219        buf->f_blocks = sc.sc_total;
1220        buf->f_bfree = sc.sc_free;
1221        buf->f_bavail = sc.sc_free;
1222        buf->f_files = sc.sc_dinodes + sc.sc_free;
1223        buf->f_ffree = sc.sc_free;
1224        buf->f_namelen = GFS2_FNAMESIZE;
1225
1226        return 0;
1227}
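
/*
 * Note on the field mapping above: GFS2 has no fixed inode table; dinodes
 * are allocated from ordinary filesystem blocks.  f_files is therefore
 * reported as sc_dinodes + sc_free (inodes in use plus blocks that could
 * still become inodes) and f_ffree as the free block count.
 */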
1228
1229/**
1230 * gfs2_remount_fs - called when the FS is remounted
1231 * @sb:  the filesystem
1232 * @flags:  the remount flags
1233 * @data:  extra data passed in (not used right now)
1234 *
1235 * Returns: errno
1236 */
1237
1238static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1239{
1240        struct gfs2_sbd *sdp = sb->s_fs_info;
1241        struct gfs2_args args = sdp->sd_args; /* Default to current settings */
1242        struct gfs2_tune *gt = &sdp->sd_tune;
1243        int error;
1244
1245        sync_filesystem(sb);
1246
1247        spin_lock(&gt->gt_spin);
1248        args.ar_commit = gt->gt_logd_secs;
1249        args.ar_quota_quantum = gt->gt_quota_quantum;
1250        if (gt->gt_statfs_slow)
1251                args.ar_statfs_quantum = 0;
1252        else
1253                args.ar_statfs_quantum = gt->gt_statfs_quantum;
1254        spin_unlock(&gt->gt_spin);
1255        error = gfs2_mount_args(&args, data);
1256        if (error)
1257                return error;
1258
1259        /* Not allowed to change locking details */
1260        if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
1261            strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
1262            strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
1263                return -EINVAL;
1264
1265        /* Some flags must not be changed */
1266        if (args_neq(&args, &sdp->sd_args, spectator) ||
1267            args_neq(&args, &sdp->sd_args, localflocks) ||
1268            args_neq(&args, &sdp->sd_args, meta))
1269                return -EINVAL;
1270
1271        if (sdp->sd_args.ar_spectator)
1272                *flags |= SB_RDONLY;
1273
1274        if ((sb->s_flags ^ *flags) & SB_RDONLY) {
1275                if (*flags & SB_RDONLY)
1276                        error = gfs2_make_fs_ro(sdp);
1277                else
1278                        error = gfs2_make_fs_rw(sdp);
1279        }
1280
1281        sdp->sd_args = args;
1282        if (sdp->sd_args.ar_posix_acl)
1283                sb->s_flags |= SB_POSIXACL;
1284        else
1285                sb->s_flags &= ~SB_POSIXACL;
1286        if (sdp->sd_args.ar_nobarrier)
1287                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1288        else
1289                clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1290        spin_lock(&gt->gt_spin);
1291        gt->gt_logd_secs = args.ar_commit;
1292        gt->gt_quota_quantum = args.ar_quota_quantum;
1293        if (args.ar_statfs_quantum) {
1294                gt->gt_statfs_slow = 0;
1295                gt->gt_statfs_quantum = args.ar_statfs_quantum;
1296        }
1297        else {
1298                gt->gt_statfs_slow = 1;
1299                gt->gt_statfs_quantum = 30;
1300        }
1301        spin_unlock(&gt->gt_spin);
1302
1303        gfs2_online_uevent(sdp);
1304        return error;
1305}
1306
1307/**
1308 * gfs2_drop_inode - Drop an inode (test for remote unlink)
1309 * @inode: The inode to drop
1310 *
1311 * If we've received a callback on an iopen lock then it's because a
1312 * remote node tried to deallocate the inode but failed due to this node
1313 * still having the inode open. Here we mark the link count zero
1314 * since we know that it must have reached zero if the GLF_DEMOTE flag
1315 * is set on the iopen glock. If we didn't do a disk read since the
1316 * remote node removed the final link then we might otherwise miss
1317 * this event. This check ensures that this node will deallocate the
1318 * inode's blocks, or alternatively pass the baton on to another
1319 * node for later deallocation.
1320 */
1321
1322static int gfs2_drop_inode(struct inode *inode)
1323{
1324        struct gfs2_inode *ip = GFS2_I(inode);
1325
1326        if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
1327            inode->i_nlink &&
1328            gfs2_holder_initialized(&ip->i_iopen_gh)) {
1329                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1330                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
1331                        clear_nlink(inode);
1332        }
1333
1334        /*
1335         * When under memory pressure when an inode's link count has dropped to
1336         * zero, defer deleting the inode to the delete workqueue.  This avoids
1337         * calling into DLM under memory pressure, which can deadlock.
1338         */
1339        if (!inode->i_nlink &&
1340            unlikely(current->flags & PF_MEMALLOC) &&
1341            gfs2_holder_initialized(&ip->i_iopen_gh)) {
1342                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1343
1344                gfs2_glock_hold(gl);
1345                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1346                        gfs2_glock_queue_put(gl);
1347                return false;
1348        }
1349
1350        return generic_drop_inode(inode);
1351}
1352
1353static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
1354{
1355        do {
1356                if (d1 == d2)
1357                        return 1;
1358                d1 = d1->d_parent;
1359        } while (!IS_ROOT(d1));
1360        return 0;
1361}
1362
1363/**
1364 * gfs2_show_options - Show mount options for /proc/mounts
1365 * @s: seq_file structure
1366 * @root: root of this (sub)tree
1367 *
1368 * Returns: 0 on success or error code
1369 */
1370
1371static int gfs2_show_options(struct seq_file *s, struct dentry *root)
1372{
1373        struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
1374        struct gfs2_args *args = &sdp->sd_args;
1375        int val;
1376
1377        if (is_ancestor(root, sdp->sd_master_dir))
1378                seq_puts(s, ",meta");
1379        if (args->ar_lockproto[0])
1380                seq_show_option(s, "lockproto", args->ar_lockproto);
1381        if (args->ar_locktable[0])
1382                seq_show_option(s, "locktable", args->ar_locktable);
1383        if (args->ar_hostdata[0])
1384                seq_show_option(s, "hostdata", args->ar_hostdata);
1385        if (args->ar_spectator)
1386                seq_puts(s, ",spectator");
1387        if (args->ar_localflocks)
1388                seq_puts(s, ",localflocks");
1389        if (args->ar_debug)
1390                seq_puts(s, ",debug");
1391        if (args->ar_posix_acl)
1392                seq_puts(s, ",acl");
1393        if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
1394                char *state;
1395                switch (args->ar_quota) {
1396                case GFS2_QUOTA_OFF:
1397                        state = "off";
1398                        break;
1399                case GFS2_QUOTA_ACCOUNT:
1400                        state = "account";
1401                        break;
1402                case GFS2_QUOTA_ON:
1403                        state = "on";
1404                        break;
1405                default:
1406                        state = "unknown";
1407                        break;
1408                }
1409                seq_printf(s, ",quota=%s", state);
1410        }
1411        if (args->ar_suiddir)
1412                seq_puts(s, ",suiddir");
1413        if (args->ar_data != GFS2_DATA_DEFAULT) {
1414                char *state;
1415                switch (args->ar_data) {
1416                case GFS2_DATA_WRITEBACK:
1417                        state = "writeback";
1418                        break;
1419                case GFS2_DATA_ORDERED:
1420                        state = "ordered";
1421                        break;
1422                default:
1423                        state = "unknown";
1424                        break;
1425                }
1426                seq_printf(s, ",data=%s", state);
1427        }
1428        if (args->ar_discard)
1429                seq_puts(s, ",discard");
1430        val = sdp->sd_tune.gt_logd_secs;
1431        if (val != 30)
1432                seq_printf(s, ",commit=%d", val);
1433        val = sdp->sd_tune.gt_statfs_quantum;
1434        if (val != 30)
1435                seq_printf(s, ",statfs_quantum=%d", val);
1436        else if (sdp->sd_tune.gt_statfs_slow)
1437                seq_puts(s, ",statfs_quantum=0");
1438        val = sdp->sd_tune.gt_quota_quantum;
1439        if (val != 60)
1440                seq_printf(s, ",quota_quantum=%d", val);
1441        if (args->ar_statfs_percent)
1442                seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
1443        if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
1444                const char *state;
1445
1446                switch (args->ar_errors) {
1447                case GFS2_ERRORS_WITHDRAW:
1448                        state = "withdraw";
1449                        break;
1450                case GFS2_ERRORS_PANIC:
1451                        state = "panic";
1452                        break;
1453                default:
1454                        state = "unknown";
1455                        break;
1456                }
1457                seq_printf(s, ",errors=%s", state);
1458        }
1459        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
1460                seq_puts(s, ",nobarrier");
1461        if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
1462                seq_puts(s, ",demote_interface_used");
1463        if (args->ar_rgrplvb)
1464                seq_puts(s, ",rgrplvb");
1465        if (args->ar_loccookie)
1466                seq_puts(s, ",loccookie");
1467        return 0;
1468}
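
/*
 * Illustrative /proc/mounts fragment (hypothetical values): a cluster mount
 * with locktable "mycluster:gfs0", quota accounting and a 15 second commit
 * interval would show something like
 *
 *	,locktable=mycluster:gfs0,quota=account,commit=15
 *
 * while settings still at their defaults (e.g. commit=30, errors=withdraw)
 * are suppressed by the checks above.
 */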
1469
1470static void gfs2_final_release_pages(struct gfs2_inode *ip)
1471{
1472        struct inode *inode = &ip->i_inode;
1473        struct gfs2_glock *gl = ip->i_gl;
1474
1475        truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
1476        truncate_inode_pages(&inode->i_data, 0);
1477
1478        if (atomic_read(&gl->gl_revokes) == 0) {
1479                clear_bit(GLF_LFLUSH, &gl->gl_flags);
1480                clear_bit(GLF_DIRTY, &gl->gl_flags);
1481        }
1482}
1483
1484static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
1485{
1486        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1487        struct gfs2_rgrpd *rgd;
1488        struct gfs2_holder gh;
1489        int error;
1490
1491        if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
1492                gfs2_consist_inode(ip);
1493                return -EIO;
1494        }
1495
1496        error = gfs2_rindex_update(sdp);
1497        if (error)
1498                return error;
1499
1500        error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1501        if (error)
1502                return error;
1503
1504        rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
1505        if (!rgd) {
1506                gfs2_consist_inode(ip);
1507                error = -EIO;
1508                goto out_qs;
1509        }
1510
1511        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1512        if (error)
1513                goto out_qs;
1514
1515        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
1516                                 sdp->sd_jdesc->jd_blocks);
1517        if (error)
1518                goto out_rg_gunlock;
1519
1520        gfs2_free_di(rgd, ip);
1521
1522        gfs2_final_release_pages(ip);
1523
1524        gfs2_trans_end(sdp);
1525
1526out_rg_gunlock:
1527        gfs2_glock_dq_uninit(&gh);
1528out_qs:
1529        gfs2_quota_unhold(ip);
1530        return error;
1531}
1532
1533/**
1534 * gfs2_glock_put_eventually
1535 * @gl: The glock to put
1536 *
1537 * When under memory pressure, trigger a deferred glock put to make sure we
1538 * won't call into DLM and deadlock.  Otherwise, put the glock directly.
1539 */
1540
1541static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
1542{
1543        if (current->flags & PF_MEMALLOC)
1544                gfs2_glock_queue_put(gl);
1545        else
1546                gfs2_glock_put(gl);
1547}
1548
1549/**
1550 * gfs2_evict_inode - Remove an inode from cache
1551 * @inode: The inode to evict
1552 *
1553 * There are three cases to consider:
1554 * 1. i_nlink == 0, we are final opener (and must deallocate)
1555 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
1556 * 3. i_nlink > 0
1557 *
1558 * If the fs is read only, then we have to treat all cases as per #3
1559 * since we are unable to do any deallocation. The inode will be
1560 * deallocated by the next read/write node to attempt an allocation
1561 * in the same resource group
1562 *
 1563 * We have to (at the moment) hold the inode's main lock to cover
1564 * the gap between unlocking the shared lock on the iopen lock and
1565 * taking the exclusive lock. I'd rather do a shared -> exclusive
1566 * conversion on the iopen lock, but we can change that later. This
1567 * is safe, just less efficient.
1568 */
1569
1570static void gfs2_evict_inode(struct inode *inode)
1571{
1572        struct super_block *sb = inode->i_sb;
1573        struct gfs2_sbd *sdp = sb->s_fs_info;
1574        struct gfs2_inode *ip = GFS2_I(inode);
1575        struct gfs2_holder gh;
1576        struct address_space *metamapping;
1577        int error;
1578
1579        if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
1580                clear_inode(inode);
1581                return;
1582        }
1583
1584        if (inode->i_nlink || sb_rdonly(sb))
1585                goto out;
1586
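        /*
         * GIF_ALLOC_FAILED indicates inode creation gave up partway; that
         * path still holds the inode glock (asserted below), so skip
         * straight to deallocation.
         */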
1587        if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
1588                BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
1589                gfs2_holder_mark_uninitialized(&gh);
1590                goto alloc_failed;
1591        }
1592
1593        /* Deletes should never happen under memory pressure anymore.  */
1594        if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
1595                goto out;
1596
1597        /* Must not read inode block until block type has been verified */
1598        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
1599        if (unlikely(error)) {
1600                glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
1601                ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1602                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1603                goto out;
1604        }
1605
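        /*
         * Make sure the dinode block is still marked unlinked in the
         * bitmap; if not (or on error), another node has presumably dealt
         * with it already, so just truncate our cached pages instead.
         */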
1606        error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
1607        if (error)
1608                goto out_truncate;
1609
1610        if (test_bit(GIF_INVALID, &ip->i_flags)) {
1611                error = gfs2_inode_refresh(ip);
1612                if (error)
1613                        goto out_truncate;
1614        }
1615
1616        /*
1617         * The inode may have been recreated in the meantime.
1618         */
1619        if (inode->i_nlink)
1620                goto out_truncate;
1621
1622alloc_failed:
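        /*
         * Retake the iopen glock exclusively.  LM_FLAG_TRY_1CB is a
         * try-lock that sends a single callback to conflicting holders
         * rather than waiting, so if another node still has the inode open
         * the request fails and deallocation is left to the last closer.
         */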
1623        if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
1624            test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
1625                ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1626                gfs2_glock_dq_wait(&ip->i_iopen_gh);
1627                gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
1628                                   &ip->i_iopen_gh);
1629                error = gfs2_glock_nq(&ip->i_iopen_gh);
1630                if (error)
1631                        goto out_truncate;
1632        }
1633
1634        if (S_ISDIR(inode->i_mode) &&
1635            (ip->i_diskflags & GFS2_DIF_EXHASH)) {
1636                error = gfs2_dir_exhash_dealloc(ip);
1637                if (error)
1638                        goto out_unlock;
1639        }
1640
1641        if (ip->i_eattr) {
1642                error = gfs2_ea_dealloc(ip);
1643                if (error)
1644                        goto out_unlock;
1645        }
1646
1647        if (!gfs2_is_stuffed(ip)) {
1648                error = gfs2_file_dealloc(ip);
1649                if (error)
1650                        goto out_unlock;
1651        }
1652
1653        /* We're about to clear the bitmap for the dinode, but as soon as we
1654           do, gfs2_create_inode can create another inode at the same block
1655           location and try to set gl_object again. We clear gl_object here so
1656           that subsequent inode creates don't see an old gl_object. */
1657        glock_clear_object(ip->i_gl, ip);
1658        error = gfs2_dinode_dealloc(ip);
1659        goto out_unlock;
1660
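        /*
         * Not deallocating here: flush the log, write back anything dirty
         * for this inode, and drop its cached pages inside a transaction
         * before the glocks are released.
         */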
1661out_truncate:
1662        gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
1663                       GFS2_LFC_EVICT_INODE);
1664        metamapping = gfs2_glock2aspace(ip->i_gl);
1665        if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
1666                filemap_fdatawrite(metamapping);
1667                filemap_fdatawait(metamapping);
1668        }
1669        write_inode_now(inode, 1);
1670        gfs2_ail_flush(ip->i_gl, 0);
1671
1672        error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
1673        if (error)
1674                goto out_unlock;
1675        /* Needs to be done before glock release and also in a transaction */
1676        truncate_inode_pages(&inode->i_data, 0);
1677        truncate_inode_pages(metamapping, 0);
1678        gfs2_trans_end(sdp);
1679
1680out_unlock:
1681        if (gfs2_rs_active(&ip->i_res))
1682                gfs2_rs_deltree(&ip->i_res);
1683
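        /*
         * Detach from the iopen glock and drop our holder; GL_NOCACHE
         * keeps the lock from being cached once we are done with it.
         */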
1684        if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
1685                glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
1686                if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
1687                        ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1688                        gfs2_glock_dq(&ip->i_iopen_gh);
1689                }
1690                gfs2_holder_uninit(&ip->i_iopen_gh);
1691        }
1692        if (gfs2_holder_initialized(&gh)) {
1693                glock_clear_object(ip->i_gl, ip);
1694                gfs2_glock_dq_uninit(&gh);
1695        }
1696        if (error && error != GLR_TRYFAILED && error != -EROFS)
1697                fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
1698out:
1699        truncate_inode_pages_final(&inode->i_data);
1700        gfs2_rsqa_delete(ip, NULL);
1701        gfs2_ordered_del_inode(ip);
1702        clear_inode(inode);
1703        gfs2_dir_hash_inval(ip);
1704        glock_clear_object(ip->i_gl, ip);
1705        wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
1706        gfs2_glock_add_to_lru(ip->i_gl);
1707        gfs2_glock_put_eventually(ip->i_gl);
1708        ip->i_gl = NULL;
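        /*
         * Finally drop the iopen glock.  The extra reference keeps the
         * glock alive across gfs2_glock_dq_uninit() so it can still be
         * passed to gfs2_glock_put_eventually() afterwards.
         */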
1709        if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
1710                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1711
1712                glock_clear_object(gl, ip);
1713                ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1714                gfs2_glock_hold(gl);
1715                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1716                gfs2_glock_put_eventually(gl);
1717        }
1718}
1719
1720static struct inode *gfs2_alloc_inode(struct super_block *sb)
1721{
1722        struct gfs2_inode *ip;
1723
1724        ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
1725        if (ip) {
1726                ip->i_flags = 0;
1727                ip->i_gl = NULL;
1728                memset(&ip->i_res, 0, sizeof(ip->i_res));
1729                RB_CLEAR_NODE(&ip->i_res.rs_node);
1730                ip->i_rahead = 0;
1731        }
1732        return &ip->i_inode;
1733}
1734
1735static void gfs2_free_inode(struct inode *inode)
1736{
1737        kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
1738}
1739
1740const struct super_operations gfs2_super_ops = {
1741        .alloc_inode            = gfs2_alloc_inode,
1742        .free_inode             = gfs2_free_inode,
1743        .write_inode            = gfs2_write_inode,
1744        .dirty_inode            = gfs2_dirty_inode,
1745        .evict_inode            = gfs2_evict_inode,
1746        .put_super              = gfs2_put_super,
1747        .sync_fs                = gfs2_sync_fs,
1748        .freeze_super           = gfs2_freeze,
1749        .thaw_super             = gfs2_unfreeze,
1750        .statfs                 = gfs2_statfs,
1751        .remount_fs             = gfs2_remount_fs,
1752        .drop_inode             = gfs2_drop_inode,
1753        .show_options           = gfs2_show_options,
1754};
1755
1756