linux/fs/xfs/xfs_mount.c
   1/*
   2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_types.h"
  21#include "xfs_bit.h"
  22#include "xfs_log.h"
  23#include "xfs_inum.h"
  24#include "xfs_trans.h"
  25#include "xfs_sb.h"
  26#include "xfs_ag.h"
  27#include "xfs_dir2.h"
  28#include "xfs_mount.h"
  29#include "xfs_bmap_btree.h"
  30#include "xfs_alloc_btree.h"
  31#include "xfs_ialloc_btree.h"
  32#include "xfs_dinode.h"
  33#include "xfs_inode.h"
  34#include "xfs_btree.h"
  35#include "xfs_ialloc.h"
  36#include "xfs_alloc.h"
  37#include "xfs_rtalloc.h"
  38#include "xfs_bmap.h"
  39#include "xfs_error.h"
  40#include "xfs_rw.h"
  41#include "xfs_quota.h"
  42#include "xfs_fsops.h"
  43#include "xfs_utils.h"
  44#include "xfs_trace.h"
  45
  46
  47STATIC void     xfs_unmountfs_wait(xfs_mount_t *);
  48
  49
  50#ifdef HAVE_PERCPU_SB
  51STATIC void     xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
  52                                                int);
  53STATIC void     xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
  54                                                int);
  55STATIC void     xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
  56#else
  57
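/*
 * Without per-cpu superblock counters, the balancing helpers below compile
 * away to no-ops.
 */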
  58#define xfs_icsb_balance_counter(mp, a, b)              do { } while (0)
  59#define xfs_icsb_balance_counter_locked(mp, a, b)       do { } while (0)
  60#endif
  61
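/*
 * Byte offset of every field in xfs_sb_t, in on-disk order.  xfs_sb_to_disk()
 * derives each field's size from the difference between consecutive offsets,
 * so the trailing sizeof(xfs_sb_t) entry acts as a sentinel.  Fields marked
 * type 1 (the uuid and fname arrays) are copied verbatim without endian
 * conversion.
 */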
  62static const struct {
  63        short offset;
  64        short type;     /* 0 = integer
  65                         * 1 = binary / string (no translation)
  66                         */
  67} xfs_sb_info[] = {
  68    { offsetof(xfs_sb_t, sb_magicnum),   0 },
  69    { offsetof(xfs_sb_t, sb_blocksize),  0 },
  70    { offsetof(xfs_sb_t, sb_dblocks),    0 },
  71    { offsetof(xfs_sb_t, sb_rblocks),    0 },
  72    { offsetof(xfs_sb_t, sb_rextents),   0 },
  73    { offsetof(xfs_sb_t, sb_uuid),       1 },
  74    { offsetof(xfs_sb_t, sb_logstart),   0 },
  75    { offsetof(xfs_sb_t, sb_rootino),    0 },
  76    { offsetof(xfs_sb_t, sb_rbmino),     0 },
  77    { offsetof(xfs_sb_t, sb_rsumino),    0 },
  78    { offsetof(xfs_sb_t, sb_rextsize),   0 },
  79    { offsetof(xfs_sb_t, sb_agblocks),   0 },
  80    { offsetof(xfs_sb_t, sb_agcount),    0 },
  81    { offsetof(xfs_sb_t, sb_rbmblocks),  0 },
  82    { offsetof(xfs_sb_t, sb_logblocks),  0 },
  83    { offsetof(xfs_sb_t, sb_versionnum), 0 },
  84    { offsetof(xfs_sb_t, sb_sectsize),   0 },
  85    { offsetof(xfs_sb_t, sb_inodesize),  0 },
  86    { offsetof(xfs_sb_t, sb_inopblock),  0 },
  87    { offsetof(xfs_sb_t, sb_fname[0]),   1 },
  88    { offsetof(xfs_sb_t, sb_blocklog),   0 },
  89    { offsetof(xfs_sb_t, sb_sectlog),    0 },
  90    { offsetof(xfs_sb_t, sb_inodelog),   0 },
  91    { offsetof(xfs_sb_t, sb_inopblog),   0 },
  92    { offsetof(xfs_sb_t, sb_agblklog),   0 },
  93    { offsetof(xfs_sb_t, sb_rextslog),   0 },
  94    { offsetof(xfs_sb_t, sb_inprogress), 0 },
  95    { offsetof(xfs_sb_t, sb_imax_pct),   0 },
  96    { offsetof(xfs_sb_t, sb_icount),     0 },
  97    { offsetof(xfs_sb_t, sb_ifree),      0 },
  98    { offsetof(xfs_sb_t, sb_fdblocks),   0 },
  99    { offsetof(xfs_sb_t, sb_frextents),  0 },
 100    { offsetof(xfs_sb_t, sb_uquotino),   0 },
 101    { offsetof(xfs_sb_t, sb_gquotino),   0 },
 102    { offsetof(xfs_sb_t, sb_qflags),     0 },
 103    { offsetof(xfs_sb_t, sb_flags),      0 },
 104    { offsetof(xfs_sb_t, sb_shared_vn),  0 },
 105    { offsetof(xfs_sb_t, sb_inoalignmt), 0 },
 106    { offsetof(xfs_sb_t, sb_unit),       0 },
 107    { offsetof(xfs_sb_t, sb_width),      0 },
 108    { offsetof(xfs_sb_t, sb_dirblklog),  0 },
 109    { offsetof(xfs_sb_t, sb_logsectlog), 0 },
 110    { offsetof(xfs_sb_t, sb_logsectsize),0 },
 111    { offsetof(xfs_sb_t, sb_logsunit),   0 },
 112    { offsetof(xfs_sb_t, sb_features2),  0 },
 113    { offsetof(xfs_sb_t, sb_bad_features2), 0 },
 114    { sizeof(xfs_sb_t),                  0 }
 115};
 116
 117static DEFINE_MUTEX(xfs_uuid_table_mutex);
 118static int xfs_uuid_table_size;
 119static uuid_t *xfs_uuid_table;
 120
 121/*
 122 * See if the UUID is unique among mounted XFS filesystems.
 123 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 124 */
 125STATIC int
 126xfs_uuid_mount(
 127        struct xfs_mount        *mp)
 128{
 129        uuid_t                  *uuid = &mp->m_sb.sb_uuid;
 130        int                     hole, i;
 131
 132        if (mp->m_flags & XFS_MOUNT_NOUUID)
 133                return 0;
 134
 135        if (uuid_is_nil(uuid)) {
 136                cmn_err(CE_WARN,
 137                        "XFS: Filesystem %s has nil UUID - can't mount",
 138                        mp->m_fsname);
 139                return XFS_ERROR(EINVAL);
 140        }
 141
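        /*
         * Scan for a duplicate UUID, remembering the first nil slot (left
         * behind by a previous unmount) so it can be reused.  The table is
         * grown by a single entry only when no free slot exists.
         */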
 142        mutex_lock(&xfs_uuid_table_mutex);
 143        for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
 144                if (uuid_is_nil(&xfs_uuid_table[i])) {
 145                        hole = i;
 146                        continue;
 147                }
 148                if (uuid_equal(uuid, &xfs_uuid_table[i]))
 149                        goto out_duplicate;
 150        }
 151
 152        if (hole < 0) {
 153                xfs_uuid_table = kmem_realloc(xfs_uuid_table,
 154                        (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
 155                        xfs_uuid_table_size  * sizeof(*xfs_uuid_table),
 156                        KM_SLEEP);
 157                hole = xfs_uuid_table_size++;
 158        }
 159        xfs_uuid_table[hole] = *uuid;
 160        mutex_unlock(&xfs_uuid_table_mutex);
 161
 162        return 0;
 163
 164 out_duplicate:
 165        mutex_unlock(&xfs_uuid_table_mutex);
 166        cmn_err(CE_WARN, "XFS: Filesystem %s has duplicate UUID - can't mount",
 167                         mp->m_fsname);
 168        return XFS_ERROR(EINVAL);
 169}
 170
 171STATIC void
 172xfs_uuid_unmount(
 173        struct xfs_mount        *mp)
 174{
 175        uuid_t                  *uuid = &mp->m_sb.sb_uuid;
 176        int                     i;
 177
 178        if (mp->m_flags & XFS_MOUNT_NOUUID)
 179                return;
 180
 181        mutex_lock(&xfs_uuid_table_mutex);
 182        for (i = 0; i < xfs_uuid_table_size; i++) {
 183                if (uuid_is_nil(&xfs_uuid_table[i]))
 184                        continue;
 185                if (!uuid_equal(uuid, &xfs_uuid_table[i]))
 186                        continue;
 187                memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
 188                break;
 189        }
 190        ASSERT(i < xfs_uuid_table_size);
 191        mutex_unlock(&xfs_uuid_table_mutex);
 192}
 193
 194
 195/*
 196 * Reference counting access wrappers to the perag structures.
 197 * Because we never free per-ag structures, the only thing we
 198 * have to protect against changes is the tree structure itself.
 199 */
 200struct xfs_perag *
 201xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
 202{
 203        struct xfs_perag        *pag;
 204        int                     ref = 0;
 205
 206        rcu_read_lock();
 207        pag = radix_tree_lookup(&mp->m_perag_tree, agno);
 208        if (pag) {
 209                ASSERT(atomic_read(&pag->pag_ref) >= 0);
 210                ref = atomic_inc_return(&pag->pag_ref);
 211        }
 212        rcu_read_unlock();
 213        trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
 214        return pag;
 215}
 216
 217/*
 218 * search from @first to find the next perag with the given tag set.
 219 */
 220struct xfs_perag *
 221xfs_perag_get_tag(
 222        struct xfs_mount        *mp,
 223        xfs_agnumber_t          first,
 224        int                     tag)
 225{
 226        struct xfs_perag        *pag;
 227        int                     found;
 228        int                     ref;
 229
 230        rcu_read_lock();
 231        found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
 232                                        (void **)&pag, first, 1, tag);
 233        if (found <= 0) {
 234                rcu_read_unlock();
 235                return NULL;
 236        }
 237        ref = atomic_inc_return(&pag->pag_ref);
 238        rcu_read_unlock();
 239        trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
 240        return pag;
 241}
 242
 243void
 244xfs_perag_put(struct xfs_perag *pag)
 245{
 246        int     ref;
 247
 248        ASSERT(atomic_read(&pag->pag_ref) > 0);
 249        ref = atomic_dec_return(&pag->pag_ref);
 250        trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
 251}
 252
 253STATIC void
 254__xfs_free_perag(
 255        struct rcu_head *head)
 256{
 257        struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
 258
 259        ASSERT(atomic_read(&pag->pag_ref) == 0);
 260        kmem_free(pag);
 261}
 262
 263/*
 264 * Free up the per-ag resources associated with the mount structure.
 265 */
 266STATIC void
 267xfs_free_perag(
 268        xfs_mount_t     *mp)
 269{
 270        xfs_agnumber_t  agno;
 271        struct xfs_perag *pag;
 272
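        /*
         * Remove each perag from the radix tree under the lock, then free
         * it only after an RCU grace period so that concurrent lookups in
         * xfs_perag_get() remain safe.
         */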
 273        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 274                spin_lock(&mp->m_perag_lock);
 275                pag = radix_tree_delete(&mp->m_perag_tree, agno);
 276                spin_unlock(&mp->m_perag_lock);
 277                ASSERT(pag);
 278                ASSERT(atomic_read(&pag->pag_ref) == 0);
 279                call_rcu(&pag->rcu_head, __xfs_free_perag);
 280        }
 281}
 282
 283/*
 284 * Check size of device based on the (data/realtime) block count.
 285 * Note: this check is used by the growfs code as well as mount.
 286 */
 287int
 288xfs_sb_validate_fsb_count(
 289        xfs_sb_t        *sbp,
 290        __uint64_t      nblocks)
 291{
 292        ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
 293        ASSERT(sbp->sb_blocklog >= BBSHIFT);
 294
 295#if XFS_BIG_BLKNOS     /* Limited by ULONG_MAX of page cache index */
 296        if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
 297                return EFBIG;
 298#else                  /* Limited by UINT_MAX of sectors */
 299        if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX)
 300                return EFBIG;
 301#endif
 302        return 0;
 303}
 304
 305/*
 306 * Check the validity of the SB found.
 307 */
 308STATIC int
 309xfs_mount_validate_sb(
 310        xfs_mount_t     *mp,
 311        xfs_sb_t        *sbp,
 312        int             flags)
 313{
 314        /*
 315         * If the log device and data device have the
 316         * same device number, the log is internal.
 317         * Consequently, the sb_logstart should be non-zero.  If
 318         * we have a zero sb_logstart in this case, we may be trying to mount
 319         * a volume filesystem in a non-volume manner.
 320         */
 321        if (sbp->sb_magicnum != XFS_SB_MAGIC) {
 322                xfs_fs_mount_cmn_err(flags, "bad magic number");
 323                return XFS_ERROR(EWRONGFS);
 324        }
 325
 326        if (!xfs_sb_good_version(sbp)) {
 327                xfs_fs_mount_cmn_err(flags, "bad version");
 328                return XFS_ERROR(EWRONGFS);
 329        }
 330
 331        if (unlikely(
 332            sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
 333                xfs_fs_mount_cmn_err(flags,
 334                        "filesystem is marked as having an external log; "
 335                        "specify logdev on the\nmount command line.");
 336                return XFS_ERROR(EINVAL);
 337        }
 338
 339        if (unlikely(
 340            sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
 341                xfs_fs_mount_cmn_err(flags,
 342                        "filesystem is marked as having an internal log; "
 343                        "do not specify logdev on\nthe mount command line.");
 344                return XFS_ERROR(EINVAL);
 345        }
 346
 347        /*
 348         * More sanity checking. These were stolen directly from
 349         * xfs_repair.
 350         */
 351        if (unlikely(
 352            sbp->sb_agcount <= 0                                        ||
 353            sbp->sb_sectsize < XFS_MIN_SECTORSIZE                       ||
 354            sbp->sb_sectsize > XFS_MAX_SECTORSIZE                       ||
 355            sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG                    ||
 356            sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG                    ||
 357            sbp->sb_sectsize != (1 << sbp->sb_sectlog)                  ||
 358            sbp->sb_blocksize < XFS_MIN_BLOCKSIZE                       ||
 359            sbp->sb_blocksize > XFS_MAX_BLOCKSIZE                       ||
 360            sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG                    ||
 361            sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG                    ||
 362            sbp->sb_blocksize != (1 << sbp->sb_blocklog)                ||
 363            sbp->sb_inodesize < XFS_DINODE_MIN_SIZE                     ||
 364            sbp->sb_inodesize > XFS_DINODE_MAX_SIZE                     ||
 365            sbp->sb_inodelog < XFS_DINODE_MIN_LOG                       ||
 366            sbp->sb_inodelog > XFS_DINODE_MAX_LOG                       ||
 367            sbp->sb_inodesize != (1 << sbp->sb_inodelog)                ||
 368            (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog)   ||
 369            (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)  ||
 370            (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)  ||
 371            (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) {
 372                xfs_fs_mount_cmn_err(flags, "SB sanity check 1 failed");
 373                return XFS_ERROR(EFSCORRUPTED);
 374        }
 375
 376        /*
 377         * Sanity check AG count, size fields against data size field
 378         */
 379        if (unlikely(
 380            sbp->sb_dblocks == 0 ||
 381            sbp->sb_dblocks >
 382             (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
 383            sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
 384                              sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
 385                xfs_fs_mount_cmn_err(flags, "SB sanity check 2 failed");
 386                return XFS_ERROR(EFSCORRUPTED);
 387        }
 388
 389        /*
  390         * Until this is fixed, only page-sized or smaller data blocks work.
 391         */
 392        if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
 393                xfs_fs_mount_cmn_err(flags,
 394                        "file system with blocksize %d bytes",
 395                        sbp->sb_blocksize);
 396                xfs_fs_mount_cmn_err(flags,
 397                        "only pagesize (%ld) or less will currently work.",
 398                        PAGE_SIZE);
 399                return XFS_ERROR(ENOSYS);
 400        }
 401
 402        /*
 403         * Currently only very few inode sizes are supported.
 404         */
 405        switch (sbp->sb_inodesize) {
 406        case 256:
 407        case 512:
 408        case 1024:
 409        case 2048:
 410                break;
 411        default:
 412                xfs_fs_mount_cmn_err(flags,
 413                        "inode size of %d bytes not supported",
 414                        sbp->sb_inodesize);
 415                return XFS_ERROR(ENOSYS);
 416        }
 417
 418        if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
 419            xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
 420                xfs_fs_mount_cmn_err(flags,
 421                        "file system too large to be mounted on this system.");
 422                return XFS_ERROR(EFBIG);
 423        }
 424
 425        if (unlikely(sbp->sb_inprogress)) {
 426                xfs_fs_mount_cmn_err(flags, "file system busy");
 427                return XFS_ERROR(EFSCORRUPTED);
 428        }
 429
 430        /*
 431         * Version 1 directory format has never worked on Linux.
 432         */
 433        if (unlikely(!xfs_sb_version_hasdirv2(sbp))) {
 434                xfs_fs_mount_cmn_err(flags,
 435                        "file system using version 1 directory format");
 436                return XFS_ERROR(ENOSYS);
 437        }
 438
 439        return 0;
 440}
 441
 442int
 443xfs_initialize_perag(
 444        xfs_mount_t     *mp,
 445        xfs_agnumber_t  agcount,
 446        xfs_agnumber_t  *maxagi)
 447{
 448        xfs_agnumber_t  index, max_metadata;
 449        xfs_agnumber_t  first_initialised = 0;
 450        xfs_perag_t     *pag;
 451        xfs_agino_t     agino;
 452        xfs_ino_t       ino;
 453        xfs_sb_t        *sbp = &mp->m_sb;
 454        int             error = -ENOMEM;
 455
 456        /*
 457         * Walk the current per-ag tree so we don't try to initialise AGs
 458         * that already exist (growfs case). Allocate and insert all the
  459         * AGs we do not find, so they are ready for initialisation.
 460         */
 461        for (index = 0; index < agcount; index++) {
 462                pag = xfs_perag_get(mp, index);
 463                if (pag) {
 464                        xfs_perag_put(pag);
 465                        continue;
 466                }
 467                if (!first_initialised)
 468                        first_initialised = index;
 469
 470                pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
 471                if (!pag)
 472                        goto out_unwind;
 473                pag->pag_agno = index;
 474                pag->pag_mount = mp;
 475                spin_lock_init(&pag->pag_ici_lock);
 476                mutex_init(&pag->pag_ici_reclaim_lock);
 477                INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
 478                spin_lock_init(&pag->pag_buf_lock);
 479                pag->pag_buf_tree = RB_ROOT;
 480
 481                if (radix_tree_preload(GFP_NOFS))
 482                        goto out_unwind;
 483
 484                spin_lock(&mp->m_perag_lock);
 485                if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
 486                        BUG();
 487                        spin_unlock(&mp->m_perag_lock);
 488                        radix_tree_preload_end();
 489                        error = -EEXIST;
 490                        goto out_unwind;
 491                }
 492                spin_unlock(&mp->m_perag_lock);
 493                radix_tree_preload_end();
 494        }
 495
 496        /*
  497          * If we mount with the inode64 option, or if no inode number overflows
  498          * the legacy 32-bit address space, clear the inode32 option.
 499         */
 500        agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
 501        ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
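        /*
         * ino now approximates the largest inode number this geometry can
         * produce (an inode in the last block of the last AG); if it does
         * not fit in 32 bits, the inode32 restrictions must stay in effect.
         */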
 502
 503        if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
 504                mp->m_flags |= XFS_MOUNT_32BITINODES;
 505        else
 506                mp->m_flags &= ~XFS_MOUNT_32BITINODES;
 507
 508        if (mp->m_flags & XFS_MOUNT_32BITINODES) {
 509                /*
 510                 * Calculate how much should be reserved for inodes to meet
 511                 * the max inode percentage.
 512                 */
 513                if (mp->m_maxicount) {
 514                        __uint64_t      icount;
 515
 516                        icount = sbp->sb_dblocks * sbp->sb_imax_pct;
 517                        do_div(icount, 100);
 518                        icount += sbp->sb_agblocks - 1;
 519                        do_div(icount, sbp->sb_agblocks);
 520                        max_metadata = icount;
 521                } else {
 522                        max_metadata = agcount;
 523                }
 524
 525                for (index = 0; index < agcount; index++) {
 526                        ino = XFS_AGINO_TO_INO(mp, index, agino);
 527                        if (ino > XFS_MAXINUMBER_32) {
 528                                index++;
 529                                break;
 530                        }
 531
 532                        pag = xfs_perag_get(mp, index);
 533                        pag->pagi_inodeok = 1;
 534                        if (index < max_metadata)
 535                                pag->pagf_metadata = 1;
 536                        xfs_perag_put(pag);
 537                }
 538        } else {
 539                for (index = 0; index < agcount; index++) {
 540                        pag = xfs_perag_get(mp, index);
 541                        pag->pagi_inodeok = 1;
 542                        xfs_perag_put(pag);
 543                }
 544        }
 545
 546        if (maxagi)
 547                *maxagi = index;
 548        return 0;
 549
 550out_unwind:
 551        kmem_free(pag);
 552        for (; index > first_initialised; index--) {
 553                pag = radix_tree_delete(&mp->m_perag_tree, index);
 554                kmem_free(pag);
 555        }
 556        return error;
 557}
 558
 559void
 560xfs_sb_from_disk(
 561        xfs_sb_t        *to,
 562        xfs_dsb_t       *from)
 563{
 564        to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
 565        to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
 566        to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
 567        to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
 568        to->sb_rextents = be64_to_cpu(from->sb_rextents);
 569        memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
 570        to->sb_logstart = be64_to_cpu(from->sb_logstart);
 571        to->sb_rootino = be64_to_cpu(from->sb_rootino);
 572        to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
 573        to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
 574        to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
 575        to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
 576        to->sb_agcount = be32_to_cpu(from->sb_agcount);
 577        to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
 578        to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
 579        to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
 580        to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
 581        to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
 582        to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
 583        memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
 584        to->sb_blocklog = from->sb_blocklog;
 585        to->sb_sectlog = from->sb_sectlog;
 586        to->sb_inodelog = from->sb_inodelog;
 587        to->sb_inopblog = from->sb_inopblog;
 588        to->sb_agblklog = from->sb_agblklog;
 589        to->sb_rextslog = from->sb_rextslog;
 590        to->sb_inprogress = from->sb_inprogress;
 591        to->sb_imax_pct = from->sb_imax_pct;
 592        to->sb_icount = be64_to_cpu(from->sb_icount);
 593        to->sb_ifree = be64_to_cpu(from->sb_ifree);
 594        to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
 595        to->sb_frextents = be64_to_cpu(from->sb_frextents);
 596        to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
 597        to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
 598        to->sb_qflags = be16_to_cpu(from->sb_qflags);
 599        to->sb_flags = from->sb_flags;
 600        to->sb_shared_vn = from->sb_shared_vn;
 601        to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
 602        to->sb_unit = be32_to_cpu(from->sb_unit);
 603        to->sb_width = be32_to_cpu(from->sb_width);
 604        to->sb_dirblklog = from->sb_dirblklog;
 605        to->sb_logsectlog = from->sb_logsectlog;
 606        to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
 607        to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
 608        to->sb_features2 = be32_to_cpu(from->sb_features2);
 609        to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
 610}
 611
 612/*
  613 * Copy the in-core superblock to the on-disk one.
  614 *
  615 * The fields argument is a bitmask of the superblock fields to copy.
 616 */
 617void
 618xfs_sb_to_disk(
 619        xfs_dsb_t       *to,
 620        xfs_sb_t        *from,
 621        __int64_t       fields)
 622{
 623        xfs_caddr_t     to_ptr = (xfs_caddr_t)to;
 624        xfs_caddr_t     from_ptr = (xfs_caddr_t)from;
 625        xfs_sb_field_t  f;
 626        int             first;
 627        int             size;
 628
 629        ASSERT(fields);
 630        if (!fields)
 631                return;
 632
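        /*
         * Walk the set bits in fields from lowest to highest; each bit
         * number indexes xfs_sb_info[], which supplies the field's offset
         * and, via the following entry, its size.
         */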
 633        while (fields) {
 634                f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
 635                first = xfs_sb_info[f].offset;
 636                size = xfs_sb_info[f + 1].offset - first;
 637
 638                ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
 639
 640                if (size == 1 || xfs_sb_info[f].type == 1) {
 641                        memcpy(to_ptr + first, from_ptr + first, size);
 642                } else {
 643                        switch (size) {
 644                        case 2:
 645                                *(__be16 *)(to_ptr + first) =
 646                                        cpu_to_be16(*(__u16 *)(from_ptr + first));
 647                                break;
 648                        case 4:
 649                                *(__be32 *)(to_ptr + first) =
 650                                        cpu_to_be32(*(__u32 *)(from_ptr + first));
 651                                break;
 652                        case 8:
 653                                *(__be64 *)(to_ptr + first) =
 654                                        cpu_to_be64(*(__u64 *)(from_ptr + first));
 655                                break;
 656                        default:
 657                                ASSERT(0);
 658                        }
 659                }
 660
 661                fields &= ~(1LL << f);
 662        }
 663}
 664
 665/*
 666 * xfs_readsb
 667 *
 668 * Does the initial read of the superblock.
 669 */
 670int
 671xfs_readsb(xfs_mount_t *mp, int flags)
 672{
 673        unsigned int    sector_size;
 674        xfs_buf_t       *bp;
 675        int             error;
 676
 677        ASSERT(mp->m_sb_bp == NULL);
 678        ASSERT(mp->m_ddev_targp != NULL);
 679
 680        /*
 681         * Allocate a (locked) buffer to hold the superblock.
 682         * This will be kept around at all times to optimize
 683         * access to the superblock.
 684         */
 685        sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
 686
 687reread:
 688        bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
 689                                        XFS_SB_DADDR, sector_size, 0);
 690        if (!bp) {
 691                xfs_fs_mount_cmn_err(flags, "SB buffer read failed");
 692                return EIO;
 693        }
 694
 695        /*
 696         * Initialize the mount structure from the superblock.
 697         * But first do some basic consistency checking.
 698         */
 699        xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
 700        error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags);
 701        if (error) {
 702                xfs_fs_mount_cmn_err(flags, "SB validate failed");
 703                goto release_buf;
 704        }
 705
 706        /*
 707         * We must be able to do sector-sized and sector-aligned IO.
 708         */
 709        if (sector_size > mp->m_sb.sb_sectsize) {
 710                xfs_fs_mount_cmn_err(flags,
 711                        "device supports only %u byte sectors (not %u)",
 712                        sector_size, mp->m_sb.sb_sectsize);
 713                error = ENOSYS;
 714                goto release_buf;
 715        }
 716
 717        /*
 718         * If device sector size is smaller than the superblock size,
 719         * re-read the superblock so the buffer is correctly sized.
 720         */
 721        if (sector_size < mp->m_sb.sb_sectsize) {
 722                xfs_buf_relse(bp);
 723                sector_size = mp->m_sb.sb_sectsize;
 724                goto reread;
 725        }
 726
 727        /* Initialize per-cpu counters */
 728        xfs_icsb_reinit_counters(mp);
 729
 730        mp->m_sb_bp = bp;
 731        xfs_buf_unlock(bp);
 732        return 0;
 733
 734release_buf:
 735        xfs_buf_relse(bp);
 736        return error;
 737}
 738
 739
 740/*
 741 * xfs_mount_common
 742 *
 743 * Mount initialization code establishing various mount
 744 * fields from the superblock associated with the given
 745 * mount structure
 746 */
 747STATIC void
 748xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
 749{
 750        mp->m_agfrotor = mp->m_agirotor = 0;
 751        spin_lock_init(&mp->m_agirotor_lock);
 752        mp->m_maxagi = mp->m_sb.sb_agcount;
 753        mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
 754        mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
 755        mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
 756        mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
 757        mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
 758        mp->m_blockmask = sbp->sb_blocksize - 1;
 759        mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
 760        mp->m_blockwmask = mp->m_blockwsize - 1;
 761
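        /*
         * Pre-compute btree record limits: index 0 is for leaf blocks and
         * index 1 for node blocks (the final argument to the *_maxrecs()
         * helpers), with each minimum set to half of the corresponding
         * maximum.
         */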
 762        mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
 763        mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
 764        mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
 765        mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;
 766
 767        mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
 768        mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
 769        mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
 770        mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;
 771
 772        mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
 773        mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
 774        mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
 775        mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
 776
 777        mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
 778        mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
 779                                        sbp->sb_inopblock);
 780        mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
 781}
 782
 783/*
 784 * xfs_initialize_perag_data
 785 *
 786 * Read in each per-ag structure so we can count up the number of
 787 * allocated inodes, free inodes and used filesystem blocks as this
 788 * information is no longer persistent in the superblock. Once we have
 789 * this information, write it into the in-core superblock structure.
 790 */
 791STATIC int
 792xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
 793{
 794        xfs_agnumber_t  index;
 795        xfs_perag_t     *pag;
 796        xfs_sb_t        *sbp = &mp->m_sb;
 797        uint64_t        ifree = 0;
 798        uint64_t        ialloc = 0;
 799        uint64_t        bfree = 0;
 800        uint64_t        bfreelst = 0;
 801        uint64_t        btree = 0;
 802        int             error;
 803
 804        for (index = 0; index < agcount; index++) {
 805                /*
 806                 * read the agf, then the agi. This gets us
 807                 * all the information we need and populates the
 808                 * per-ag structures for us.
 809                 */
 810                error = xfs_alloc_pagf_init(mp, NULL, index, 0);
 811                if (error)
 812                        return error;
 813
 814                error = xfs_ialloc_pagi_init(mp, NULL, index);
 815                if (error)
 816                        return error;
 817                pag = xfs_perag_get(mp, index);
 818                ifree += pag->pagi_freecount;
 819                ialloc += pag->pagi_count;
 820                bfree += pag->pagf_freeblks;
 821                bfreelst += pag->pagf_flcount;
 822                btree += pag->pagf_btreeblks;
 823                xfs_perag_put(pag);
 824        }
 825        /*
 826         * Overwrite incore superblock counters with just-read data
 827         */
 828        spin_lock(&mp->m_sb_lock);
 829        sbp->sb_ifree = ifree;
 830        sbp->sb_icount = ialloc;
 831        sbp->sb_fdblocks = bfree + bfreelst + btree;
 832        spin_unlock(&mp->m_sb_lock);
 833
 834        /* Fixup the per-cpu counters as well. */
 835        xfs_icsb_reinit_counters(mp);
 836
 837        return 0;
 838}
 839
 840/*
 841 * Update alignment values based on mount options and sb values
 842 */
 843STATIC int
 844xfs_update_alignment(xfs_mount_t *mp)
 845{
 846        xfs_sb_t        *sbp = &(mp->m_sb);
 847
 848        if (mp->m_dalign) {
 849                /*
 850                 * If stripe unit and stripe width are not multiples
  851                 * of the fs blocksize, turn off alignment.
 852                 */
 853                if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
 854                    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
 855                        if (mp->m_flags & XFS_MOUNT_RETERR) {
 856                                cmn_err(CE_WARN,
 857                                        "XFS: alignment check 1 failed");
 858                                return XFS_ERROR(EINVAL);
 859                        }
 860                        mp->m_dalign = mp->m_swidth = 0;
 861                } else {
 862                        /*
 863                         * Convert the stripe unit and width to FSBs.
 864                         */
 865                        mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
 866                        if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
 867                                if (mp->m_flags & XFS_MOUNT_RETERR) {
 868                                        return XFS_ERROR(EINVAL);
 869                                }
 870                                xfs_fs_cmn_err(CE_WARN, mp,
 871"stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)",
 872                                        mp->m_dalign, mp->m_swidth,
 873                                        sbp->sb_agblocks);
 874
 875                                mp->m_dalign = 0;
 876                                mp->m_swidth = 0;
 877                        } else if (mp->m_dalign) {
 878                                mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
 879                        } else {
 880                                if (mp->m_flags & XFS_MOUNT_RETERR) {
 881                                        xfs_fs_cmn_err(CE_WARN, mp,
 882"stripe alignment turned off: sunit(%d) less than bsize(%d)",
 883                                                mp->m_dalign,
 884                                                mp->m_blockmask +1);
 885                                        return XFS_ERROR(EINVAL);
 886                                }
 887                                mp->m_swidth = 0;
 888                        }
 889                }
 890
 891                /*
 892                 * Update superblock with new values
 893                 * and log changes
 894                 */
 895                if (xfs_sb_version_hasdalign(sbp)) {
 896                        if (sbp->sb_unit != mp->m_dalign) {
 897                                sbp->sb_unit = mp->m_dalign;
 898                                mp->m_update_flags |= XFS_SB_UNIT;
 899                        }
 900                        if (sbp->sb_width != mp->m_swidth) {
 901                                sbp->sb_width = mp->m_swidth;
 902                                mp->m_update_flags |= XFS_SB_WIDTH;
 903                        }
 904                }
 905        } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
 906                    xfs_sb_version_hasdalign(&mp->m_sb)) {
 907                        mp->m_dalign = sbp->sb_unit;
 908                        mp->m_swidth = sbp->sb_width;
 909        }
 910
 911        return 0;
 912}
 913
 914/*
 915 * Set the maximum inode count for this filesystem
 916 */
 917STATIC void
 918xfs_set_maxicount(xfs_mount_t *mp)
 919{
 920        xfs_sb_t        *sbp = &(mp->m_sb);
 921        __uint64_t      icount;
 922
 923        if (sbp->sb_imax_pct) {
 924                /*
 925                 * Make sure the maximum inode count is a multiple
 926                 * of the units we allocate inodes in.
 927                 */
 928                icount = sbp->sb_dblocks * sbp->sb_imax_pct;
 929                do_div(icount, 100);
 930                do_div(icount, mp->m_ialloc_blks);
 931                mp->m_maxicount = (icount * mp->m_ialloc_blks)  <<
 932                                   sbp->sb_inopblog;
 933        } else {
 934                mp->m_maxicount = 0;
 935        }
 936}
 937
 938/*
 939 * Set the default minimum read and write sizes unless
 940 * already specified in a mount option.
 941 * We use smaller I/O sizes when the file system
 942 * is being used for NFS service (wsync mount option).
 943 */
 944STATIC void
 945xfs_set_rw_sizes(xfs_mount_t *mp)
 946{
 947        xfs_sb_t        *sbp = &(mp->m_sb);
 948        int             readio_log, writeio_log;
 949
 950        if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
 951                if (mp->m_flags & XFS_MOUNT_WSYNC) {
 952                        readio_log = XFS_WSYNC_READIO_LOG;
 953                        writeio_log = XFS_WSYNC_WRITEIO_LOG;
 954                } else {
 955                        readio_log = XFS_READIO_LOG_LARGE;
 956                        writeio_log = XFS_WRITEIO_LOG_LARGE;
 957                }
 958        } else {
 959                readio_log = mp->m_readio_log;
 960                writeio_log = mp->m_writeio_log;
 961        }
 962
 963        if (sbp->sb_blocklog > readio_log) {
 964                mp->m_readio_log = sbp->sb_blocklog;
 965        } else {
 966                mp->m_readio_log = readio_log;
 967        }
 968        mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
 969        if (sbp->sb_blocklog > writeio_log) {
 970                mp->m_writeio_log = sbp->sb_blocklog;
 971        } else {
 972                mp->m_writeio_log = writeio_log;
 973        }
 974        mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
 975}
 976
 977/*
 978 * precalculate the low space thresholds for dynamic speculative preallocation.
 979 */
 980void
 981xfs_set_low_space_thresholds(
 982        struct xfs_mount        *mp)
 983{
 984        int i;
 985
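        /* Threshold i corresponds to (i + 1) percent of the data blocks. */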
 986        for (i = 0; i < XFS_LOWSP_MAX; i++) {
 987                __uint64_t space = mp->m_sb.sb_dblocks;
 988
 989                do_div(space, 100);
 990                mp->m_low_space[i] = space * (i + 1);
 991        }
 992}
 993
 994
 995/*
 996 * Set whether we're using inode alignment.
 997 */
 998STATIC void
 999xfs_set_inoalignment(xfs_mount_t *mp)
1000{
1001        if (xfs_sb_version_hasalign(&mp->m_sb) &&
1002            mp->m_sb.sb_inoalignmt >=
1003            XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
1004                mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
1005        else
1006                mp->m_inoalign_mask = 0;
1007        /*
1008         * If we are using stripe alignment, check whether
1009         * the stripe unit is a multiple of the inode alignment
1010         */
1011        if (mp->m_dalign && mp->m_inoalign_mask &&
1012            !(mp->m_dalign & mp->m_inoalign_mask))
1013                mp->m_sinoalign = mp->m_dalign;
1014        else
1015                mp->m_sinoalign = 0;
1016}
1017
1018/*
1019 * Check that the data (and log if separate) are an ok size.
1020 */
1021STATIC int
1022xfs_check_sizes(xfs_mount_t *mp)
1023{
1024        xfs_buf_t       *bp;
1025        xfs_daddr_t     d;
1026
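        /*
         * Convert the data block count to 512-byte basic blocks and verify
         * that the conversion round-trips, then read the last sector of the
         * device to prove it really is as large as the superblock claims.
         */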
1027        d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
1028        if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
1029                cmn_err(CE_WARN, "XFS: filesystem size mismatch detected");
1030                return XFS_ERROR(EFBIG);
1031        }
1032        bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
1033                                        d - XFS_FSS_TO_BB(mp, 1),
1034                                        BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
1035        if (!bp) {
1036                cmn_err(CE_WARN, "XFS: last sector read failed");
1037                return EIO;
1038        }
1039        xfs_buf_relse(bp);
1040
1041        if (mp->m_logdev_targp != mp->m_ddev_targp) {
1042                d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
1043                if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
1044                        cmn_err(CE_WARN, "XFS: log size mismatch detected");
1045                        return XFS_ERROR(EFBIG);
1046                }
1047                bp = xfs_buf_read_uncached(mp, mp->m_logdev_targp,
1048                                        d - XFS_FSB_TO_BB(mp, 1),
1049                                        XFS_FSB_TO_B(mp, 1), 0);
1050                if (!bp) {
1051                        cmn_err(CE_WARN, "XFS: log device read failed");
1052                        return EIO;
1053                }
1054                xfs_buf_relse(bp);
1055        }
1056        return 0;
1057}
1058
1059/*
1060 * Clear the quotaflags in memory and in the superblock.
1061 */
1062int
1063xfs_mount_reset_sbqflags(
1064        struct xfs_mount        *mp)
1065{
1066        int                     error;
1067        struct xfs_trans        *tp;
1068
1069        mp->m_qflags = 0;
1070
1071        /*
1072         * It is OK to look at sb_qflags here in mount path,
1073         * without m_sb_lock.
1074         */
1075        if (mp->m_sb.sb_qflags == 0)
1076                return 0;
1077        spin_lock(&mp->m_sb_lock);
1078        mp->m_sb.sb_qflags = 0;
1079        spin_unlock(&mp->m_sb_lock);
1080
1081        /*
1082         * If the fs is readonly, let the incore superblock run
1083         * with quotas off but don't flush the update out to disk
1084         */
1085        if (mp->m_flags & XFS_MOUNT_RDONLY)
1086                return 0;
1087
1088#ifdef QUOTADEBUG
1089        xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
1090#endif
1091
1092        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1093        error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
1094                                      XFS_DEFAULT_LOG_COUNT);
1095        if (error) {
1096                xfs_trans_cancel(tp, 0);
1097                xfs_fs_cmn_err(CE_ALERT, mp,
1098                        "xfs_mount_reset_sbqflags: Superblock update failed!");
1099                return error;
1100        }
1101
1102        xfs_mod_sb(tp, XFS_SB_QFLAGS);
1103        return xfs_trans_commit(tp, 0);
1104}
1105
1106__uint64_t
1107xfs_default_resblks(xfs_mount_t *mp)
1108{
1109        __uint64_t resblks;
1110
1111        /*
1112         * We default to 5% or 8192 fsbs of space reserved, whichever is
1113         * smaller.  This is intended to cover concurrent allocation
1114         * transactions when we initially hit enospc. These each require a 4
1115         * block reservation. Hence by default we cover roughly 2000 concurrent
1116         * allocation reservations.
1117         */
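        /*
         * Illustrative example: a 1 TiB filesystem with 4 KiB blocks has
         * 268435456 data blocks, so 5% is roughly 13.4 million blocks and
         * the 8192 block cap applies instead.
         */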
1118        resblks = mp->m_sb.sb_dblocks;
1119        do_div(resblks, 20);
1120        resblks = min_t(__uint64_t, resblks, 8192);
1121        return resblks;
1122}
1123
1124/*
1125 * This function does the following on an initial mount of a file system:
1126 *      - reads the superblock from disk and init the mount struct
1127 *      - if we're a 32-bit kernel, do a size check on the superblock
1128 *              so we don't mount terabyte filesystems
1129 *      - init mount struct realtime fields
1130 *      - allocate inode hash table for fs
1131 *      - init directory manager
1132 *      - perform recovery and init the log manager
1133 */
1134int
1135xfs_mountfs(
1136        xfs_mount_t     *mp)
1137{
1138        xfs_sb_t        *sbp = &(mp->m_sb);
1139        xfs_inode_t     *rip;
1140        __uint64_t      resblks;
1141        uint            quotamount = 0;
1142        uint            quotaflags = 0;
1143        int             error = 0;
1144
1145        xfs_mount_common(mp, sbp);
1146
1147        /*
 1148         * Check for mismatched features2 values.  Older kernels
1149         * read & wrote into the wrong sb offset for sb_features2
1150         * on some platforms due to xfs_sb_t not being 64bit size aligned
1151         * when sb_features2 was added, which made older superblock
1152         * reading/writing routines swap it as a 64-bit value.
1153         *
1154         * For backwards compatibility, we make both slots equal.
1155         *
1156         * If we detect a mismatched field, we OR the set bits into the
1157         * existing features2 field in case it has already been modified; we
1158         * don't want to lose any features.  We then update the bad location
1159         * with the ORed value so that older kernels will see any features2
1160         * flags, and mark the two fields as needing updates once the
1161         * transaction subsystem is online.
1162         */
1163        if (xfs_sb_has_mismatched_features2(sbp)) {
1164                cmn_err(CE_WARN,
1165                        "XFS: correcting sb_features alignment problem");
1166                sbp->sb_features2 |= sbp->sb_bad_features2;
1167                sbp->sb_bad_features2 = sbp->sb_features2;
1168                mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
1169
1170                /*
1171                 * Re-check for ATTR2 in case it was found in bad_features2
1172                 * slot.
1173                 */
1174                if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1175                   !(mp->m_flags & XFS_MOUNT_NOATTR2))
1176                        mp->m_flags |= XFS_MOUNT_ATTR2;
1177        }
1178
1179        if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1180           (mp->m_flags & XFS_MOUNT_NOATTR2)) {
1181                xfs_sb_version_removeattr2(&mp->m_sb);
1182                mp->m_update_flags |= XFS_SB_FEATURES2;
1183
1184                /* update sb_versionnum for the clearing of the morebits */
1185                if (!sbp->sb_features2)
1186                        mp->m_update_flags |= XFS_SB_VERSIONNUM;
1187        }
1188
1189        /*
1190         * Check if sb_agblocks is aligned at stripe boundary
1191         * If sb_agblocks is NOT aligned turn off m_dalign since
1192         * allocator alignment is within an ag, therefore ag has
1193         * to be aligned at stripe boundary.
1194         */
1195        error = xfs_update_alignment(mp);
1196        if (error)
1197                goto out;
1198
1199        xfs_alloc_compute_maxlevels(mp);
1200        xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
1201        xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
1202        xfs_ialloc_compute_maxlevels(mp);
1203
1204        xfs_set_maxicount(mp);
1205
1206        mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
1207
1208        error = xfs_uuid_mount(mp);
1209        if (error)
1210                goto out;
1211
1212        /*
1213         * Set the minimum read and write sizes
1214         */
1215        xfs_set_rw_sizes(mp);
1216
1217        /* set the low space thresholds for dynamic preallocation */
1218        xfs_set_low_space_thresholds(mp);
1219
1220        /*
1221         * Set the inode cluster size.
1222         * This may still be overridden by the file system
1223         * block size if it is larger than the chosen cluster size.
1224         */
1225        mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
1226
1227        /*
1228         * Set inode alignment fields
1229         */
1230        xfs_set_inoalignment(mp);
1231
1232        /*
1233         * Check that the data (and log if separate) are an ok size.
1234         */
1235        error = xfs_check_sizes(mp);
1236        if (error)
1237                goto out_remove_uuid;
1238
1239        /*
1240         * Initialize realtime fields in the mount structure
1241         */
1242        error = xfs_rtmount_init(mp);
1243        if (error) {
1244                cmn_err(CE_WARN, "XFS: RT mount failed");
1245                goto out_remove_uuid;
1246        }
1247
1248        /*
1249         *  Copies the low order bits of the timestamp and the randomly
1250         *  set "sequence" number out of a UUID.
1251         */
1252        uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
1253
1254        mp->m_dmevmask = 0;     /* not persistent; set after each mount */
1255
1256        xfs_dir_mount(mp);
1257
1258        /*
1259         * Initialize the attribute manager's entries.
1260         */
1261        mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;
1262
1263        /*
1264         * Initialize the precomputed transaction reservations values.
1265         */
1266        xfs_trans_init(mp);
1267
1268        /*
1269         * Allocate and initialize the per-ag data.
1270         */
1271        spin_lock_init(&mp->m_perag_lock);
1272        INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1273        error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
1274        if (error) {
1275                cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error);
1276                goto out_remove_uuid;
1277        }
1278
1279        if (!sbp->sb_logblocks) {
1280                cmn_err(CE_WARN, "XFS: no log defined");
1281                XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
1282                error = XFS_ERROR(EFSCORRUPTED);
1283                goto out_free_perag;
1284        }
1285
1286        /*
1287         * log's mount-time initialization. Perform 1st part recovery if needed
1288         */
1289        error = xfs_log_mount(mp, mp->m_logdev_targp,
1290                              XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
1291                              XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
1292        if (error) {
1293                cmn_err(CE_WARN, "XFS: log mount failed");
1294                goto out_free_perag;
1295        }
1296
1297        /*
1298         * Now the log is mounted, we know if it was an unclean shutdown or
 1299         * not. If it was, the first phase of recovery has completed and we
1300         * have consistent AG blocks on disk. We have not recovered EFIs yet,
1301         * but they are recovered transactionally in the second recovery phase
1302         * later.
1303         *
1304         * Hence we can safely re-initialise incore superblock counters from
1305         * the per-ag data. These may not be correct if the filesystem was not
1306         * cleanly unmounted, so we need to wait for recovery to finish before
1307         * doing this.
1308         *
1309         * If the filesystem was cleanly unmounted, then we can trust the
1310         * values in the superblock to be correct and we don't need to do
1311         * anything here.
1312         *
1313         * If we are currently making the filesystem, the initialisation will
1314         * fail as the perag data is in an undefined state.
1315         */
1316        if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
1317            !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
1318             !mp->m_sb.sb_inprogress) {
1319                error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
1320                if (error)
1321                        goto out_free_perag;
1322        }
1323
1324        /*
1325         * Get and sanity-check the root inode.
1326         * Save the pointer to it in the mount structure.
1327         */
1328        error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip);
1329        if (error) {
1330                cmn_err(CE_WARN, "XFS: failed to read root inode");
1331                goto out_log_dealloc;
1332        }
1333
1334        ASSERT(rip != NULL);
1335
1336        if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
1337                cmn_err(CE_WARN, "XFS: corrupted root inode");
1338                cmn_err(CE_WARN, "Device %s - root %llu is not a directory",
1339                        XFS_BUFTARG_NAME(mp->m_ddev_targp),
1340                        (unsigned long long)rip->i_ino);
1341                xfs_iunlock(rip, XFS_ILOCK_EXCL);
1342                XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
1343                                 mp);
1344                error = XFS_ERROR(EFSCORRUPTED);
1345                goto out_rele_rip;
1346        }
1347        mp->m_rootip = rip;     /* save it */
1348
1349        xfs_iunlock(rip, XFS_ILOCK_EXCL);
1350
1351        /*
1352         * Initialize realtime inode pointers in the mount structure
1353         */
1354        error = xfs_rtmount_inodes(mp);
1355        if (error) {
1356                /*
1357                 * Free up the root inode.
1358                 */
1359                cmn_err(CE_WARN, "XFS: failed to read RT inodes");
1360                goto out_rele_rip;
1361        }
1362
1363        /*
1364         * If this is a read-only mount defer the superblock updates until
1365         * the next remount into writeable mode.  Otherwise we would never
1366         * perform the update e.g. for the root filesystem.
1367         */
1368        if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1369                error = xfs_mount_log_sb(mp, mp->m_update_flags);
1370                if (error) {
1371                        cmn_err(CE_WARN, "XFS: failed to write sb changes");
1372                        goto out_rtunmount;
1373                }
1374        }
1375
1376        /*
1377         * Initialise the XFS quota management subsystem for this mount
1378         */
1379        if (XFS_IS_QUOTA_RUNNING(mp)) {
1380                error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
1381                if (error)
1382                        goto out_rtunmount;
1383        } else {
1384                ASSERT(!XFS_IS_QUOTA_ON(mp));
1385
1386                /*
1387                 * If a file system had quotas running earlier, but decided to
1388                 * mount without -o uquota/pquota/gquota options, revoke the
1389                 * quotachecked license.
1390                 */
1391                if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
1392                        cmn_err(CE_NOTE,
1393                                "XFS: resetting qflags for filesystem %s",
1394                                mp->m_fsname);
1395
1396                        error = xfs_mount_reset_sbqflags(mp);
1397                        if (error)
1398                                return error;
1399                }
1400        }
1401
1402        /*
1403         * Finish recovering the file system.  This part needed to be
1404         * delayed until after the root and real-time bitmap inodes
1405         * were consistently read in.
1406         */
1407        error = xfs_log_mount_finish(mp);
1408        if (error) {
1409                cmn_err(CE_WARN, "XFS: log mount finish failed");
1410                goto out_rtunmount;
1411        }
1412
1413        /*
1414         * Complete the quota initialisation, post-log-replay component.
1415         */
1416        if (quotamount) {
1417                ASSERT(mp->m_qflags == 0);
1418                mp->m_qflags = quotaflags;
1419
1420                xfs_qm_mount_quotas(mp);
1421        }
1422
1423        /*
1424         * Now we are mounted, reserve a small amount of unused space for
1425         * privileged transactions. This is needed so that transaction
1426         * space required for critical operations can dip into this pool
1427         * when at ENOSPC. This is needed for operations like create with
1428         * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
1429         * are not allowed to use this reserved space.
1430         *
1431         * This may drive us straight to ENOSPC on mount, but that implies
1432         * we were already there on the last unmount. Warn if this occurs.
1433         */
1434        if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
1435                resblks = xfs_default_resblks(mp);
1436                error = xfs_reserve_blocks(mp, &resblks, NULL);
1437                if (error)
1438                        cmn_err(CE_WARN, "XFS: Unable to allocate reserve "
1439                                "blocks. Continuing without a reserve pool.");
1440        }
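
        /*
         * Illustrative sketch (not part of the mount path; "blocks" is a
         * hypothetical count): a transaction that is allowed to dip into the
         * reserve pool consumes free space with rsvd == 1, e.g.:
         *
         *      error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
         *                                       -((int64_t)blocks), 1);
         *
         * and at ENOSPC the shortfall then comes out of m_resblks_avail
         * instead of failing (see xfs_mod_incore_sb_unlocked() below).
         */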
1441
1442        return 0;
1443
1444 out_rtunmount:
1445        xfs_rtunmount_inodes(mp);
1446 out_rele_rip:
1447        IRELE(rip);
1448 out_log_dealloc:
1449        xfs_log_unmount(mp);
1450 out_free_perag:
1451        xfs_free_perag(mp);
1452 out_remove_uuid:
1453        xfs_uuid_unmount(mp);
1454 out:
1455        return error;
1456}
1457
1458/*
1459 * This flushes out the inodes, dquots and the superblock, unmounts the
1460 * log and makes sure that incore structures are freed.
1461 */
1462void
1463xfs_unmountfs(
1464        struct xfs_mount        *mp)
1465{
1466        __uint64_t              resblks;
1467        int                     error;
1468
1469        xfs_qm_unmount_quotas(mp);
1470        xfs_rtunmount_inodes(mp);
1471        IRELE(mp->m_rootip);
1472
1473        /*
1474         * We can potentially deadlock here if we have an inode cluster
1475         * that has been freed but whose buffer is still pinned in memory
1476         * because the transaction is still sitting in an iclog. The stale
1477         * inodes on that buffer will have their flush locks held until the
1478         * transaction hits the disk and the callbacks run. The inode
1479         * flush takes the flush lock unconditionally and with nothing to
1480         * push out the iclog we will never get that unlocked. Hence we
1481         * need to force the log first.
1482         */
1483        xfs_log_force(mp, XFS_LOG_SYNC);
1484
1485        /*
1486         * Do a delwri reclaim pass first so that as many dirty inodes are
1487         * queued up for IO as possible. Then flush the buffers before making
1488         * a synchronous pass to reclaim all of the remaining inodes.
1489         * This makes the reclaim process as quick as possible by avoiding
1490         * synchronous writeout and blocking on inodes already in the delwri
1491         * state as much as possible.
1492         */
1493        xfs_reclaim_inodes(mp, 0);
1494        XFS_bflush(mp->m_ddev_targp);
1495        xfs_reclaim_inodes(mp, SYNC_WAIT);
1496
1497        xfs_qm_unmount(mp);
1498
1499        /*
1500         * Flush out the log synchronously so that we know for sure
1501         * that nothing is pinned.  This is important because bflush()
1502         * will skip pinned buffers.
1503         */
1504        xfs_log_force(mp, XFS_LOG_SYNC);
1505
1506        xfs_binval(mp->m_ddev_targp);
1507        if (mp->m_rtdev_targp) {
1508                xfs_binval(mp->m_rtdev_targp);
1509        }
1510
1511        /*
1512         * Unreserve any blocks we have so that when we unmount we don't account
1513         * the reserved free space as used. This is really only necessary for
1514         * lazy superblock counting because it trusts the incore superblock
1515         * counters to be absolutely correct on clean unmount.
1516         *
1517         * We don't bother correcting this elsewhere for lazy superblock
1518         * counting because on mount of an unclean filesystem we reconstruct the
1519         * correct counter value and this is irrelevant.
1520         *
1521         * For non-lazy counter filesystems, this doesn't matter at all because
1522         * we only ever apply deltas to the superblock and hence the incore
1523         * value does not matter.
1524         */
1525        resblks = 0;
1526        error = xfs_reserve_blocks(mp, &resblks, NULL);
1527        if (error)
1528                cmn_err(CE_WARN, "XFS: Unable to free reserved block pool. "
1529                                "Freespace may not be correct on next mount.");
1530
1531        error = xfs_log_sbcount(mp, 1);
1532        if (error)
1533                cmn_err(CE_WARN, "XFS: Unable to update superblock counters. "
1534                                "Freespace may not be correct on next mount.");
1535        xfs_unmountfs_writesb(mp);
1536        xfs_unmountfs_wait(mp);                 /* wait for async bufs */
1537        xfs_log_unmount_write(mp);
1538        xfs_log_unmount(mp);
1539        xfs_uuid_unmount(mp);
1540
1541#if defined(DEBUG)
1542        xfs_errortag_clearall(mp, 0);
1543#endif
1544        xfs_free_perag(mp);
1545}
1546
1547STATIC void
1548xfs_unmountfs_wait(xfs_mount_t *mp)
1549{
1550        if (mp->m_logdev_targp != mp->m_ddev_targp)
1551                xfs_wait_buftarg(mp->m_logdev_targp);
1552        if (mp->m_rtdev_targp)
1553                xfs_wait_buftarg(mp->m_rtdev_targp);
1554        xfs_wait_buftarg(mp->m_ddev_targp);
1555}
1556
1557int
1558xfs_fs_writable(xfs_mount_t *mp)
1559{
1560        return !(xfs_test_for_freeze(mp) || XFS_FORCED_SHUTDOWN(mp) ||
1561                (mp->m_flags & XFS_MOUNT_RDONLY));
1562}
1563
1564/*
1565 * xfs_log_sbcount
1566 *
1567 * Called either periodically to keep the on disk superblock values
1568 * roughly up to date or from unmount to make sure the values are
1569 * correct on a clean unmount.
1570 *
1571 * Note this code can be called during the process of freezing, so
1572 * we may need to use the transaction allocator which does not
1573 * block when the transaction subsystem is in its frozen state.
1574 */
1575int
1576xfs_log_sbcount(
1577        xfs_mount_t     *mp,
1578        uint            sync)
1579{
1580        xfs_trans_t     *tp;
1581        int             error;
1582
1583        if (!xfs_fs_writable(mp))
1584                return 0;
1585
1586        xfs_icsb_sync_counters(mp, 0);
1587
1588        /*
1589         * we don't need to do this if we are updating the superblock
1590         * counters on every modification.
1591         */
1592        if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
1593                return 0;
1594
1595        tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
1596        error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
1597                                        XFS_DEFAULT_LOG_COUNT);
1598        if (error) {
1599                xfs_trans_cancel(tp, 0);
1600                return error;
1601        }
1602
1603        xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
1604        if (sync)
1605                xfs_trans_set_sync(tp);
1606        error = xfs_trans_commit(tp, 0);
1607        return error;
1608}
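
/*
 * Usage sketch (illustrative only): the unmount path above uses the
 * synchronous form, xfs_log_sbcount(mp, 1), so the counters are on disk
 * before the unmount record is written.  A periodic caller that only wants
 * to keep the on-disk values roughly current could use the asynchronous
 * form and ignore transient failures:
 *
 *      error = xfs_log_sbcount(mp, 0);
 */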
1609
1610int
1611xfs_unmountfs_writesb(xfs_mount_t *mp)
1612{
1613        xfs_buf_t       *sbp;
1614        int             error = 0;
1615
1616        /*
1617         * skip superblock write if fs is read-only, or
1618         * if we are doing a forced umount.
1619         */
1620        if (!((mp->m_flags & XFS_MOUNT_RDONLY) ||
1621                XFS_FORCED_SHUTDOWN(mp))) {
1622
1623                sbp = xfs_getsb(mp, 0);
1624
1625                XFS_BUF_UNDONE(sbp);
1626                XFS_BUF_UNREAD(sbp);
1627                XFS_BUF_UNDELAYWRITE(sbp);
1628                XFS_BUF_WRITE(sbp);
1629                XFS_BUF_UNASYNC(sbp);
1630                ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
1631                xfsbdstrat(mp, sbp);
1632                error = xfs_buf_iowait(sbp);
1633                if (error)
1634                        xfs_ioerror_alert("xfs_unmountfs_writesb",
1635                                          mp, sbp, XFS_BUF_ADDR(sbp));
1636                xfs_buf_relse(sbp);
1637        }
1638        return error;
1639}
1640
1641/*
1642 * xfs_mod_sb() can be used to copy arbitrary changes to the
1643 * in-core superblock into the superblock buffer to be logged.
1644 * It does not provide the higher level of locking that is
1645 * needed to protect the in-core superblock from concurrent
1646 * access.
1647 */
1648void
1649xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
1650{
1651        xfs_buf_t       *bp;
1652        int             first;
1653        int             last;
1654        xfs_mount_t     *mp;
1655        xfs_sb_field_t  f;
1656
1657        ASSERT(fields);
1658        if (!fields)
1659                return;
1660        mp = tp->t_mountp;
1661        bp = xfs_trans_getsb(tp, mp, 0);
1662        first = sizeof(xfs_sb_t);
1663        last = 0;
1664
1665        /* translate/copy */
1666
1667        xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
1668
1669        /* find modified range */
1670        f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
1671        ASSERT((1LL << f) & XFS_SB_MOD_BITS);
1672        last = xfs_sb_info[f + 1].offset - 1;
1673
1674        f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
1675        ASSERT((1LL << f) & XFS_SB_MOD_BITS);
1676        first = xfs_sb_info[f].offset;
1677
1678        xfs_trans_log_buf(tp, bp, first, last);
1679}
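
/*
 * Usage sketch (illustrative only; xfs_mount_log_sb() below is a real
 * caller): update the in-core superblock under m_sb_lock, then log the
 * change within a transaction that already has log space reserved.
 * "new_sunit" is a hypothetical value:
 *
 *      spin_lock(&mp->m_sb_lock);
 *      mp->m_sb.sb_unit = new_sunit;
 *      spin_unlock(&mp->m_sb_lock);
 *      xfs_mod_sb(tp, XFS_SB_UNIT);
 *      error = xfs_trans_commit(tp, 0);
 */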
1680
1681
1682/*
1683 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
1684 * a delta to a specified field in the in-core superblock.  Simply
1685 * switch on the field indicated and apply the delta to that field.
1686 * Fields are not allowed to dip below zero, so if the delta would
1687 * do this do not apply it and return EINVAL.
1688 *
1689 * The m_sb_lock must be held when this routine is called.
1690 */
1691STATIC int
1692xfs_mod_incore_sb_unlocked(
1693        xfs_mount_t     *mp,
1694        xfs_sb_field_t  field,
1695        int64_t         delta,
1696        int             rsvd)
1697{
1698        int             scounter;       /* short counter for 32 bit fields */
1699        long long       lcounter;       /* long counter for 64 bit fields */
1700        long long       res_used, rem;
1701
1702        /*
1703         * With the in-core superblock spin lock held, switch
1704         * on the indicated field.  Apply the delta to the
1705         * proper field.  If the fields value would dip below
1706         * 0, then do not apply the delta and return EINVAL.
1707         */
1708        switch (field) {
1709        case XFS_SBS_ICOUNT:
1710                lcounter = (long long)mp->m_sb.sb_icount;
1711                lcounter += delta;
1712                if (lcounter < 0) {
1713                        ASSERT(0);
1714                        return XFS_ERROR(EINVAL);
1715                }
1716                mp->m_sb.sb_icount = lcounter;
1717                return 0;
1718        case XFS_SBS_IFREE:
1719                lcounter = (long long)mp->m_sb.sb_ifree;
1720                lcounter += delta;
1721                if (lcounter < 0) {
1722                        ASSERT(0);
1723                        return XFS_ERROR(EINVAL);
1724                }
1725                mp->m_sb.sb_ifree = lcounter;
1726                return 0;
1727        case XFS_SBS_FDBLOCKS:
1728                lcounter = (long long)
1729                        mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1730                res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1731
1732                if (delta > 0) {                /* Putting blocks back */
1733                        if (res_used > delta) {
1734                                mp->m_resblks_avail += delta;
1735                        } else {
1736                                rem = delta - res_used;
1737                                mp->m_resblks_avail = mp->m_resblks;
1738                                lcounter += rem;
1739                        }
1740                } else {                                /* Taking blocks away */
1741                        lcounter += delta;
1742                        if (lcounter >= 0) {
1743                                mp->m_sb.sb_fdblocks = lcounter +
1744                                                        XFS_ALLOC_SET_ASIDE(mp);
1745                                return 0;
1746                        }
1747
1748                        /*
1749                         * We are out of blocks; use any available reserved
1750                         * blocks if we're allowed to.
1751                         */
1752                        if (!rsvd)
1753                                return XFS_ERROR(ENOSPC);
1754
1755                        lcounter = (long long)mp->m_resblks_avail + delta;
1756                        if (lcounter >= 0) {
1757                                mp->m_resblks_avail = lcounter;
1758                                return 0;
1759                        }
1760                        printk_once(KERN_WARNING
1761                                "Filesystem \"%s\": reserve blocks depleted! "
1762                                "Consider increasing reserve pool size.",
1763                                mp->m_fsname);
1764                        return XFS_ERROR(ENOSPC);
1765                }
1766
1767                mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
1768                return 0;
1769        case XFS_SBS_FREXTENTS:
1770                lcounter = (long long)mp->m_sb.sb_frextents;
1771                lcounter += delta;
1772                if (lcounter < 0) {
1773                        return XFS_ERROR(ENOSPC);
1774                }
1775                mp->m_sb.sb_frextents = lcounter;
1776                return 0;
1777        case XFS_SBS_DBLOCKS:
1778                lcounter = (long long)mp->m_sb.sb_dblocks;
1779                lcounter += delta;
1780                if (lcounter < 0) {
1781                        ASSERT(0);
1782                        return XFS_ERROR(EINVAL);
1783                }
1784                mp->m_sb.sb_dblocks = lcounter;
1785                return 0;
1786        case XFS_SBS_AGCOUNT:
1787                scounter = mp->m_sb.sb_agcount;
1788                scounter += delta;
1789                if (scounter < 0) {
1790                        ASSERT(0);
1791                        return XFS_ERROR(EINVAL);
1792                }
1793                mp->m_sb.sb_agcount = scounter;
1794                return 0;
1795        case XFS_SBS_IMAX_PCT:
1796                scounter = mp->m_sb.sb_imax_pct;
1797                scounter += delta;
1798                if (scounter < 0) {
1799                        ASSERT(0);
1800                        return XFS_ERROR(EINVAL);
1801                }
1802                mp->m_sb.sb_imax_pct = scounter;
1803                return 0;
1804        case XFS_SBS_REXTSIZE:
1805                scounter = mp->m_sb.sb_rextsize;
1806                scounter += delta;
1807                if (scounter < 0) {
1808                        ASSERT(0);
1809                        return XFS_ERROR(EINVAL);
1810                }
1811                mp->m_sb.sb_rextsize = scounter;
1812                return 0;
1813        case XFS_SBS_RBMBLOCKS:
1814                scounter = mp->m_sb.sb_rbmblocks;
1815                scounter += delta;
1816                if (scounter < 0) {
1817                        ASSERT(0);
1818                        return XFS_ERROR(EINVAL);
1819                }
1820                mp->m_sb.sb_rbmblocks = scounter;
1821                return 0;
1822        case XFS_SBS_RBLOCKS:
1823                lcounter = (long long)mp->m_sb.sb_rblocks;
1824                lcounter += delta;
1825                if (lcounter < 0) {
1826                        ASSERT(0);
1827                        return XFS_ERROR(EINVAL);
1828                }
1829                mp->m_sb.sb_rblocks = lcounter;
1830                return 0;
1831        case XFS_SBS_REXTENTS:
1832                lcounter = (long long)mp->m_sb.sb_rextents;
1833                lcounter += delta;
1834                if (lcounter < 0) {
1835                        ASSERT(0);
1836                        return XFS_ERROR(EINVAL);
1837                }
1838                mp->m_sb.sb_rextents = lcounter;
1839                return 0;
1840        case XFS_SBS_REXTSLOG:
1841                scounter = mp->m_sb.sb_rextslog;
1842                scounter += delta;
1843                if (scounter < 0) {
1844                        ASSERT(0);
1845                        return XFS_ERROR(EINVAL);
1846                }
1847                mp->m_sb.sb_rextslog = scounter;
1848                return 0;
1849        default:
1850                ASSERT(0);
1851                return XFS_ERROR(EINVAL);
1852        }
1853}
1854
1855/*
1856 * xfs_mod_incore_sb() is used to change a field in the in-core
1857 * superblock structure by the specified delta.  This modification
1858 * is protected by the m_sb_lock.  Just use the xfs_mod_incore_sb_unlocked()
1859 * routine to do the work.
1860 */
1861int
1862xfs_mod_incore_sb(
1863        struct xfs_mount        *mp,
1864        xfs_sb_field_t          field,
1865        int64_t                 delta,
1866        int                     rsvd)
1867{
1868        int                     status;
1869
1870#ifdef HAVE_PERCPU_SB
1871        ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS);
1872#endif
1873        spin_lock(&mp->m_sb_lock);
1874        status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1875        spin_unlock(&mp->m_sb_lock);
1876
1877        return status;
1878}
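
/*
 * Usage sketch (illustrative only; "rtx" is a hypothetical extent count):
 * apply a single delta to a field that is not backed by the per-cpu
 * counters, e.g. returning free realtime extents:
 *
 *      error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, (int64_t)rtx, 0);
 *
 * On HAVE_PERCPU_SB builds the ICOUNT/IFREE/FDBLOCKS fields must instead go
 * through xfs_icsb_modify_counters(), as the ASSERT above enforces.
 */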
1879
1880/*
1881 * Change more than one field in the in-core superblock structure at a time.
1882 *
1883 * The fields and changes to those fields are specified in the array of
1884 * xfs_mod_sb structures passed in.  Either all of the specified deltas
1885 * will be applied or none of them will.  If any modified field dips below 0,
1886 * then all modifications will be backed out and EINVAL will be returned.
1887 *
1888 * Note that this function may not be used for the superblock values that
1889 * are tracked with the in-memory per-cpu counters - a direct call to
1890 * xfs_icsb_modify_counters is required for these.
1891 */
1892int
1893xfs_mod_incore_sb_batch(
1894        struct xfs_mount        *mp,
1895        xfs_mod_sb_t            *msb,
1896        uint                    nmsb,
1897        int                     rsvd)
1898{
1899        xfs_mod_sb_t            *msbp = &msb[0];
1900        int                     error = 0;
1901
1902        /*
1903         * Loop through the array of mod structures and apply each individually.
1904         * If any fail, then back out all those which have already been applied.
1905         * Do all of this within the scope of the m_sb_lock so that all of the
1906         * changes will be atomic.
1907         */
1908        spin_lock(&mp->m_sb_lock);
1909        for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
1910                ASSERT(msbp->msb_field < XFS_SBS_ICOUNT ||
1911                       msbp->msb_field > XFS_SBS_FDBLOCKS);
1912
1913                error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1914                                                   msbp->msb_delta, rsvd);
1915                if (error)
1916                        goto unwind;
1917        }
1918        spin_unlock(&mp->m_sb_lock);
1919        return 0;
1920
1921unwind:
1922        while (--msbp >= msb) {
1923                error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
1924                                                   -msbp->msb_delta, rsvd);
1925                ASSERT(error == 0);
1926        }
1927        spin_unlock(&mp->m_sb_lock);
1928        return error;
1929}
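
/*
 * Usage sketch (illustrative only, hypothetical deltas): apply two related
 * changes so that either both stick or both are backed out:
 *
 *      xfs_mod_sb_t    msb[2];
 *
 *      msb[0].msb_field = XFS_SBS_DBLOCKS;
 *      msb[0].msb_delta = dblocks_delta;
 *      msb[1].msb_field = XFS_SBS_AGCOUNT;
 *      msb[1].msb_delta = agcount_delta;
 *      error = xfs_mod_incore_sb_batch(mp, msb, 2, 0);
 */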
1930
1931/*
1932 * xfs_getsb() is called to obtain the buffer for the superblock.
1933 * The buffer is returned locked and read in from disk.
1934 * The buffer should be released with a call to xfs_buf_relse().
1935 *
1936 * If the flags parameter contains XBF_TRYLOCK, then we'll only return
1937 * the superblock buffer if it can be locked without sleeping.
1938 * If it can't then we'll return NULL.
1939 */
1940xfs_buf_t *
1941xfs_getsb(
1942        xfs_mount_t     *mp,
1943        int             flags)
1944{
1945        xfs_buf_t       *bp;
1946
1947        ASSERT(mp->m_sb_bp != NULL);
1948        bp = mp->m_sb_bp;
1949        if (flags & XBF_TRYLOCK) {
1950                if (!XFS_BUF_CPSEMA(bp)) {
1951                        return NULL;
1952                }
1953        } else {
1954                XFS_BUF_PSEMA(bp, PRIBIO);
1955        }
1956        XFS_BUF_HOLD(bp);
1957        ASSERT(XFS_BUF_ISDONE(bp));
1958        return bp;
1959}
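
/*
 * Usage sketch (illustrative only; use_sb_buffer() is a hypothetical
 * helper): take the superblock buffer without sleeping and back off if it
 * is already locked:
 *
 *      bp = xfs_getsb(mp, XBF_TRYLOCK);
 *      if (!bp)
 *              return EAGAIN;
 *      use_sb_buffer(bp);
 *      xfs_buf_relse(bp);      (drops both the lock and the hold)
 */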
1960
1961/*
1962 * Used to free the superblock along various error paths.
1963 */
1964void
1965xfs_freesb(
1966        struct xfs_mount        *mp)
1967{
1968        struct xfs_buf          *bp = mp->m_sb_bp;
1969
1970        xfs_buf_lock(bp);
1971        mp->m_sb_bp = NULL;
1972        xfs_buf_relse(bp);
1973}
1974
1975/*
1976 * Used to log changes to the superblock unit and width fields which could
1977 * be altered by the mount options, as well as any potential sb_features2
1978 * fixup. Only the first superblock is updated.
1979 */
1980int
1981xfs_mount_log_sb(
1982        xfs_mount_t     *mp,
1983        __int64_t       fields)
1984{
1985        xfs_trans_t     *tp;
1986        int             error;
1987
1988        ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
1989                         XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
1990                         XFS_SB_VERSIONNUM));
1991
1992        tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
1993        error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
1994                                XFS_DEFAULT_LOG_COUNT);
1995        if (error) {
1996                xfs_trans_cancel(tp, 0);
1997                return error;
1998        }
1999        xfs_mod_sb(tp, fields);
2000        error = xfs_trans_commit(tp, 0);
2001        return error;
2002}
2003
2004/*
2005 * If the underlying (data/log/rt) device is readonly, there are some
2006 * operations that cannot proceed.
2007 */
2008int
2009xfs_dev_is_read_only(
2010        struct xfs_mount        *mp,
2011        char                    *message)
2012{
2013        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
2014            xfs_readonly_buftarg(mp->m_logdev_targp) ||
2015            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
2016                cmn_err(CE_NOTE,
2017                        "XFS: %s required on read-only device.", message);
2018                cmn_err(CE_NOTE,
2019                        "XFS: write access unavailable, cannot proceed.");
2020                return EROFS;
2021        }
2022        return 0;
2023}
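
/*
 * Usage sketch (illustrative only): operations that must write to the
 * underlying devices, such as growing the filesystem, can bail out early:
 *
 *      error = xfs_dev_is_read_only(mp, "grow");
 *      if (error)
 *              return error;
 */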
2024
2025#ifdef HAVE_PERCPU_SB
2026/*
2027 * Per-cpu incore superblock counters
2028 *
2029 * Simple concept, difficult implementation
2030 *
2031 * Basically, replace the incore superblock counters with a distributed per cpu
2032 * counter for contended fields (e.g.  free block count).
2033 *
2034 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
2035 * hence needs to be accurately read when we are running low on space. Hence
2036 * there is a method to enable and disable the per-cpu counters based on how
2037 * much "stuff" is available in them.
2038 *
2039 * Basically, a counter is enabled if there is enough free resource to justify
2040 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
2041 * ENOSPC), then we disable the counters to synchronise all callers and
2042 * re-distribute the available resources.
2043 *
2044 * If, once we redistributed the available resources, we still get a failure,
2045 * we disable the per-cpu counter and go through the slow path.
2046 *
2047 * The slow path is the current xfs_mod_incore_sb() function.  This means that
2048 * when we disable a per-cpu counter, we need to drain its resources back to
2049 * the global superblock. We do this after disabling the counter to prevent
2050 * more threads from queueing up on the counter.
2051 *
2052 * Essentially, this means that we still need a lock in the fast path to enable
2053 * synchronisation between the global counters and the per-cpu counters. This
2054 * is not a problem because the lock will be local to a CPU almost all the time
2055 * and have little contention except when we get to ENOSPC conditions.
2056 *
2057 * Basically, this lock becomes a barrier that enables us to lock out the fast
2058 * path while we do things like enabling and disabling counters and
2059 * synchronising the counters.
2060 *
2061 * Locking rules:
2062 *
2063 *      1. m_sb_lock before picking up per-cpu locks
2064 *      2. per-cpu locks always picked up via for_each_online_cpu() order
2065 *      3. accurate counter sync requires m_sb_lock + per cpu locks
2066 *      4. modifying per-cpu counters requires holding per-cpu lock
2067 *      5. modifying global counters requires holding m_sb_lock
2068 *      6. enabling or disabling a counter requires holding the m_sb_lock 
2069 *         and _none_ of the per-cpu locks.
2070 *
2071 * Disabled counters are only ever re-enabled by a balance operation
2072 * that results in more free resources per CPU than a given threshold.
2073 * To ensure counters don't remain disabled, they are rebalanced when
2074 * the global resource goes above a higher threshold (i.e. some hysteresis
2075 * is present to prevent thrashing).
2076 */
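
/*
 * Worked example of the locking rules above (a sketch of what
 * xfs_icsb_sync_counters() below effectively does for an accurate read):
 *
 *      spin_lock(&mp->m_sb_lock);              (rule 1: m_sb_lock first)
 *      xfs_icsb_lock_all_counters(mp);         (rule 2: per-cpu, cpu order)
 *      fold the per-cpu values into mp->m_sb   (rules 3-5 satisfied)
 *      xfs_icsb_unlock_all_counters(mp);
 *      spin_unlock(&mp->m_sb_lock);
 */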
2077
2078#ifdef CONFIG_HOTPLUG_CPU
2079/*
2080 * hot-plug CPU notifier support.
2081 *
2082 * We need a notifier per filesystem as we need to be able to identify
2083 * the filesystem to balance the counters out. This is achieved by
2084 * having a notifier block embedded in the xfs_mount_t and doing pointer
2085 * magic to get the mount pointer from the notifier block address.
2086 */
2087STATIC int
2088xfs_icsb_cpu_notify(
2089        struct notifier_block *nfb,
2090        unsigned long action,
2091        void *hcpu)
2092{
2093        xfs_icsb_cnts_t *cntp;
2094        xfs_mount_t     *mp;
2095
2096        mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
2097        cntp = (xfs_icsb_cnts_t *)
2098                        per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
2099        switch (action) {
2100        case CPU_UP_PREPARE:
2101        case CPU_UP_PREPARE_FROZEN:
2102                /* Easy case - initialize the area and locks; the
2103                 * rebalance done at CPU_ONLINE does everything else for us. */
2104                memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
2105                break;
2106        case CPU_ONLINE:
2107        case CPU_ONLINE_FROZEN:
2108                xfs_icsb_lock(mp);
2109                xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
2110                xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
2111                xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
2112                xfs_icsb_unlock(mp);
2113                break;
2114        case CPU_DEAD:
2115        case CPU_DEAD_FROZEN:
2116                /* Disable all the counters, then fold the dead cpu's
2117                 * count into the total on the global superblock and
2118                 * re-enable the counters. */
2119                xfs_icsb_lock(mp);
2120                spin_lock(&mp->m_sb_lock);
2121                xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
2122                xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
2123                xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
2124
2125                mp->m_sb.sb_icount += cntp->icsb_icount;
2126                mp->m_sb.sb_ifree += cntp->icsb_ifree;
2127                mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
2128
2129                memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
2130
2131                xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
2132                xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
2133                xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
2134                spin_unlock(&mp->m_sb_lock);
2135                xfs_icsb_unlock(mp);
2136                break;
2137        }
2138
2139        return NOTIFY_OK;
2140}
2141#endif /* CONFIG_HOTPLUG_CPU */
2142
2143int
2144xfs_icsb_init_counters(
2145        xfs_mount_t     *mp)
2146{
2147        xfs_icsb_cnts_t *cntp;
2148        int             i;
2149
2150        mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
2151        if (mp->m_sb_cnts == NULL)
2152                return -ENOMEM;
2153
2154#ifdef CONFIG_HOTPLUG_CPU
2155        mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
2156        mp->m_icsb_notifier.priority = 0;
2157        register_hotcpu_notifier(&mp->m_icsb_notifier);
2158#endif /* CONFIG_HOTPLUG_CPU */
2159
2160        for_each_online_cpu(i) {
2161                cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2162                memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
2163        }
2164
2165        mutex_init(&mp->m_icsb_mutex);
2166
2167        /*
2168         * start with all counters disabled so that the
2169         * initial balance kicks us off correctly
2170         */
2171        mp->m_icsb_counters = -1;
2172        return 0;
2173}
2174
2175void
2176xfs_icsb_reinit_counters(
2177        xfs_mount_t     *mp)
2178{
2179        xfs_icsb_lock(mp);
2180        /*
2181         * start with all counters disabled so that the
2182         * initial balance kicks us off correctly
2183         */
2184        mp->m_icsb_counters = -1;
2185        xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
2186        xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
2187        xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
2188        xfs_icsb_unlock(mp);
2189}
2190
2191void
2192xfs_icsb_destroy_counters(
2193        xfs_mount_t     *mp)
2194{
2195        if (mp->m_sb_cnts) {
2196                unregister_hotcpu_notifier(&mp->m_icsb_notifier);
2197                free_percpu(mp->m_sb_cnts);
2198        }
2199        mutex_destroy(&mp->m_icsb_mutex);
2200}
2201
2202STATIC void
2203xfs_icsb_lock_cntr(
2204        xfs_icsb_cnts_t *icsbp)
2205{
2206        while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
2207                ndelay(1000);
2208        }
2209}
2210
2211STATIC void
2212xfs_icsb_unlock_cntr(
2213        xfs_icsb_cnts_t *icsbp)
2214{
2215        clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
2216}
2217
2218
2219STATIC void
2220xfs_icsb_lock_all_counters(
2221        xfs_mount_t     *mp)
2222{
2223        xfs_icsb_cnts_t *cntp;
2224        int             i;
2225
2226        for_each_online_cpu(i) {
2227                cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2228                xfs_icsb_lock_cntr(cntp);
2229        }
2230}
2231
2232STATIC void
2233xfs_icsb_unlock_all_counters(
2234        xfs_mount_t     *mp)
2235{
2236        xfs_icsb_cnts_t *cntp;
2237        int             i;
2238
2239        for_each_online_cpu(i) {
2240                cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2241                xfs_icsb_unlock_cntr(cntp);
2242        }
2243}
2244
2245STATIC void
2246xfs_icsb_count(
2247        xfs_mount_t     *mp,
2248        xfs_icsb_cnts_t *cnt,
2249        int             flags)
2250{
2251        xfs_icsb_cnts_t *cntp;
2252        int             i;
2253
2254        memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
2255
2256        if (!(flags & XFS_ICSB_LAZY_COUNT))
2257                xfs_icsb_lock_all_counters(mp);
2258
2259        for_each_online_cpu(i) {
2260                cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
2261                cnt->icsb_icount += cntp->icsb_icount;
2262                cnt->icsb_ifree += cntp->icsb_ifree;
2263                cnt->icsb_fdblocks += cntp->icsb_fdblocks;
2264        }
2265
2266        if (!(flags & XFS_ICSB_LAZY_COUNT))
2267                xfs_icsb_unlock_all_counters(mp);
2268}
2269
2270STATIC int
2271xfs_icsb_counter_disabled(
2272        xfs_mount_t     *mp,
2273        xfs_sb_field_t  field)
2274{
2275        ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
2276        return test_bit(field, &mp->m_icsb_counters);
2277}
2278
2279STATIC void
2280xfs_icsb_disable_counter(
2281        xfs_mount_t     *mp,
2282        xfs_sb_field_t  field)
2283{
2284        xfs_icsb_cnts_t cnt;
2285
2286        ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
2287
2288        /*
2289         * If we are already disabled, then there is nothing to do
2290         * here. We check before locking all the counters to avoid
2291         * the expensive lock operation when being called in the
2292         * slow path and the counter is already disabled. This is
2293         * safe because the only time we set or clear this state is under
2294         * the m_icsb_mutex.
2295         */
2296        if (xfs_icsb_counter_disabled(mp, field))
2297                return;
2298
2299        xfs_icsb_lock_all_counters(mp);
2300        if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
2301                /* drain back to superblock */
2302
2303                xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
2304                switch(field) {
2305                case XFS_SBS_ICOUNT:
2306                        mp->m_sb.sb_icount = cnt.icsb_icount;
2307                        break;
2308                case XFS_SBS_IFREE:
2309                        mp->m_sb.sb_ifree = cnt.icsb_ifree;
2310                        break;
2311                case XFS_SBS_FDBLOCKS:
2312                        mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2313                        break;
2314                default:
2315                        BUG();
2316                }
2317        }
2318
2319        xfs_icsb_unlock_all_counters(mp);
2320}
2321
2322STATIC void
2323xfs_icsb_enable_counter(
2324        xfs_mount_t     *mp,
2325        xfs_sb_field_t  field,
2326        uint64_t        count,
2327        uint64_t        resid)
2328{
2329        xfs_icsb_cnts_t *cntp;
2330        int             i;
2331
2332        ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
2333
2334        xfs_icsb_lock_all_counters(mp);
2335        for_each_online_cpu(i) {
2336                cntp = per_cpu_ptr(mp->m_sb_cnts, i);
2337                switch (field) {
2338                case XFS_SBS_ICOUNT:
2339                        cntp->icsb_icount = count + resid;
2340                        break;
2341                case XFS_SBS_IFREE:
2342                        cntp->icsb_ifree = count + resid;
2343                        break;
2344                case XFS_SBS_FDBLOCKS:
2345                        cntp->icsb_fdblocks = count + resid;
2346                        break;
2347                default:
2348                        BUG();
2349                        break;
2350                }
2351                resid = 0;
2352        }
2353        clear_bit(field, &mp->m_icsb_counters);
2354        xfs_icsb_unlock_all_counters(mp);
2355}
2356
2357void
2358xfs_icsb_sync_counters_locked(
2359        xfs_mount_t     *mp,
2360        int             flags)
2361{
2362        xfs_icsb_cnts_t cnt;
2363
2364        xfs_icsb_count(mp, &cnt, flags);
2365
2366        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
2367                mp->m_sb.sb_icount = cnt.icsb_icount;
2368        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
2369                mp->m_sb.sb_ifree = cnt.icsb_ifree;
2370        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
2371                mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
2372}
2373
2374/*
2375 * Accurate update of per-cpu counters to incore superblock
2376 */
2377void
2378xfs_icsb_sync_counters(
2379        xfs_mount_t     *mp,
2380        int             flags)
2381{
2382        spin_lock(&mp->m_sb_lock);
2383        xfs_icsb_sync_counters_locked(mp, flags);
2384        spin_unlock(&mp->m_sb_lock);
2385}
2386
2387/*
2388 * Balance and enable/disable counters as necessary.
2389 *
2390 * Thresholds for re-enabling counters are somewhat magic.  Inode counts are
2391 * chosen to be the same number as a single on-disk allocation chunk per CPU,
2392 * and free blocks is far enough from zero that we aren't going to thrash when
2393 * we get near ENOSPC. We also need to supply a minimum we require per cpu to
2394 * prevent looping endlessly when xfs_alloc_space asks for more than will
2395 * be distributed to a single CPU but each CPU has enough blocks to be
2396 * reenabled.
2397 *
2398 * Note that we can be called when counters are already disabled.
2399 * xfs_icsb_disable_counter() optimises the counter locking in this case to
2400 * prevent locking every per-cpu counter needlessly.
2401 */
2402
2403#define XFS_ICSB_INO_CNTR_REENABLE      (uint64_t)64
2404#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
2405                (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
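
/*
 * Worked example (illustrative numbers): with 4 online CPUs and
 * sb_icount == 259, do_div() leaves count = 64 and resid = 3, so the
 * counter is re-enabled (count >= XFS_ICSB_INO_CNTR_REENABLE) with 64
 * inodes per CPU and the first CPU taking the 3 left over.  With
 * sb_icount == 255 the per-cpu share would be 63, below the threshold,
 * and the counter stays disabled until a later balance finds more room.
 */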
2406STATIC void
2407xfs_icsb_balance_counter_locked(
2408        xfs_mount_t     *mp,
2409        xfs_sb_field_t  field,
2410        int             min_per_cpu)
2411{
2412        uint64_t        count, resid;
2413        int             weight = num_online_cpus();
2414        uint64_t        min = (uint64_t)min_per_cpu;
2415
2416        /* disable counter and sync counter */
2417        xfs_icsb_disable_counter(mp, field);
2418
2419        /* update counters  - first CPU gets residual*/
2420        switch (field) {
2421        case XFS_SBS_ICOUNT:
2422                count = mp->m_sb.sb_icount;
2423                resid = do_div(count, weight);
2424                if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
2425                        return;
2426                break;
2427        case XFS_SBS_IFREE:
2428                count = mp->m_sb.sb_ifree;
2429                resid = do_div(count, weight);
2430                if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
2431                        return;
2432                break;
2433        case XFS_SBS_FDBLOCKS:
2434                count = mp->m_sb.sb_fdblocks;
2435                resid = do_div(count, weight);
2436                if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
2437                        return;
2438                break;
2439        default:
2440                BUG();
2441                count = resid = 0;      /* quiet, gcc */
2442                break;
2443        }
2444
2445        xfs_icsb_enable_counter(mp, field, count, resid);
2446}
2447
2448STATIC void
2449xfs_icsb_balance_counter(
2450        xfs_mount_t     *mp,
2451        xfs_sb_field_t  fields,
2452        int             min_per_cpu)
2453{
2454        spin_lock(&mp->m_sb_lock);
2455        xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
2456        spin_unlock(&mp->m_sb_lock);
2457}
2458
2459int
2460xfs_icsb_modify_counters(
2461        xfs_mount_t     *mp,
2462        xfs_sb_field_t  field,
2463        int64_t         delta,
2464        int             rsvd)
2465{
2466        xfs_icsb_cnts_t *icsbp;
2467        long long       lcounter;       /* long counter for 64 bit fields */
2468        int             ret = 0;
2469
2470        might_sleep();
2471again:
2472        preempt_disable();
2473        icsbp = this_cpu_ptr(mp->m_sb_cnts);
2474
2475        /*
2476         * if the counter is disabled, go to slow path
2477         */
2478        if (unlikely(xfs_icsb_counter_disabled(mp, field)))
2479                goto slow_path;
2480        xfs_icsb_lock_cntr(icsbp);
2481        if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
2482                xfs_icsb_unlock_cntr(icsbp);
2483                goto slow_path;
2484        }
2485
2486        switch (field) {
2487        case XFS_SBS_ICOUNT:
2488                lcounter = icsbp->icsb_icount;
2489                lcounter += delta;
2490                if (unlikely(lcounter < 0))
2491                        goto balance_counter;
2492                icsbp->icsb_icount = lcounter;
2493                break;
2494
2495        case XFS_SBS_IFREE:
2496                lcounter = icsbp->icsb_ifree;
2497                lcounter += delta;
2498                if (unlikely(lcounter < 0))
2499                        goto balance_counter;
2500                icsbp->icsb_ifree = lcounter;
2501                break;
2502
2503        case XFS_SBS_FDBLOCKS:
2504                BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
2505
2506                lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
2507                lcounter += delta;
2508                if (unlikely(lcounter < 0))
2509                        goto balance_counter;
2510                icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
2511                break;
2512        default:
2513                BUG();
2514                break;
2515        }
2516        xfs_icsb_unlock_cntr(icsbp);
2517        preempt_enable();
2518        return 0;
2519
2520slow_path:
2521        preempt_enable();
2522
2523        /*
2524         * serialise with a mutex so we don't burn lots of cpu on
2525         * the superblock lock. We still need to hold the superblock
2526         * lock, however, when we modify the global structures.
2527         */
2528        xfs_icsb_lock(mp);
2529
2530        /*
2531         * Now running atomically.
2532         *
2533         * If the counter is enabled, someone has beaten us to rebalancing.
2534         * Drop the lock and try again in the fast path....
2535         */
2536        if (!(xfs_icsb_counter_disabled(mp, field))) {
2537                xfs_icsb_unlock(mp);
2538                goto again;
2539        }
2540
2541        /*
2542         * The counter is currently disabled. Because we are
2543         * running atomically here, we know a rebalance cannot
2544         * be in progress. Hence we can go straight to operating
2545         * on the global superblock. We do not call xfs_mod_incore_sb()
2546         * here even though we need to get the m_sb_lock. Doing so
2547         * will cause us to re-enter this function and deadlock.
2548         * Hence we get the m_sb_lock ourselves and then call
2549         * xfs_mod_incore_sb_unlocked() as the unlocked path operates
2550         * directly on the global counters.
2551         */
2552        spin_lock(&mp->m_sb_lock);
2553        ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
2554        spin_unlock(&mp->m_sb_lock);
2555
2556        /*
2557         * Now that we've modified the global superblock, we
2558         * may be able to re-enable the distributed counters
2559         * (e.g. lots of space just got freed). After that
2560         * we are done.
2561         */
2562        if (ret != ENOSPC)
2563                xfs_icsb_balance_counter(mp, field, 0);
2564        xfs_icsb_unlock(mp);
2565        return ret;
2566
2567balance_counter:
2568        xfs_icsb_unlock_cntr(icsbp);
2569        preempt_enable();
2570
2571        /*
2572         * We may have multiple threads here if multiple per-cpu
2573         * counters run dry at the same time. This will mean we can
2574         * do more balances than strictly necessary but it is not
2575         * the common slowpath case.
2576         */
2577        xfs_icsb_lock(mp);
2578
2579        /*
2580         * Now running atomically.
2581         *
2582         * This will leave the counter in the correct state for future
2583         * accesses. After the rebalance, we simply try again and our retry
2584         * will either succeed through the fast path or slow path without
2585         * another balance operation being required.
2586         */
2587        xfs_icsb_balance_counter(mp, field, delta);
2588        xfs_icsb_unlock(mp);
2589        goto again;
2590}
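
/*
 * Usage sketch (illustrative only; "blocks", "rsvd" and the cleanup helper
 * are hypothetical): the typical fast-path consumer is block reservation,
 * which subtracts from the free space and may be allowed to dip into the
 * reserve pool:
 *
 *      error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
 *                                       -((int64_t)blocks), rsvd);
 *      if (error == ENOSPC)
 *              undo_the_reservation();
 */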
2591
2592#endif
2593