linux/fs/xfs/xfs_dquot.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_error.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *       qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */

struct kmem_cache               *xfs_dqtrx_cache;
static struct kmem_cache        *xfs_dquot_cache;

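/*
 * Lockdep class keys so that group and project dquot locks are treated
 * as distinct from user dquot locks, since dquots of different types
 * can be held at the same time (see xfs_dqlock2).
 */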
static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
        struct xfs_dquot        *dqp)
{
        ASSERT(list_empty(&dqp->q_lru));

        kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
        mutex_destroy(&dqp->q_qlock);

        XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
        kmem_cache_free(xfs_dquot_cache, dqp);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
        struct xfs_dquot        *dq)
{
        struct xfs_mount        *mp = dq->q_mount;
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_def_quota    *defq;
        int                     prealloc = 0;

        ASSERT(dq->q_id);
        defq = xfs_get_defquota(q, xfs_dquot_type(dq));

        if (!dq->q_blk.softlimit) {
                dq->q_blk.softlimit = defq->blk.soft;
                prealloc = 1;
        }
        if (!dq->q_blk.hardlimit) {
                dq->q_blk.hardlimit = defq->blk.hard;
                prealloc = 1;
        }
        if (!dq->q_ino.softlimit)
                dq->q_ino.softlimit = defq->ino.soft;
        if (!dq->q_ino.hardlimit)
                dq->q_ino.hardlimit = defq->ino.hard;
        if (!dq->q_rtb.softlimit)
                dq->q_rtb.softlimit = defq->rtb.soft;
        if (!dq->q_rtb.hardlimit)
                dq->q_rtb.hardlimit = defq->rtb.hard;

        if (prealloc)
                xfs_dquot_set_prealloc_limits(dq);
}

/* Set the expiration time of a quota's grace period. */
time64_t
xfs_dquot_set_timeout(
        struct xfs_mount        *mp,
        time64_t                timeout)
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;

        return clamp_t(time64_t, timeout, qi->qi_expiry_min,
                                          qi->qi_expiry_max);
}

/* Set the length of the default grace period. */
time64_t
xfs_dquot_set_grace_period(
        time64_t                grace)
{
        return clamp_t(time64_t, grace, XFS_DQ_GRACE_MIN, XFS_DQ_GRACE_MAX);
}

/*
 * Determine if this quota counter is over either limit and set the quota
 * timers as appropriate.
 */
static inline void
xfs_qm_adjust_res_timer(
        struct xfs_mount        *mp,
        struct xfs_dquot_res    *res,
        struct xfs_quota_limits *qlim)
{
        ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit);

        if ((res->softlimit && res->count > res->softlimit) ||
            (res->hardlimit && res->count > res->hardlimit)) {
                if (res->timer == 0)
                        res->timer = xfs_dquot_set_timeout(mp,
                                        ktime_get_real_seconds() + qlim->time);
        } else {
                if (res->timer == 0)
                        res->warnings = 0;
                else
                        res->timer = 0;
        }
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated: we simply don't reject any quota
 * reservations in that case.
 * We also return 0 as the values of the timers in Q_GETQUOTA calls when
 * enforcement is off.
 * In contrast, warnings don't 'automatically' get started when limits
 * are exceeded.  They do get reset to zero, however, when we find the
 * count to be under the soft limit (they are only ever set non-zero
 * via userspace).
 */
void
xfs_qm_adjust_dqtimers(
        struct xfs_dquot        *dq)
{
        struct xfs_mount        *mp = dq->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct xfs_def_quota    *defq;

        ASSERT(dq->q_id);
        defq = xfs_get_defquota(qi, xfs_dquot_type(dq));

        xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_blk, &defq->blk);
        xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_ino, &defq->ino);
        xfs_qm_adjust_res_timer(dq->q_mount, &dq->q_rtb, &defq->rtb);
}

/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
        struct xfs_trans        *tp,
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        xfs_dqtype_t            type,
        struct xfs_buf          *bp)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_dqblk        *d;
        xfs_dqid_t              curid;
        unsigned int            qflag;
        unsigned int            blftype;
        int                     i;

        ASSERT(tp);
        ASSERT(xfs_buf_islocked(bp));

        switch (type) {
        case XFS_DQTYPE_USER:
                qflag = XFS_UQUOTA_CHKD;
                blftype = XFS_BLF_UDQUOT_BUF;
                break;
        case XFS_DQTYPE_PROJ:
                qflag = XFS_PQUOTA_CHKD;
                blftype = XFS_BLF_PDQUOT_BUF;
                break;
        case XFS_DQTYPE_GROUP:
                qflag = XFS_GQUOTA_CHKD;
                blftype = XFS_BLF_GDQUOT_BUF;
                break;
        default:
                ASSERT(0);
                return;
        }

        d = bp->b_addr;

        /*
         * ID of the first dquot in the block; IDs are zero based.
         */
        curid = id - (id % q->qi_dqperchunk);
        memset(d, 0, BBTOB(q->qi_dqchunklen));
        for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
                d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
                d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
                d->dd_diskdq.d_id = cpu_to_be32(curid);
                d->dd_diskdq.d_type = type;
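                /*
                 * The id-zero (root) dquot stores the default quota
                 * timer values, which are durations rather than
                 * timestamps, so it never carries the BIGTIME flag.
                 */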
                if (curid > 0 && xfs_has_bigtime(mp))
                        d->dd_diskdq.d_type |= XFS_DQTYPE_BIGTIME;
                if (xfs_has_crc(mp)) {
                        uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
                        xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
                                         XFS_DQUOT_CRC_OFF);
                }
        }

        xfs_trans_dquot_buf(tp, bp, blftype);

        /*
         * quotacheck uses delayed writes to update all the dquots on disk in an
         * efficient manner instead of logging the individual dquot changes as
         * they are made. However if we log the buffer allocated here and crash
         * after quotacheck while the logged initialisation is still in the
         * active region of the log, log recovery can replay the dquot buffer
         * initialisation over the top of the checked dquots and corrupt quota
         * accounting.
         *
         * To avoid this problem, quotacheck cannot log the initialised buffer.
         * We must still dirty the buffer and write it back before the
         * allocation transaction clears the log. Therefore, mark the buffer as
         * ordered instead of logging it directly. This is safe for quotacheck
         * because it detects and repairs allocated but uninitialized dquot
         * blocks
         * in the quota inodes.
         */
        if (!(mp->m_qflags & qflag))
                xfs_trans_ordered_buf(tp, bp);
        else
                xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft limit
 * is not specified, we use 95% of the hard limit.
 */
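/*
 * For example (illustrative numbers only): a dquot with a 1000 block
 * hard limit and no soft limit gets hi_wmark = 1000, lo_wmark = 950
 * (95% of the hard limit), and q_low_space[] = {10, 30, 50} blocks for
 * the 1%/3%/5% thresholds.
 */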
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
        uint64_t space;

        dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit;
        dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit;
        if (!dqp->q_prealloc_lo_wmark) {
                dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
                do_div(dqp->q_prealloc_lo_wmark, 100);
                dqp->q_prealloc_lo_wmark *= 95;
        }

        space = dqp->q_prealloc_hi_wmark;

        do_div(space, 100);
        dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
        dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
        dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}

/*
 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 * return the buffer locked and held. This is called when the bmapi finds a
 * hole.
 */
STATIC int
xfs_dquot_disk_alloc(
        struct xfs_trans        **tpp,
        struct xfs_dquot        *dqp,
        struct xfs_buf          **bpp)
{
        struct xfs_bmbt_irec    map;
        struct xfs_trans        *tp = *tpp;
        struct xfs_mount        *mp = tp->t_mountp;
        struct xfs_buf          *bp;
        xfs_dqtype_t            qtype = xfs_dquot_type(dqp);
        struct xfs_inode        *quotip = xfs_quota_inode(mp, qtype);
        int                     nmaps = 1;
        int                     error;

        trace_xfs_dqalloc(dqp);

        xfs_ilock(quotip, XFS_ILOCK_EXCL);
        if (!xfs_this_quota_on(dqp->q_mount, qtype)) {
                /*
                 * Return if this type of quota was turned off while we
                 * didn't have the inode lock.
                 */
                xfs_iunlock(quotip, XFS_ILOCK_EXCL);
                return -ESRCH;
        }

        xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);

        error = xfs_iext_count_may_overflow(quotip, XFS_DATA_FORK,
                        XFS_IEXT_ADD_NOSPLIT_CNT);
        if (error)
                return error;

        /* Create the block mapping. */
        error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
                        XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, 0, &map,
                        &nmaps);
        if (error)
                return error;
        ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
        ASSERT(nmaps == 1);
        ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
               (map.br_startblock != HOLESTARTBLOCK));

        /*
         * Keep track of the blkno to save a lookup later
         */
        dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

        /* now we can just get the buffer (there's nothing to read yet) */
        error = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
                        mp->m_quotainfo->qi_dqchunklen, 0, &bp);
        if (error)
                return error;
        bp->b_ops = &xfs_dquot_buf_ops;

        /*
         * Make a chunk of dquots out of this buffer and log
         * the entire thing.
         */
        xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp);
        xfs_buf_set_ref(bp, XFS_DQUOT_REF);

        /*
         * Hold the buffer and join it to the dfops so that we'll still own
         * the buffer when we return to the caller.  The buffer disposal on
         * error must be paid attention to very carefully, as it has been
         * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
         * code when allocating a new dquot record" in 2005, and the later
         * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
         * the buffer locked across the _defer_finish call.  We can now do
         * this correctly with xfs_defer_bjoin.
         *
         * Above, we allocated a disk block for the dquot information and used
         * get_buf to initialize the dquot. If the _defer_finish fails, the old
         * transaction is gone but the new buffer is not joined or held to any
         * transaction, so we must _buf_relse it.
         *
         * If everything succeeds, the caller of this function is returned a
         * buffer that is locked and held to the transaction.  The caller
         * is responsible for unlocking any buffer passed back, either
         * manually or by committing the transaction.  On error, the buffer is
         * released and not passed back.
         */
        xfs_trans_bhold(tp, bp);
        error = xfs_defer_finish(tpp);
        if (error) {
                xfs_trans_bhold_release(*tpp, bp);
                xfs_trans_brelse(*tpp, bp);
                return error;
        }
        *bpp = bp;
        return 0;
}

/*
 * Read in the in-core dquot's on-disk metadata and return the buffer.
 * Returns ENOENT to signal a hole.
 */
STATIC int
xfs_dquot_disk_read(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        struct xfs_buf          **bpp)
{
        struct xfs_bmbt_irec    map;
        struct xfs_buf          *bp;
        xfs_dqtype_t            qtype = xfs_dquot_type(dqp);
        struct xfs_inode        *quotip = xfs_quota_inode(mp, qtype);
        uint                    lock_mode;
        int                     nmaps = 1;
        int                     error;

        lock_mode = xfs_ilock_data_map_shared(quotip);
        if (!xfs_this_quota_on(mp, qtype)) {
                /*
                 * Return if this type of quota was turned off while we
                 * didn't have the quota inode lock.
                 */
                xfs_iunlock(quotip, lock_mode);
                return -ESRCH;
        }

        /*
         * Find the block map; no allocations yet
         */
        error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
                        XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
        xfs_iunlock(quotip, lock_mode);
        if (error)
                return error;

        ASSERT(nmaps == 1);
        ASSERT(map.br_blockcount >= 1);
        ASSERT(map.br_startblock != DELAYSTARTBLOCK);
        if (map.br_startblock == HOLESTARTBLOCK)
                return -ENOENT;

        trace_xfs_dqtobp_read(dqp);

        /*
         * store the blkno etc so that we don't have to do the
         * mapping all the time
         */
        dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
                        mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                        &xfs_dquot_buf_ops);
        if (error) {
                ASSERT(bp == NULL);
                return error;
        }

        ASSERT(xfs_buf_islocked(bp));
        xfs_buf_set_ref(bp, XFS_DQUOT_REF);
        *bpp = bp;

        return 0;
}

/* Allocate and initialize everything we need for an incore dquot. */
STATIC struct xfs_dquot *
xfs_dquot_alloc(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        xfs_dqtype_t            type)
{
        struct xfs_dquot        *dqp;

        dqp = kmem_cache_zalloc(xfs_dquot_cache, GFP_KERNEL | __GFP_NOFAIL);

        dqp->q_type = type;
        dqp->q_id = id;
        dqp->q_mount = mp;
        INIT_LIST_HEAD(&dqp->q_lru);
        mutex_init(&dqp->q_qlock);
        init_waitqueue_head(&dqp->q_pinwait);
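        /* Offset of this dquot's chunk within the quota file. */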
        dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
        /*
         * Offset of dquot in the (fixed sized) dquot chunk.
         */
        dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
                        sizeof(struct xfs_dqblk);

        /*
         * The q_flush completion serves as the dquot's flush lock.
         * Because it is a counting completion, complete it once up
         * front so that a single holder can then acquire it without
         * blocking.
         */
        init_completion(&dqp->q_flush);
        complete(&dqp->q_flush);

        /*
         * Make sure group quotas have a different lock class than user
         * quotas.
         */
        switch (type) {
        case XFS_DQTYPE_USER:
                /* uses the default lock class */
                break;
        case XFS_DQTYPE_GROUP:
                lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
                break;
        case XFS_DQTYPE_PROJ:
                lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
                break;
        default:
                ASSERT(0);
                break;
        }

        xfs_qm_dquot_logitem_init(dqp);

        XFS_STATS_INC(mp, xs_qm_dquot);
        return dqp;
}

/* Check the ondisk dquot's id and type match what the incore dquot expects. */
static bool
xfs_dquot_check_type(
        struct xfs_dquot        *dqp,
        struct xfs_disk_dquot   *ddqp)
{
        uint8_t                 ddqp_type;
        uint8_t                 dqp_type;

        ddqp_type = ddqp->d_type & XFS_DQTYPE_REC_MASK;
        dqp_type = xfs_dquot_type(dqp);

        if (be32_to_cpu(ddqp->d_id) != dqp->q_id)
                return false;

        /*
         * V5 filesystems always expect an exact type match.  V4 filesystems
         * expect an exact match for user dquots and for non-root group and
         * project dquots.
         */
        if (xfs_has_crc(dqp->q_mount) ||
            dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)
                return ddqp_type == dqp_type;

        /*
         * V4 filesystems support either group or project quotas, but not both
         * at the same time.  The non-user quota file can be switched between
         * group and project quota uses depending on the mount options, which
         * means that we can encounter the other type when we try to load quota
         * defaults.  Quotacheck will soon reset the entire quota file
         * (including the root dquot) anyway, but don't log scary corruption
         * reports to dmesg.
         */
        return ddqp_type == XFS_DQTYPE_GROUP || ddqp_type == XFS_DQTYPE_PROJ;
}

/* Copy the in-core quota fields in from the on-disk buffer. */
STATIC int
xfs_dquot_from_disk(
        struct xfs_dquot        *dqp,
        struct xfs_buf          *bp)
{
        struct xfs_disk_dquot   *ddqp = bp->b_addr + dqp->q_bufoffset;

        /*
         * Ensure that we got the type and ID we were looking for.
         * Everything else was checked by the dquot buffer verifier.
         */
        if (!xfs_dquot_check_type(dqp, ddqp)) {
                xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR,
                          "Metadata corruption detected at %pS, quota %u",
                          __this_address, dqp->q_id);
                xfs_alert(bp->b_mount, "Unmount and run xfs_repair");
                return -EFSCORRUPTED;
        }

        /* copy everything from disk dquot to the incore dquot */
        dqp->q_type = ddqp->d_type;
        dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
        dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit);
        dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
        dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit);
        dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
        dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

        dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount);
        dqp->q_ino.count = be64_to_cpu(ddqp->d_icount);
        dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount);

        dqp->q_blk.warnings = be16_to_cpu(ddqp->d_bwarns);
        dqp->q_ino.warnings = be16_to_cpu(ddqp->d_iwarns);
        dqp->q_rtb.warnings = be16_to_cpu(ddqp->d_rtbwarns);

        dqp->q_blk.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_btimer);
        dqp->q_ino.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_itimer);
        dqp->q_rtb.timer = xfs_dquot_from_disk_ts(ddqp, ddqp->d_rtbtimer);

        /*
         * Reservation counters are defined as reservation plus current usage
         * to avoid having to add every time.
         */
        dqp->q_blk.reserved = dqp->q_blk.count;
        dqp->q_ino.reserved = dqp->q_ino.count;
        dqp->q_rtb.reserved = dqp->q_rtb.count;

        /* initialize the dquot speculative prealloc thresholds */
        xfs_dquot_set_prealloc_limits(dqp);
        return 0;
}

/* Copy the in-core quota fields into the on-disk buffer. */
void
xfs_dquot_to_disk(
        struct xfs_disk_dquot   *ddqp,
        struct xfs_dquot        *dqp)
{
        ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
        ddqp->d_version = XFS_DQUOT_VERSION;
        ddqp->d_type = dqp->q_type;
        ddqp->d_id = cpu_to_be32(dqp->q_id);
        ddqp->d_pad0 = 0;
        ddqp->d_pad = 0;

        ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit);
        ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit);
        ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit);
        ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit);
        ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit);
        ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit);

        ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count);
        ddqp->d_icount = cpu_to_be64(dqp->q_ino.count);
        ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count);

        ddqp->d_bwarns = cpu_to_be16(dqp->q_blk.warnings);
        ddqp->d_iwarns = cpu_to_be16(dqp->q_ino.warnings);
        ddqp->d_rtbwarns = cpu_to_be16(dqp->q_rtb.warnings);

        ddqp->d_btimer = xfs_dquot_to_disk_ts(dqp, dqp->q_blk.timer);
        ddqp->d_itimer = xfs_dquot_to_disk_ts(dqp, dqp->q_ino.timer);
        ddqp->d_rtbtimer = xfs_dquot_to_disk_ts(dqp, dqp->q_rtb.timer);
}

/* Allocate and initialize the dquot buffer for this in-core dquot. */
static int
xfs_qm_dqread_alloc(
        struct xfs_mount        *mp,
        struct xfs_dquot        *dqp,
        struct xfs_buf          **bpp)
{
        struct xfs_trans        *tp;
        int                     error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
                        XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
        if (error)
                goto err;

        error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
        if (error)
                goto err_cancel;

        error = xfs_trans_commit(tp);
        if (error) {
                /*
                 * Buffer was held to the transaction, so we have to unlock it
                 * manually here because we're not passing it back.
                 */
                xfs_buf_relse(*bpp);
                *bpp = NULL;
                goto err;
        }
        return 0;

err_cancel:
        xfs_trans_cancel(tp);
err:
        return error;
}

/*
 * Read in the ondisk dquot using xfs_dquot_disk_read() then copy it to an
 * incore version, and release the buffer immediately.  If @can_alloc is
 * true, fill any holes in the on-disk metadata.
 */
static int
xfs_qm_dqread(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        xfs_dqtype_t            type,
        bool                    can_alloc,
        struct xfs_dquot        **dqpp)
{
        struct xfs_dquot        *dqp;
        struct xfs_buf          *bp;
        int                     error;

        dqp = xfs_dquot_alloc(mp, id, type);
        trace_xfs_dqread(dqp);

        /* Try to read the buffer, allocating if necessary. */
        error = xfs_dquot_disk_read(mp, dqp, &bp);
        if (error == -ENOENT && can_alloc)
                error = xfs_qm_dqread_alloc(mp, dqp, &bp);
        if (error)
                goto err;

        /*
         * At this point we should have a clean locked buffer.  Copy the data
         * to the incore dquot and release the buffer since the incore dquot
         * has its own locking protocol so we needn't tie up the buffer any
         * further.
         */
        ASSERT(xfs_buf_islocked(bp));
        error = xfs_dquot_from_disk(dqp, bp);
        xfs_buf_relse(bp);
        if (error)
                goto err;

        *dqpp = dqp;
        return error;

err:
        trace_xfs_dqread_fail(dqp);
        xfs_qm_dqdestroy(dqp);
        *dqpp = NULL;
        return error;
}

/*
 * Advance to the next id in the current chunk, or if at the end of the
 * chunk, skip ahead to the first id in the next allocated chunk by
 * looking up the next data extent in the quota file.
 */
static int
xfs_dq_get_next_id(
        struct xfs_mount        *mp,
        xfs_dqtype_t            type,
        xfs_dqid_t              *id)
{
        struct xfs_inode        *quotip = xfs_quota_inode(mp, type);
        xfs_dqid_t              next_id = *id + 1; /* simple advance */
        uint                    lock_flags;
        struct xfs_bmbt_irec    got;
        struct xfs_iext_cursor  cur;
        xfs_fsblock_t           start;
        int                     error = 0;

        /* If we'd wrap past the max ID, stop */
        if (next_id < *id)
                return -ENOENT;

        /* If new ID is within the current chunk, advancing it sufficed */
        if (next_id % mp->m_quotainfo->qi_dqperchunk) {
                *id = next_id;
                return 0;
        }

        /* Nope, next_id is now past the current chunk, so find the next one */
        start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

        lock_flags = xfs_ilock_data_map_shared(quotip);
        error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
        if (error) {
                /* drop the ilock taken above before bailing out */
                xfs_iunlock(quotip, lock_flags);
                return error;
        }

        if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
                /* contiguous chunk, bump startoff for the id calculation */
                if (got.br_startoff < start)
                        got.br_startoff = start;
                *id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
        } else {
                error = -ENOENT;
        }

        xfs_iunlock(quotip, lock_flags);

        return error;
}

/*
 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 * locked and ready to go.
 */
static struct xfs_dquot *
xfs_qm_dqget_cache_lookup(
        struct xfs_mount        *mp,
        struct xfs_quotainfo    *qi,
        struct radix_tree_root  *tree,
        xfs_dqid_t              id)
{
        struct xfs_dquot        *dqp;

restart:
        mutex_lock(&qi->qi_tree_lock);
        dqp = radix_tree_lookup(tree, id);
        if (!dqp) {
                mutex_unlock(&qi->qi_tree_lock);
                XFS_STATS_INC(mp, xs_qm_dqcachemisses);
                return NULL;
        }

        xfs_dqlock(dqp);
        if (dqp->q_flags & XFS_DQFLAG_FREEING) {
                xfs_dqunlock(dqp);
                mutex_unlock(&qi->qi_tree_lock);
                trace_xfs_dqget_freeing(dqp);
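                /*
                 * Another thread is tearing down this dquot; back off
                 * briefly and retry the lookup.
                 */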
                delay(1);
                goto restart;
        }

        dqp->q_nrefs++;
        mutex_unlock(&qi->qi_tree_lock);

        trace_xfs_dqget_hit(dqp);
        XFS_STATS_INC(mp, xs_qm_dqcachehits);
        return dqp;
}

/*
 * Try to insert a new dquot into the in-core cache.  If an error occurs the
 * caller should throw away the dquot and start over.  Otherwise, the dquot
 * is returned locked (and held by the cache) as if there had been a cache
 * hit.
 */
static int
xfs_qm_dqget_cache_insert(
        struct xfs_mount        *mp,
        struct xfs_quotainfo    *qi,
        struct radix_tree_root  *tree,
        xfs_dqid_t              id,
        struct xfs_dquot        *dqp)
{
        int                     error;

        mutex_lock(&qi->qi_tree_lock);
        error = radix_tree_insert(tree, id, dqp);
        if (unlikely(error)) {
                /* Duplicate found!  Caller must try again. */
                WARN_ON(error != -EEXIST);
                mutex_unlock(&qi->qi_tree_lock);
                trace_xfs_dqget_dup(dqp);
                return error;
        }

        /* Return a locked dquot to the caller, with a reference taken. */
        xfs_dqlock(dqp);
        dqp->q_nrefs = 1;

        qi->qi_dquots++;
        mutex_unlock(&qi->qi_tree_lock);

        return 0;
}

/* Check our input parameters. */
static int
xfs_qm_dqget_checks(
        struct xfs_mount        *mp,
        xfs_dqtype_t            type)
{
        switch (type) {
        case XFS_DQTYPE_USER:
                if (!XFS_IS_UQUOTA_ON(mp))
                        return -ESRCH;
                return 0;
        case XFS_DQTYPE_GROUP:
                if (!XFS_IS_GQUOTA_ON(mp))
                        return -ESRCH;
                return 0;
        case XFS_DQTYPE_PROJ:
                if (!XFS_IS_PQUOTA_ON(mp))
                        return -ESRCH;
                return 0;
        default:
                WARN_ON_ONCE(0);
                return -EINVAL;
        }
}

/*
 * Given the file system, id, and type (UDQUOT/GDQUOT/PDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 */
int
xfs_qm_dqget(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        xfs_dqtype_t            type,
        bool                    can_alloc,
        struct xfs_dquot        **O_dqpp)
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
        struct xfs_dquot        *dqp;
        int                     error;

        error = xfs_qm_dqget_checks(mp, type);
        if (error)
                return error;

restart:
        dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
        if (dqp) {
                *O_dqpp = dqp;
                return 0;
        }

        error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
        if (error)
                return error;

        error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
        if (error) {
                /*
                 * Duplicate found. Just throw away the new dquot and start
                 * over.
                 */
                xfs_qm_dqdestroy(dqp);
                XFS_STATS_INC(mp, xs_qm_dquot_dups);
                goto restart;
        }

        trace_xfs_dqget_miss(dqp);
        *O_dqpp = dqp;
        return 0;
}
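/*
 * A minimal usage sketch for xfs_qm_dqget() (hypothetical caller, for
 * illustration only):
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, id, XFS_DQTYPE_USER, true, &dqp);
 *	if (error)
 *		return error;
 *	// dqp is returned locked with a reference held
 *	xfs_qm_dqput(dqp);	// drops the reference and unlocks
 */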

/*
 * Given a dquot id and type, read and initialize a dquot from the on-disk
 * metadata.  This function is only for use during quota initialization so
 * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
 * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
 */
int
xfs_qm_dqget_uncached(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        xfs_dqtype_t            type,
        struct xfs_dquot        **dqpp)
{
        int                     error;

        error = xfs_qm_dqget_checks(mp, type);
        if (error)
                return error;

        return xfs_qm_dqread(mp, id, type, false, dqpp);
}

/* Return the quota id for a given inode and type. */
xfs_dqid_t
xfs_qm_id_for_quotatype(
        struct xfs_inode        *ip,
        xfs_dqtype_t            type)
{
        switch (type) {
        case XFS_DQTYPE_USER:
                return i_uid_read(VFS_I(ip));
        case XFS_DQTYPE_GROUP:
                return i_gid_read(VFS_I(ip));
        case XFS_DQTYPE_PROJ:
                return ip->i_projid;
        }
        ASSERT(0);
        return 0;
}

/*
 * Return the dquot for a given inode and type.  If @can_alloc is true, then
 * allocate blocks if needed.  The inode's ILOCK must be held and it must not
 * already have a dquot of this type attached.
 */
int
xfs_qm_dqget_inode(
        struct xfs_inode        *ip,
        xfs_dqtype_t            type,
        bool                    can_alloc,
        struct xfs_dquot        **O_dqpp)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
        struct xfs_dquot        *dqp;
        xfs_dqid_t              id;
        int                     error;

        error = xfs_qm_dqget_checks(mp, type);
        if (error)
                return error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(xfs_inode_dquot(ip, type) == NULL);

        id = xfs_qm_id_for_quotatype(ip, type);

restart:
        dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
        if (dqp) {
                *O_dqpp = dqp;
                return 0;
        }

        /*
         * Dquot cache miss. We don't want to keep the inode lock across
         * a (potential) disk read. Also we don't want to deal with the lock
         * ordering between quotainode and this inode. OTOH, dropping the inode
         * lock here means dealing with a chown that can happen before
         * we re-acquire the lock.
         */
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (error)
                return error;

        /*
         * A dquot could be attached to this inode by now, since we had
         * dropped the ilock.
         */
        if (xfs_this_quota_on(mp, type)) {
                struct xfs_dquot        *dqp1;

                dqp1 = xfs_inode_dquot(ip, type);
                if (dqp1) {
                        xfs_qm_dqdestroy(dqp);
                        dqp = dqp1;
                        xfs_dqlock(dqp);
                        goto dqret;
                }
        } else {
                /* inode stays locked on return */
                xfs_qm_dqdestroy(dqp);
                return -ESRCH;
        }

        error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
        if (error) {
                /*
                 * Duplicate found. Just throw away the new dquot and start
                 * over.
                 */
                xfs_qm_dqdestroy(dqp);
                XFS_STATS_INC(mp, xs_qm_dquot_dups);
                goto restart;
        }

dqret:
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        trace_xfs_dqget_miss(dqp);
        *O_dqpp = dqp;
        return 0;
}

/*
 * Starting at @id and progressing upwards, look for an initialized incore
 * dquot, lock it, and return it.
 */
int
xfs_qm_dqget_next(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        xfs_dqtype_t            type,
        struct xfs_dquot        **dqpp)
{
        struct xfs_dquot        *dqp;
        int                     error = 0;

        *dqpp = NULL;
        for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
                error = xfs_qm_dqget(mp, id, type, false, &dqp);
                if (error == -ENOENT)
                        continue;
                else if (error != 0)
                        break;

                if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
                        *dqpp = dqp;
                        return 0;
                }

                xfs_qm_dqput(dqp);
        }

        return error;
}

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 * If this was the last reference, put the dquot on the LRU list for
 * reclaim by the shrinker.
 */
void
xfs_qm_dqput(
        struct xfs_dquot        *dqp)
{
        ASSERT(dqp->q_nrefs > 0);
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        trace_xfs_dqput(dqp);

        if (--dqp->q_nrefs == 0) {
                struct xfs_quotainfo    *qi = dqp->q_mount->m_quotainfo;

                trace_xfs_dqput_free(dqp);

                if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
                        XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
        }
        xfs_dqunlock(dqp);
}

/*
 * Release a dquot.  The dquot must not be locked.
 */
void
xfs_qm_dqrele(
        struct xfs_dquot        *dqp)
{
        if (!dqp)
                return;

        trace_xfs_dqrele(dqp);

        xfs_dqlock(dqp);
        /*
         * We don't flush the dquot here even if it is dirty because
         * that would create stutters that we want to avoid.  Dirty
         * dquots are instead written back later, when the AIL pushes
         * the dquot log item or when the dquot is reclaimed.
         */
        xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine.  It is called from
 * buffer I/O completion context when the buffer containing the dquot is
 * flushed to disk.  It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock.  This behavior is very similar to that of inodes.
 */
static void
xfs_qm_dqflush_done(
        struct xfs_log_item     *lip)
{
        struct xfs_dq_logitem   *qip = (struct xfs_dq_logitem *)lip;
        struct xfs_dquot        *dqp = qip->qli_dquot;
        struct xfs_ail          *ailp = lip->li_ailp;
        xfs_lsn_t               tail_lsn;

        /*
         * We only want to pull the item from the AIL if its location in
         * the log has not changed since we started the flush, i.e. if
         * the dquot's lsn is unchanged.  Check the lsn outside the lock
         * first since that's cheaper, then recheck while holding the
         * lock before removing the dquot from the AIL.
         */
        if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
            ((lip->li_lsn == qip->qli_flush_lsn) ||
             test_bit(XFS_LI_FAILED, &lip->li_flags))) {

                spin_lock(&ailp->ail_lock);
                xfs_clear_li_failed(lip);
                if (lip->li_lsn == qip->qli_flush_lsn) {
                        /* xfs_ail_update_finish() drops the AIL lock */
                        tail_lsn = xfs_ail_delete_one(ailp, lip);
                        xfs_ail_update_finish(ailp, tail_lsn);
                } else {
                        spin_unlock(&ailp->ail_lock);
                }
        }

        /*
         * Release the dq's flush lock since we're done with it.
         */
        xfs_dqfunlock(dqp);
}

void
xfs_buf_dquot_iodone(
        struct xfs_buf          *bp)
{
        struct xfs_log_item     *lip, *n;

        list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
                list_del_init(&lip->li_bio_list);
                xfs_qm_dqflush_done(lip);
        }
}

void
xfs_buf_dquot_io_fail(
        struct xfs_buf          *bp)
{
        struct xfs_log_item     *lip;

        spin_lock(&bp->b_mount->m_ail->ail_lock);
        list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
                xfs_set_li_failed(lip, bp);
        spin_unlock(&bp->b_mount->m_ail->ail_lock);
}

/* Check incore dquot for errors before we flush. */
static xfs_failaddr_t
xfs_qm_dqflush_check(
        struct xfs_dquot        *dqp)
{
        xfs_dqtype_t            type = xfs_dquot_type(dqp);

        if (type != XFS_DQTYPE_USER &&
            type != XFS_DQTYPE_GROUP &&
            type != XFS_DQTYPE_PROJ)
                return __this_address;

        if (dqp->q_id == 0)
                return NULL;

        if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit &&
            !dqp->q_blk.timer)
                return __this_address;

        if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit &&
            !dqp->q_ino.timer)
                return __this_address;

        if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit &&
            !dqp->q_rtb.timer)
                return __this_address;

        /* bigtime flag should never be set on root dquots */
        if (dqp->q_type & XFS_DQTYPE_BIGTIME) {
                if (!xfs_has_bigtime(dqp->q_mount))
                        return __this_address;
                if (dqp->q_id == 0)
                        return __this_address;
        }

        return NULL;
}

/*
 * Write a modified dquot to disk.
 * The caller must hold the dquot lock and must also have taken the
 * flush lock.  The flush lock will not be unlocked until the dquot
 * reaches the disk, but the dquot is free to be unlocked and modified
 * by the caller in the interim.  The dquot is still locked on return.
 * This behavior is identical to that of inodes.
 */
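/*
 * A typical caller pattern (a sketch based on the in-tree users): queue
 * the returned buffer for delayed write, then release it:
 *
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 */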
int
xfs_qm_dqflush(
        struct xfs_dquot        *dqp,
        struct xfs_buf          **bpp)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_log_item     *lip = &dqp->q_logitem.qli_item;
        struct xfs_buf          *bp;
        struct xfs_dqblk        *dqblk;
        xfs_failaddr_t          fa;
        int                     error;

        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(!completion_done(&dqp->q_flush));

        trace_xfs_dqflush(dqp);

        *bpp = NULL;

        xfs_qm_dqunpin_wait(dqp);

        /*
         * Get the buffer containing the on-disk dquot
         */
        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
                                   mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
                                   &bp, &xfs_dquot_buf_ops);
        if (error == -EAGAIN)
                goto out_unlock;
        if (error)
                goto out_abort;

        fa = xfs_qm_dqflush_check(dqp);
        if (fa) {
                xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
                                dqp->q_id, fa);
                xfs_buf_relse(bp);
                error = -EFSCORRUPTED;
                goto out_abort;
        }

        /* Flush the incore dquot to the ondisk buffer. */
        dqblk = bp->b_addr + dqp->q_bufoffset;
        xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp);

        /*
         * Clear the dirty field and remember the flush lsn for later use.
         */
        dqp->q_flags &= ~XFS_DQFLAG_DIRTY;

        xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
                                        &dqp->q_logitem.qli_item.li_lsn);

        /*
         * copy the lsn into the on-disk dquot now while we have the in memory
         * dquot here. This can't be done later in the write verifier as we
         * can't get access to the log item at that point in time.
         *
         * We also calculate the CRC here so that the on-disk dquot in the
         * buffer always has a valid CRC. This ensures there is no possibility
         * of a dquot without an up-to-date CRC getting to disk.
         */
        if (xfs_has_crc(mp)) {
                dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
                xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk),
                                 XFS_DQUOT_CRC_OFF);
        }

        /*
         * Attach the dquot to the buffer so that we can remove this dquot from
         * the AIL and release the flush lock once the dquot is synced to disk.
         */
        bp->b_flags |= _XBF_DQUOTS;
        list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list);

        /*
         * If the buffer is pinned then push on the log so we won't
         * get stuck waiting in the write for too long.
         */
        if (xfs_buf_ispinned(bp)) {
                trace_xfs_dqflush_force(dqp);
                xfs_log_force(mp, 0);
        }

        trace_xfs_dqflush_done(dqp);
        *bpp = bp;
        return 0;

out_abort:
        dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
        xfs_trans_ail_delete(lip, 0);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
out_unlock:
        xfs_dqfunlock(dqp);
        return error;
}

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lowest id first.
 */
void
xfs_dqlock2(
        struct xfs_dquot        *d1,
        struct xfs_dquot        *d2)
{
        if (d1 && d2) {
                ASSERT(d1 != d2);
                if (d1->q_id > d2->q_id) {
                        mutex_lock(&d2->q_qlock);
                        mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
                } else {
                        mutex_lock(&d1->q_qlock);
                        mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
                }
        } else if (d1) {
                mutex_lock(&d1->q_qlock);
        } else if (d2) {
                mutex_lock(&d2->q_qlock);
        }
}

int __init
xfs_qm_init(void)
{
        xfs_dquot_cache = kmem_cache_create("xfs_dquot",
                                          sizeof(struct xfs_dquot),
                                          0, 0, NULL);
        if (!xfs_dquot_cache)
                goto out;

        xfs_dqtrx_cache = kmem_cache_create("xfs_dqtrx",
                                             sizeof(struct xfs_dquot_acct),
                                             0, 0, NULL);
        if (!xfs_dqtrx_cache)
                goto out_free_dquot_cache;

        return 0;

out_free_dquot_cache:
        kmem_cache_destroy(xfs_dquot_cache);
out:
        return -ENOMEM;
}

void
xfs_qm_exit(void)
{
        kmem_cache_destroy(xfs_dqtrx_cache);
        kmem_cache_destroy(xfs_dquot_cache);
}

/*
 * Iterate every dquot of a particular type.  The caller must ensure that the
 * particular quota type is active.  iter_fn can return negative error codes,
 * or -ECANCELED to indicate that it wants to stop iterating.
 */
int
xfs_qm_dqiterate(
        struct xfs_mount        *mp,
        xfs_dqtype_t            type,
        xfs_qm_dqiterate_fn     iter_fn,
        void                    *priv)
{
        struct xfs_dquot        *dq;
        xfs_dqid_t              id = 0;
        int                     error;

        do {
                error = xfs_qm_dqget_next(mp, id, type, &dq);
                if (error == -ENOENT)
                        return 0;
                if (error)
                        return error;

                error = iter_fn(dq, type, priv);
                id = dq->q_id;
                xfs_qm_dqput(dq);
        } while (error == 0 && id != 0);

        return error;
}
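/*
 * A minimal iteration sketch (hypothetical callback, for illustration
 * only): count the user dquots that are over their block soft limit.
 *
 *	static int
 *	count_over_soft(struct xfs_dquot *dq, xfs_dqtype_t type, void *priv)
 *	{
 *		unsigned long	*count = priv;
 *
 *		if (dq->q_blk.softlimit &&
 *		    dq->q_blk.count > dq->q_blk.softlimit)
 *			(*count)++;
 *		return 0;	// return -ECANCELED to stop early
 *	}
 *
 *	unsigned long count = 0;
 *	error = xfs_qm_dqiterate(mp, XFS_DQTYPE_USER, count_over_soft, &count);
 */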