linux/fs/xfs/xfs_dquot.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
   4 * All Rights Reserved.
   5 */
   6#include "xfs.h"
   7#include "xfs_fs.h"
   8#include "xfs_format.h"
   9#include "xfs_log_format.h"
  10#include "xfs_shared.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_bit.h"
  13#include "xfs_mount.h"
  14#include "xfs_defer.h"
  15#include "xfs_inode.h"
  16#include "xfs_bmap.h"
  17#include "xfs_bmap_util.h"
  18#include "xfs_alloc.h"
  19#include "xfs_quota.h"
  20#include "xfs_error.h"
  21#include "xfs_trans.h"
  22#include "xfs_buf_item.h"
  23#include "xfs_trans_space.h"
  24#include "xfs_trans_priv.h"
  25#include "xfs_qm.h"
  26#include "xfs_cksum.h"
  27#include "xfs_trace.h"
  28#include "xfs_log.h"
  29#include "xfs_bmap_btree.h"
  30
  31/*
  32 * Lock order:
  33 *
  34 * ip->i_lock
  35 *   qi->qi_tree_lock
  36 *     dquot->q_qlock (xfs_dqlock() and friends)
  37 *       dquot->q_flush (xfs_dqflock() and friends)
  38 *       qi->qi_lru_lock
  39 *
  40 * If two dquots need to be locked the order is user before group/project,
  41 * otherwise by the lowest id first, see xfs_dqlock2.
  42 */
  43
  44struct kmem_zone                *xfs_qm_dqtrxzone;
  45static struct kmem_zone         *xfs_qm_dqzone;
  46
  47static struct lock_class_key xfs_dquot_group_class;
  48static struct lock_class_key xfs_dquot_project_class;
  49
  50/*
  51 * This is called to free all the memory associated with a dquot
  52 */
  53void
  54xfs_qm_dqdestroy(
  55        xfs_dquot_t     *dqp)
  56{
  57        ASSERT(list_empty(&dqp->q_lru));
  58
  59        kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
  60        mutex_destroy(&dqp->q_qlock);
  61
  62        XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
  63        kmem_zone_free(xfs_qm_dqzone, dqp);
  64}
  65
  66/*
  67 * If default limits are in force, push them into the dquot now.
  68 * We overwrite the dquot limits only if they are zero and this
  69 * is not the root dquot.
  70 */
  71void
  72xfs_qm_adjust_dqlimits(
  73        struct xfs_mount        *mp,
  74        struct xfs_dquot        *dq)
  75{
  76        struct xfs_quotainfo    *q = mp->m_quotainfo;
  77        struct xfs_disk_dquot   *d = &dq->q_core;
  78        struct xfs_def_quota    *defq;
  79        int                     prealloc = 0;
  80
  81        ASSERT(d->d_id);
  82        defq = xfs_get_defquota(dq, q);
  83
  84        if (defq->bsoftlimit && !d->d_blk_softlimit) {
  85                d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
  86                prealloc = 1;
  87        }
  88        if (defq->bhardlimit && !d->d_blk_hardlimit) {
  89                d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
  90                prealloc = 1;
  91        }
  92        if (defq->isoftlimit && !d->d_ino_softlimit)
  93                d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
  94        if (defq->ihardlimit && !d->d_ino_hardlimit)
  95                d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
  96        if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
  97                d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
  98        if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
  99                d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
 100
 101        if (prealloc)
 102                xfs_dquot_set_prealloc_limits(dq);
 103}
 104
 105/*
 106 * Check the limits and timers of a dquot and start or reset timers
 107 * if necessary.
  108 * This gets called even when quota enforcement is OFF, which makes our
  109 * life a little less complicated: we simply don't reject any quota
  110 * reservations in that case.
  111 * We also return 0 as the values of the timers in Q_GETQUOTA calls when
  112 * enforcement is off.
 113 * In contrast, warnings are a little different in that they don't
 114 * 'automatically' get started when limits get exceeded.  They do
 115 * get reset to zero, however, when we find the count to be under
 116 * the soft limit (they are only ever set non-zero via userspace).
 117 */
 118void
 119xfs_qm_adjust_dqtimers(
 120        xfs_mount_t             *mp,
 121        xfs_disk_dquot_t        *d)
 122{
 123        ASSERT(d->d_id);
 124
 125#ifdef DEBUG
 126        if (d->d_blk_hardlimit)
 127                ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
 128                       be64_to_cpu(d->d_blk_hardlimit));
 129        if (d->d_ino_hardlimit)
 130                ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
 131                       be64_to_cpu(d->d_ino_hardlimit));
 132        if (d->d_rtb_hardlimit)
 133                ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
 134                       be64_to_cpu(d->d_rtb_hardlimit));
 135#endif
 136
 137        if (!d->d_btimer) {
 138                if ((d->d_blk_softlimit &&
 139                     (be64_to_cpu(d->d_bcount) >
 140                      be64_to_cpu(d->d_blk_softlimit))) ||
 141                    (d->d_blk_hardlimit &&
 142                     (be64_to_cpu(d->d_bcount) >
 143                      be64_to_cpu(d->d_blk_hardlimit)))) {
 144                        d->d_btimer = cpu_to_be32(get_seconds() +
 145                                        mp->m_quotainfo->qi_btimelimit);
 146                } else {
 147                        d->d_bwarns = 0;
 148                }
 149        } else {
 150                if ((!d->d_blk_softlimit ||
 151                     (be64_to_cpu(d->d_bcount) <=
 152                      be64_to_cpu(d->d_blk_softlimit))) &&
 153                    (!d->d_blk_hardlimit ||
 154                    (be64_to_cpu(d->d_bcount) <=
 155                     be64_to_cpu(d->d_blk_hardlimit)))) {
 156                        d->d_btimer = 0;
 157                }
 158        }
 159
 160        if (!d->d_itimer) {
 161                if ((d->d_ino_softlimit &&
 162                     (be64_to_cpu(d->d_icount) >
 163                      be64_to_cpu(d->d_ino_softlimit))) ||
 164                    (d->d_ino_hardlimit &&
 165                     (be64_to_cpu(d->d_icount) >
 166                      be64_to_cpu(d->d_ino_hardlimit)))) {
 167                        d->d_itimer = cpu_to_be32(get_seconds() +
 168                                        mp->m_quotainfo->qi_itimelimit);
 169                } else {
 170                        d->d_iwarns = 0;
 171                }
 172        } else {
 173                if ((!d->d_ino_softlimit ||
 174                     (be64_to_cpu(d->d_icount) <=
 175                      be64_to_cpu(d->d_ino_softlimit)))  &&
 176                    (!d->d_ino_hardlimit ||
 177                     (be64_to_cpu(d->d_icount) <=
 178                      be64_to_cpu(d->d_ino_hardlimit)))) {
 179                        d->d_itimer = 0;
 180                }
 181        }
 182
 183        if (!d->d_rtbtimer) {
 184                if ((d->d_rtb_softlimit &&
 185                     (be64_to_cpu(d->d_rtbcount) >
 186                      be64_to_cpu(d->d_rtb_softlimit))) ||
 187                    (d->d_rtb_hardlimit &&
 188                     (be64_to_cpu(d->d_rtbcount) >
 189                      be64_to_cpu(d->d_rtb_hardlimit)))) {
 190                        d->d_rtbtimer = cpu_to_be32(get_seconds() +
 191                                        mp->m_quotainfo->qi_rtbtimelimit);
 192                } else {
 193                        d->d_rtbwarns = 0;
 194                }
 195        } else {
 196                if ((!d->d_rtb_softlimit ||
 197                     (be64_to_cpu(d->d_rtbcount) <=
 198                      be64_to_cpu(d->d_rtb_softlimit))) &&
 199                    (!d->d_rtb_hardlimit ||
 200                     (be64_to_cpu(d->d_rtbcount) <=
 201                      be64_to_cpu(d->d_rtb_hardlimit)))) {
 202                        d->d_rtbtimer = 0;
 203                }
 204        }
 205}
 206
 207/*
 208 * initialize a buffer full of dquots and log the whole thing
 209 */
 210STATIC void
 211xfs_qm_init_dquot_blk(
 212        xfs_trans_t     *tp,
 213        xfs_mount_t     *mp,
 214        xfs_dqid_t      id,
 215        uint            type,
 216        xfs_buf_t       *bp)
 217{
 218        struct xfs_quotainfo    *q = mp->m_quotainfo;
 219        xfs_dqblk_t     *d;
 220        xfs_dqid_t      curid;
 221        int             i;
 222
 223        ASSERT(tp);
 224        ASSERT(xfs_buf_islocked(bp));
 225
 226        d = bp->b_addr;
 227
 228        /*
  229         * ID of the first dquot in the block; dquot IDs are zero-based.
 230         */
 231        curid = id - (id % q->qi_dqperchunk);
 232        memset(d, 0, BBTOB(q->qi_dqchunklen));
 233        for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
 234                d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
 235                d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
 236                d->dd_diskdq.d_id = cpu_to_be32(curid);
 237                d->dd_diskdq.d_flags = type;
 238                if (xfs_sb_version_hascrc(&mp->m_sb)) {
 239                        uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid);
 240                        xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
 241                                         XFS_DQUOT_CRC_OFF);
 242                }
 243        }
 244
 245        xfs_trans_dquot_buf(tp, bp,
 246                            (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
 247                            ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
 248                             XFS_BLF_GDQUOT_BUF)));
 249        xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
 250}
 251
 252/*
 253 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 254 * watermarks correspond to the soft and hard limits by default. If a soft limit
 255 * is not specified, we use 95% of the hard limit.
 256 */
 257void
 258xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
 259{
 260        uint64_t space;
 261
 262        dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
 263        dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
 264        if (!dqp->q_prealloc_lo_wmark) {
 265                dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
 266                do_div(dqp->q_prealloc_lo_wmark, 100);
 267                dqp->q_prealloc_lo_wmark *= 95;
 268        }
 269
 270        space = dqp->q_prealloc_hi_wmark;
 271
 272        do_div(space, 100);
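             /*
              * Precompute 1%, 3% and 5% of the hard limit; these low-space
              * thresholds are used to throttle speculative preallocation as
              * a quota approaches its block hard limit.
              */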
 273        dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
 274        dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
 275        dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
 276}
 277
 278/*
 279 * Ensure that the given in-core dquot has a buffer on disk backing it, and
 280 * return the buffer locked and held. This is called when the bmapi finds a
 281 * hole.
 282 */
 283STATIC int
 284xfs_dquot_disk_alloc(
 285        struct xfs_trans        **tpp,
 286        struct xfs_dquot        *dqp,
 287        struct xfs_buf          **bpp)
 288{
 289        struct xfs_bmbt_irec    map;
 290        struct xfs_trans        *tp = *tpp;
 291        struct xfs_mount        *mp = tp->t_mountp;
 292        struct xfs_buf          *bp;
 293        struct xfs_inode        *quotip = xfs_quota_inode(mp, dqp->dq_flags);
 294        int                     nmaps = 1;
 295        int                     error;
 296
 297        trace_xfs_dqalloc(dqp);
 298
 299        xfs_ilock(quotip, XFS_ILOCK_EXCL);
 300        if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
 301                /*
  302                 * Return if this quota type was turned off while we didn't
  303                 * hold the quota inode lock.
 304                 */
 305                xfs_iunlock(quotip, XFS_ILOCK_EXCL);
 306                return -ESRCH;
 307        }
 308
 309        /* Create the block mapping. */
 310        xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
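             /*
              * Joining with XFS_ILOCK_EXCL hands the quota inode lock over to
              * the transaction, so the error paths below need not unlock it.
              */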
 311        error = xfs_bmapi_write(tp, quotip, dqp->q_fileoffset,
 312                        XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
 313                        XFS_QM_DQALLOC_SPACE_RES(mp), &map, &nmaps);
 314        if (error)
 315                return error;
 316        ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
 317        ASSERT(nmaps == 1);
 318        ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
 319               (map.br_startblock != HOLESTARTBLOCK));
 320
 321        /*
 322         * Keep track of the blkno to save a lookup later
 323         */
 324        dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 325
 326        /* now we can just get the buffer (there's nothing to read yet) */
 327        bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, dqp->q_blkno,
 328                        mp->m_quotainfo->qi_dqchunklen, 0);
 329        if (!bp)
 330                return -ENOMEM;
 331        bp->b_ops = &xfs_dquot_buf_ops;
 332
 333        /*
 334         * Make a chunk of dquots out of this buffer and log
 335         * the entire thing.
 336         */
 337        xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
 338                              dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
 339        xfs_buf_set_ref(bp, XFS_DQUOT_REF);
 340
 341        /*
 342         * Hold the buffer and join it to the dfops so that we'll still own
 343         * the buffer when we return to the caller.  The buffer disposal on
 344         * error must be paid attention to very carefully, as it has been
 345         * broken since commit efa092f3d4c6 "[XFS] Fixes a bug in the quota
 346         * code when allocating a new dquot record" in 2005, and the later
 347         * conversion to xfs_defer_ops in commit 310a75a3c6c747 failed to keep
 348         * the buffer locked across the _defer_finish call.  We can now do
 349         * this correctly with xfs_defer_bjoin.
 350         *
 351         * Above, we allocated a disk block for the dquot information and used
 352         * get_buf to initialize the dquot. If the _defer_finish fails, the old
 353         * transaction is gone but the new buffer is not joined or held to any
 354         * transaction, so we must _buf_relse it.
 355         *
 356         * If everything succeeds, the caller of this function is returned a
 357         * buffer that is locked and held to the transaction.  The caller
 358         * is responsible for unlocking any buffer passed back, either
 359         * manually or by committing the transaction.  On error, the buffer is
 360         * released and not passed back.
 361         */
 362        xfs_trans_bhold(tp, bp);
 363        error = xfs_defer_finish(tpp);
 364        if (error) {
 365                xfs_trans_bhold_release(*tpp, bp);
 366                xfs_trans_brelse(*tpp, bp);
 367                return error;
 368        }
 369        *bpp = bp;
 370        return 0;
 371}
 372
 373/*
 374 * Read in the in-core dquot's on-disk metadata and return the buffer.
 375 * Returns ENOENT to signal a hole.
 376 */
 377STATIC int
 378xfs_dquot_disk_read(
 379        struct xfs_mount        *mp,
 380        struct xfs_dquot        *dqp,
 381        struct xfs_buf          **bpp)
 382{
 383        struct xfs_bmbt_irec    map;
 384        struct xfs_buf          *bp;
 385        struct xfs_inode        *quotip = xfs_quota_inode(mp, dqp->dq_flags);
 386        uint                    lock_mode;
 387        int                     nmaps = 1;
 388        int                     error;
 389
 390        lock_mode = xfs_ilock_data_map_shared(quotip);
 391        if (!xfs_this_quota_on(mp, dqp->dq_flags)) {
 392                /*
  393                 * Return if this quota type was turned off while we
  394                 * didn't hold the quota inode lock.
 395                 */
 396                xfs_iunlock(quotip, lock_mode);
 397                return -ESRCH;
 398        }
 399
 400        /*
 401         * Find the block map; no allocations yet
 402         */
 403        error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
 404                        XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);
 405        xfs_iunlock(quotip, lock_mode);
 406        if (error)
 407                return error;
 408
 409        ASSERT(nmaps == 1);
 410        ASSERT(map.br_blockcount >= 1);
 411        ASSERT(map.br_startblock != DELAYSTARTBLOCK);
 412        if (map.br_startblock == HOLESTARTBLOCK)
 413                return -ENOENT;
 414
 415        trace_xfs_dqtobp_read(dqp);
 416
 417        /*
 418         * store the blkno etc so that we don't have to do the
 419         * mapping all the time
 420         */
 421        dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
 422
 423        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
 424                        mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 425                        &xfs_dquot_buf_ops);
 426        if (error) {
 427                ASSERT(bp == NULL);
 428                return error;
 429        }
 430
 431        ASSERT(xfs_buf_islocked(bp));
 432        xfs_buf_set_ref(bp, XFS_DQUOT_REF);
 433        *bpp = bp;
 434
 435        return 0;
 436}
 437
 438/* Allocate and initialize everything we need for an incore dquot. */
 439STATIC struct xfs_dquot *
 440xfs_dquot_alloc(
 441        struct xfs_mount        *mp,
 442        xfs_dqid_t              id,
 443        uint                    type)
 444{
 445        struct xfs_dquot        *dqp;
 446
 447        dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);
 448
 449        dqp->dq_flags = type;
 450        dqp->q_core.d_id = cpu_to_be32(id);
 451        dqp->q_mount = mp;
 452        INIT_LIST_HEAD(&dqp->q_lru);
 453        mutex_init(&dqp->q_qlock);
 454        init_waitqueue_head(&dqp->q_pinwait);
 455        dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
 456        /*
  457         * Offset of the dquot within the (fixed-size) dquot chunk.
 458         */
 459        dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
 460                        sizeof(xfs_dqblk_t);
 461
 462        /*
  463         * Because we want to use a counting completion, complete
  464         * the flush completion once now so that the first flush-lock
  465         * acquisition can proceed without blocking.
 466         */
 467        init_completion(&dqp->q_flush);
 468        complete(&dqp->q_flush);
 469
 470        /*
 471         * Make sure group quotas have a different lock class than user
 472         * quotas.
 473         */
 474        switch (type) {
 475        case XFS_DQ_USER:
 476                /* uses the default lock class */
 477                break;
 478        case XFS_DQ_GROUP:
 479                lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
 480                break;
 481        case XFS_DQ_PROJ:
 482                lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
 483                break;
 484        default:
 485                ASSERT(0);
 486                break;
 487        }
 488
 489        xfs_qm_dquot_logitem_init(dqp);
 490
 491        XFS_STATS_INC(mp, xs_qm_dquot);
 492        return dqp;
 493}
 494
  495/* Copy the quota fields from the on-disk buffer into the in-core dquot. */
 496STATIC void
 497xfs_dquot_from_disk(
 498        struct xfs_dquot        *dqp,
 499        struct xfs_buf          *bp)
 500{
 501        struct xfs_disk_dquot   *ddqp = bp->b_addr + dqp->q_bufoffset;
 502
 503        /* copy everything from disk dquot to the incore dquot */
 504        memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
 505
 506        /*
 507         * Reservation counters are defined as reservation plus current usage
 508         * to avoid having to add every time.
 509         */
 510        dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
 511        dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
 512        dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
 513
 514        /* initialize the dquot speculative prealloc thresholds */
 515        xfs_dquot_set_prealloc_limits(dqp);
 516}
 517
 518/* Allocate and initialize the dquot buffer for this in-core dquot. */
 519static int
 520xfs_qm_dqread_alloc(
 521        struct xfs_mount        *mp,
 522        struct xfs_dquot        *dqp,
 523        struct xfs_buf          **bpp)
 524{
 525        struct xfs_trans        *tp;
 526        int                     error;
 527
 528        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
 529                        XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
 530        if (error)
 531                goto err;
 532
 533        error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
 534        if (error)
 535                goto err_cancel;
 536
 537        error = xfs_trans_commit(tp);
 538        if (error) {
 539                /*
 540                 * Buffer was held to the transaction, so we have to unlock it
 541                 * manually here because we're not passing it back.
 542                 */
 543                xfs_buf_relse(*bpp);
 544                *bpp = NULL;
 545                goto err;
 546        }
 547        return 0;
 548
 549err_cancel:
 550        xfs_trans_cancel(tp);
 551err:
 552        return error;
 553}
 554
 555/*
  556 * Read the on-disk dquot, copy it into an incore dquot and release the
  557 * buffer immediately.  If @can_alloc is true, fill any
 558 * holes in the on-disk metadata.
 559 */
 560static int
 561xfs_qm_dqread(
 562        struct xfs_mount        *mp,
 563        xfs_dqid_t              id,
 564        uint                    type,
 565        bool                    can_alloc,
 566        struct xfs_dquot        **dqpp)
 567{
 568        struct xfs_dquot        *dqp;
 569        struct xfs_buf          *bp;
 570        int                     error;
 571
 572        dqp = xfs_dquot_alloc(mp, id, type);
 573        trace_xfs_dqread(dqp);
 574
 575        /* Try to read the buffer, allocating if necessary. */
 576        error = xfs_dquot_disk_read(mp, dqp, &bp);
 577        if (error == -ENOENT && can_alloc)
 578                error = xfs_qm_dqread_alloc(mp, dqp, &bp);
 579        if (error)
 580                goto err;
 581
 582        /*
 583         * At this point we should have a clean locked buffer.  Copy the data
 584         * to the incore dquot and release the buffer since the incore dquot
 585         * has its own locking protocol so we needn't tie up the buffer any
 586         * further.
 587         */
 588        ASSERT(xfs_buf_islocked(bp));
 589        xfs_dquot_from_disk(dqp, bp);
 590
 591        xfs_buf_relse(bp);
 592        *dqpp = dqp;
 593        return error;
 594
 595err:
 596        trace_xfs_dqread_fail(dqp);
 597        xfs_qm_dqdestroy(dqp);
 598        *dqpp = NULL;
 599        return error;
 600}
 601
 602/*
  603 * Advance to the next id in the current chunk, or if at the
  604 * end of the chunk, skip ahead to the first id in the next allocated
  605 * chunk by looking up the quota inode's extent map.
 606 */
 607static int
 608xfs_dq_get_next_id(
 609        struct xfs_mount        *mp,
 610        uint                    type,
 611        xfs_dqid_t              *id)
 612{
 613        struct xfs_inode        *quotip = xfs_quota_inode(mp, type);
 614        xfs_dqid_t              next_id = *id + 1; /* simple advance */
 615        uint                    lock_flags;
 616        struct xfs_bmbt_irec    got;
 617        struct xfs_iext_cursor  cur;
 618        xfs_fsblock_t           start;
 619        int                     error = 0;
 620
 621        /* If we'd wrap past the max ID, stop */
 622        if (next_id < *id)
 623                return -ENOENT;
 624
  625        /* If the new ID is within the current chunk, the simple advance suffices */
 626        if (next_id % mp->m_quotainfo->qi_dqperchunk) {
 627                *id = next_id;
 628                return 0;
 629        }
 630
 631        /* Nope, next_id is now past the current chunk, so find the next one */
 632        start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
 633
 634        lock_flags = xfs_ilock_data_map_shared(quotip);
 635        if (!(quotip->i_df.if_flags & XFS_IFEXTENTS)) {
 636                error = xfs_iread_extents(NULL, quotip, XFS_DATA_FORK);
  637                if (error) {
  638                        /* don't leak the quota inode lock on error */
  639                        xfs_iunlock(quotip, lock_flags);
  640                        return error;
  641                }
 639        }
 640
 641        if (xfs_iext_lookup_extent(quotip, &quotip->i_df, start, &cur, &got)) {
 642                /* contiguous chunk, bump startoff for the id calculation */
 643                if (got.br_startoff < start)
 644                        got.br_startoff = start;
 645                *id = got.br_startoff * mp->m_quotainfo->qi_dqperchunk;
 646        } else {
 647                error = -ENOENT;
 648        }
 649
 650        xfs_iunlock(quotip, lock_flags);
 651
 652        return error;
 653}
 654
 655/*
 656 * Look up the dquot in the in-core cache.  If found, the dquot is returned
 657 * locked and ready to go.
 658 */
 659static struct xfs_dquot *
 660xfs_qm_dqget_cache_lookup(
 661        struct xfs_mount        *mp,
 662        struct xfs_quotainfo    *qi,
 663        struct radix_tree_root  *tree,
 664        xfs_dqid_t              id)
 665{
 666        struct xfs_dquot        *dqp;
 667
 668restart:
 669        mutex_lock(&qi->qi_tree_lock);
 670        dqp = radix_tree_lookup(tree, id);
 671        if (!dqp) {
 672                mutex_unlock(&qi->qi_tree_lock);
 673                XFS_STATS_INC(mp, xs_qm_dqcachemisses);
 674                return NULL;
 675        }
 676
 677        xfs_dqlock(dqp);
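             /*
              * A dquot marked XFS_DQ_FREEING is being torn down; back off,
              * wait briefly and retry the lookup so we never hand it out.
              */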
 678        if (dqp->dq_flags & XFS_DQ_FREEING) {
 679                xfs_dqunlock(dqp);
 680                mutex_unlock(&qi->qi_tree_lock);
 681                trace_xfs_dqget_freeing(dqp);
 682                delay(1);
 683                goto restart;
 684        }
 685
 686        dqp->q_nrefs++;
 687        mutex_unlock(&qi->qi_tree_lock);
 688
 689        trace_xfs_dqget_hit(dqp);
 690        XFS_STATS_INC(mp, xs_qm_dqcachehits);
 691        return dqp;
 692}
 693
 694/*
 695 * Try to insert a new dquot into the in-core cache.  If an error occurs the
 696 * caller should throw away the dquot and start over.  Otherwise, the dquot
 697 * is returned locked (and held by the cache) as if there had been a cache
 698 * hit.
 699 */
 700static int
 701xfs_qm_dqget_cache_insert(
 702        struct xfs_mount        *mp,
 703        struct xfs_quotainfo    *qi,
 704        struct radix_tree_root  *tree,
 705        xfs_dqid_t              id,
 706        struct xfs_dquot        *dqp)
 707{
 708        int                     error;
 709
 710        mutex_lock(&qi->qi_tree_lock);
 711        error = radix_tree_insert(tree, id, dqp);
 712        if (unlikely(error)) {
 713                /* Duplicate found!  Caller must try again. */
 714                WARN_ON(error != -EEXIST);
 715                mutex_unlock(&qi->qi_tree_lock);
 716                trace_xfs_dqget_dup(dqp);
 717                return error;
 718        }
 719
 720        /* Return a locked dquot to the caller, with a reference taken. */
 721        xfs_dqlock(dqp);
 722        dqp->q_nrefs = 1;
 723
 724        qi->qi_dquots++;
 725        mutex_unlock(&qi->qi_tree_lock);
 726
 727        return 0;
 728}
 729
 730/* Check our input parameters. */
 731static int
 732xfs_qm_dqget_checks(
 733        struct xfs_mount        *mp,
 734        uint                    type)
 735{
 736        if (WARN_ON_ONCE(!XFS_IS_QUOTA_RUNNING(mp)))
 737                return -ESRCH;
 738
 739        switch (type) {
 740        case XFS_DQ_USER:
 741                if (!XFS_IS_UQUOTA_ON(mp))
 742                        return -ESRCH;
 743                return 0;
 744        case XFS_DQ_GROUP:
 745                if (!XFS_IS_GQUOTA_ON(mp))
 746                        return -ESRCH;
 747                return 0;
 748        case XFS_DQ_PROJ:
 749                if (!XFS_IS_PQUOTA_ON(mp))
 750                        return -ESRCH;
 751                return 0;
 752        default:
 753                WARN_ON_ONCE(0);
 754                return -EINVAL;
 755        }
 756}
 757
 758/*
  759 * Given the file system, id, and type (UDQUOT/GDQUOT), return a locked
  760 * dquot, allocating its on-disk backing store if requested and necessary.
 761 */
 762int
 763xfs_qm_dqget(
 764        struct xfs_mount        *mp,
 765        xfs_dqid_t              id,
 766        uint                    type,
 767        bool                    can_alloc,
 768        struct xfs_dquot        **O_dqpp)
 769{
 770        struct xfs_quotainfo    *qi = mp->m_quotainfo;
 771        struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
 772        struct xfs_dquot        *dqp;
 773        int                     error;
 774
 775        error = xfs_qm_dqget_checks(mp, type);
 776        if (error)
 777                return error;
 778
 779restart:
 780        dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
 781        if (dqp) {
 782                *O_dqpp = dqp;
 783                return 0;
 784        }
 785
 786        error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
 787        if (error)
 788                return error;
 789
 790        error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
 791        if (error) {
 792                /*
 793                 * Duplicate found. Just throw away the new dquot and start
 794                 * over.
 795                 */
 796                xfs_qm_dqdestroy(dqp);
 797                XFS_STATS_INC(mp, xs_qm_dquot_dups);
 798                goto restart;
 799        }
 800
 801        trace_xfs_dqget_miss(dqp);
 802        *O_dqpp = dqp;
 803        return 0;
 804}
 805
 806/*
 807 * Given a dquot id and type, read and initialize a dquot from the on-disk
 808 * metadata.  This function is only for use during quota initialization so
 809 * it ignores the dquot cache assuming that the dquot shrinker isn't set up.
 810 * The caller is responsible for _qm_dqdestroy'ing the returned dquot.
 811 */
 812int
 813xfs_qm_dqget_uncached(
 814        struct xfs_mount        *mp,
 815        xfs_dqid_t              id,
 816        uint                    type,
 817        struct xfs_dquot        **dqpp)
 818{
 819        int                     error;
 820
 821        error = xfs_qm_dqget_checks(mp, type);
 822        if (error)
 823                return error;
 824
  825        return xfs_qm_dqread(mp, id, type, false, dqpp);
 826}
 827
 828/* Return the quota id for a given inode and type. */
 829xfs_dqid_t
 830xfs_qm_id_for_quotatype(
 831        struct xfs_inode        *ip,
 832        uint                    type)
 833{
 834        switch (type) {
 835        case XFS_DQ_USER:
 836                return ip->i_d.di_uid;
 837        case XFS_DQ_GROUP:
 838                return ip->i_d.di_gid;
 839        case XFS_DQ_PROJ:
 840                return xfs_get_projid(ip);
 841        }
 842        ASSERT(0);
 843        return 0;
 844}
 845
 846/*
 847 * Return the dquot for a given inode and type.  If @can_alloc is true, then
  848 * allocate blocks if needed.  The inode's ILOCK must be held and it must not
  849 * already have a dquot of this type attached.
 850 */
 851int
 852xfs_qm_dqget_inode(
 853        struct xfs_inode        *ip,
 854        uint                    type,
 855        bool                    can_alloc,
 856        struct xfs_dquot        **O_dqpp)
 857{
 858        struct xfs_mount        *mp = ip->i_mount;
 859        struct xfs_quotainfo    *qi = mp->m_quotainfo;
 860        struct radix_tree_root  *tree = xfs_dquot_tree(qi, type);
 861        struct xfs_dquot        *dqp;
 862        xfs_dqid_t              id;
 863        int                     error;
 864
 865        error = xfs_qm_dqget_checks(mp, type);
 866        if (error)
 867                return error;
 868
 869        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 870        ASSERT(xfs_inode_dquot(ip, type) == NULL);
 871
 872        id = xfs_qm_id_for_quotatype(ip, type);
 873
 874restart:
 875        dqp = xfs_qm_dqget_cache_lookup(mp, qi, tree, id);
 876        if (dqp) {
 877                *O_dqpp = dqp;
 878                return 0;
 879        }
 880
 881        /*
 882         * Dquot cache miss. We don't want to keep the inode lock across
 883         * a (potential) disk read. Also we don't want to deal with the lock
 884         * ordering between quotainode and this inode. OTOH, dropping the inode
 885         * lock here means dealing with a chown that can happen before
 886         * we re-acquire the lock.
 887         */
 888        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 889        error = xfs_qm_dqread(mp, id, type, can_alloc, &dqp);
 890        xfs_ilock(ip, XFS_ILOCK_EXCL);
 891        if (error)
 892                return error;
 893
 894        /*
 895         * A dquot could be attached to this inode by now, since we had
 896         * dropped the ilock.
 897         */
 898        if (xfs_this_quota_on(mp, type)) {
 899                struct xfs_dquot        *dqp1;
 900
 901                dqp1 = xfs_inode_dquot(ip, type);
 902                if (dqp1) {
 903                        xfs_qm_dqdestroy(dqp);
 904                        dqp = dqp1;
 905                        xfs_dqlock(dqp);
 906                        goto dqret;
 907                }
 908        } else {
 909                /* inode stays locked on return */
 910                xfs_qm_dqdestroy(dqp);
 911                return -ESRCH;
 912        }
 913
 914        error = xfs_qm_dqget_cache_insert(mp, qi, tree, id, dqp);
 915        if (error) {
 916                /*
 917                 * Duplicate found. Just throw away the new dquot and start
 918                 * over.
 919                 */
 920                xfs_qm_dqdestroy(dqp);
 921                XFS_STATS_INC(mp, xs_qm_dquot_dups);
 922                goto restart;
 923        }
 924
 925dqret:
 926        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 927        trace_xfs_dqget_miss(dqp);
 928        *O_dqpp = dqp;
 929        return 0;
 930}
 931
 932/*
 933 * Starting at @id and progressing upwards, look for an initialized incore
 934 * dquot, lock it, and return it.
 935 */
 936int
 937xfs_qm_dqget_next(
 938        struct xfs_mount        *mp,
 939        xfs_dqid_t              id,
 940        uint                    type,
 941        struct xfs_dquot        **dqpp)
 942{
 943        struct xfs_dquot        *dqp;
 944        int                     error = 0;
 945
 946        *dqpp = NULL;
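             /*
              * Walk forward through the quota file.  -ENOENT means there is
              * no on-disk dquot backing this ID, so skip ahead to the next
              * allocated chunk; dquots with neither limits nor usage set are
              * also skipped as uninitialized.
              */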
 947        for (; !error; error = xfs_dq_get_next_id(mp, type, &id)) {
 948                error = xfs_qm_dqget(mp, id, type, false, &dqp);
 949                if (error == -ENOENT)
 950                        continue;
 951                else if (error != 0)
 952                        break;
 953
 954                if (!XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
 955                        *dqpp = dqp;
 956                        return 0;
 957                }
 958
 959                xfs_qm_dqput(dqp);
 960        }
 961
 962        return error;
 963}
 964
 965/*
  966 * Release a reference to the dquot (decrement ref-count) and unlock it.
  967 *
  968 * If this is the last reference, the dquot is left in the cache and added
  969 * to the LRU list so that it can be reclaimed or reused by a later lookup.
 970 */
 971void
 972xfs_qm_dqput(
 973        struct xfs_dquot        *dqp)
 974{
 975        ASSERT(dqp->q_nrefs > 0);
 976        ASSERT(XFS_DQ_IS_LOCKED(dqp));
 977
 978        trace_xfs_dqput(dqp);
 979
 980        if (--dqp->q_nrefs == 0) {
 981                struct xfs_quotainfo    *qi = dqp->q_mount->m_quotainfo;
 982                trace_xfs_dqput_free(dqp);
 983
 984                if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
 985                        XFS_STATS_INC(dqp->q_mount, xs_qm_dquot_unused);
 986        }
 987        xfs_dqunlock(dqp);
 988}
 989
 990/*
  991 * Release a dquot reference.  The dquot must not be locked by the caller;
  992 * dirty dquots are deliberately not flushed here (see below).
 993 */
 994void
 995xfs_qm_dqrele(
 996        xfs_dquot_t     *dqp)
 997{
 998        if (!dqp)
 999                return;
1000
1001        trace_xfs_dqrele(dqp);
1002
1003        xfs_dqlock(dqp);
1004        /*
1005         * We don't care to flush it if the dquot is dirty here.
1006         * That will create stutters that we want to avoid.
1007         * Instead we do a delayed write when we try to reclaim
1008         * a dirty dquot. Also xfs_sync will take part of the burden...
1009         */
1010        xfs_qm_dqput(dqp);
1011}
1012
1013/*
1014 * This is the dquot flushing I/O completion routine.  It is called
1015 * from interrupt level when the buffer containing the dquot is
1016 * flushed to disk.  It is responsible for removing the dquot logitem
1017 * from the AIL if it has not been re-logged, and unlocking the dquot's
 1018 * flush lock. This behavior is very similar to that of inodes.
1019 */
1020STATIC void
1021xfs_qm_dqflush_done(
1022        struct xfs_buf          *bp,
1023        struct xfs_log_item     *lip)
1024{
1025        xfs_dq_logitem_t        *qip = (struct xfs_dq_logitem *)lip;
1026        xfs_dquot_t             *dqp = qip->qli_dquot;
1027        struct xfs_ail          *ailp = lip->li_ailp;
1028
1029        /*
 1030         * We only want to pull the item from the AIL if its location in
 1031         * the log has not changed since we started the flush, i.e. if the
 1032         * dquot's lsn is unchanged.
 1033         * First we check the lsn outside the lock since it's cheaper, and
 1034         * then we recheck while holding the lock before removing the
 1035         * dquot from the AIL.
1036         */
1037        if (test_bit(XFS_LI_IN_AIL, &lip->li_flags) &&
1038            ((lip->li_lsn == qip->qli_flush_lsn) ||
1039             test_bit(XFS_LI_FAILED, &lip->li_flags))) {
1040
1041                /* xfs_trans_ail_delete() drops the AIL lock. */
1042                spin_lock(&ailp->ail_lock);
1043                if (lip->li_lsn == qip->qli_flush_lsn) {
1044                        xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
1045                } else {
1046                        /*
1047                         * Clear the failed state since we are about to drop the
1048                         * flush lock
1049                         */
1050                        xfs_clear_li_failed(lip);
1051                        spin_unlock(&ailp->ail_lock);
1052                }
1053        }
1054
1055        /*
1056         * Release the dq's flush lock since we're done with it.
1057         */
1058        xfs_dqfunlock(dqp);
1059}
1060
1061/*
1062 * Write a modified dquot to disk.
1063 * The dquot must be locked and the flush lock too taken by caller.
1064 * The flush lock will not be unlocked until the dquot reaches the disk,
1065 * but the dquot is free to be unlocked and modified by the caller
1066 * in the interim. Dquot is still locked on return. This behavior is
1067 * identical to that of inodes.
1068 */
1069int
1070xfs_qm_dqflush(
1071        struct xfs_dquot        *dqp,
1072        struct xfs_buf          **bpp)
1073{
1074        struct xfs_mount        *mp = dqp->q_mount;
1075        struct xfs_buf          *bp;
1076        struct xfs_dqblk        *dqb;
1077        struct xfs_disk_dquot   *ddqp;
1078        xfs_failaddr_t          fa;
1079        int                     error;
1080
1081        ASSERT(XFS_DQ_IS_LOCKED(dqp));
1082        ASSERT(!completion_done(&dqp->q_flush));
1083
1084        trace_xfs_dqflush(dqp);
1085
1086        *bpp = NULL;
1087
1088        xfs_qm_dqunpin_wait(dqp);
1089
1090        /*
1091         * This may have been unpinned because the filesystem is shutting
1092         * down forcibly. If that's the case we must not write this dquot
1093         * to disk, because the log record didn't make it to disk.
1094         *
1095         * We also have to remove the log item from the AIL in this case,
 1096 * as we wait for an empty AIL as part of the unmount process.
1097         */
1098        if (XFS_FORCED_SHUTDOWN(mp)) {
1099                struct xfs_log_item     *lip = &dqp->q_logitem.qli_item;
1100                dqp->dq_flags &= ~XFS_DQ_DIRTY;
1101
1102                xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);
1103
1104                error = -EIO;
1105                goto out_unlock;
1106        }
1107
1108        /*
1109         * Get the buffer containing the on-disk dquot
1110         */
1111        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
1112                                   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1113                                   &xfs_dquot_buf_ops);
1114        if (error)
1115                goto out_unlock;
1116
1117        /*
1118         * Calculate the location of the dquot inside the buffer.
1119         */
1120        dqb = bp->b_addr + dqp->q_bufoffset;
1121        ddqp = &dqb->dd_diskdq;
1122
1123        /*
1124         * A simple sanity check in case we got a corrupted dquot.
1125         */
1126        fa = xfs_dqblk_verify(mp, dqb, be32_to_cpu(ddqp->d_id), 0);
1127        if (fa) {
1128                xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
1129                                be32_to_cpu(ddqp->d_id), fa);
1130                xfs_buf_relse(bp);
1131                xfs_dqfunlock(dqp);
1132                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1133                return -EIO;
1134        }
1135
1136        /* This is the only portion of data that needs to persist */
1137        memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));
1138
1139        /*
1140         * Clear the dirty field and remember the flush lsn for later use.
1141         */
1142        dqp->dq_flags &= ~XFS_DQ_DIRTY;
1143
1144        xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
1145                                        &dqp->q_logitem.qli_item.li_lsn);
1146
1147        /*
1148         * copy the lsn into the on-disk dquot now while we have the in memory
1149         * dquot here. This can't be done later in the write verifier as we
1150         * can't get access to the log item at that point in time.
1151         *
1152         * We also calculate the CRC here so that the on-disk dquot in the
1153         * buffer always has a valid CRC. This ensures there is no possibility
1154         * of a dquot without an up-to-date CRC getting to disk.
1155         */
1156        if (xfs_sb_version_hascrc(&mp->m_sb)) {
1157                dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
1158                xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
1159                                 XFS_DQUOT_CRC_OFF);
1160        }
1161
1162        /*
1163         * Attach an iodone routine so that we can remove this dquot from the
1164         * AIL and release the flush lock once the dquot is synced to disk.
1165         */
1166        xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
1167                                  &dqp->q_logitem.qli_item);
1168
1169        /*
1170         * If the buffer is pinned then push on the log so we won't
1171         * get stuck waiting in the write for too long.
1172         */
1173        if (xfs_buf_ispinned(bp)) {
1174                trace_xfs_dqflush_force(dqp);
1175                xfs_log_force(mp, 0);
1176        }
1177
1178        trace_xfs_dqflush_done(dqp);
1179        *bpp = bp;
1180        return 0;
1181
1182out_unlock:
1183        xfs_dqfunlock(dqp);
1184        return -EIO;
1185}
1186
1187/*
1188 * Lock two xfs_dquot structures.
1189 *
 1190 * To avoid deadlocks we always lock the dquot with
 1191 * the lower id first.
1192 */
1193void
1194xfs_dqlock2(
1195        xfs_dquot_t     *d1,
1196        xfs_dquot_t     *d2)
1197{
1198        if (d1 && d2) {
1199                ASSERT(d1 != d2);
1200                if (be32_to_cpu(d1->q_core.d_id) >
1201                    be32_to_cpu(d2->q_core.d_id)) {
1202                        mutex_lock(&d2->q_qlock);
1203                        mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
1204                } else {
1205                        mutex_lock(&d1->q_qlock);
1206                        mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
1207                }
1208        } else if (d1) {
1209                mutex_lock(&d1->q_qlock);
1210        } else if (d2) {
1211                mutex_lock(&d2->q_qlock);
1212        }
1213}
1214
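     /*
      * Create the memory zones (slab caches) for dquots and per-transaction
      * dquot accounting at module init time.
      */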
1215int __init
1216xfs_qm_init(void)
1217{
1218        xfs_qm_dqzone =
1219                kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
1220        if (!xfs_qm_dqzone)
1221                goto out;
1222
1223        xfs_qm_dqtrxzone =
1224                kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
1225        if (!xfs_qm_dqtrxzone)
1226                goto out_free_dqzone;
1227
1228        return 0;
1229
1230out_free_dqzone:
1231        kmem_zone_destroy(xfs_qm_dqzone);
1232out:
1233        return -ENOMEM;
1234}
1235
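     /*
      * Tear down the memory zones created in xfs_qm_init() at module exit.
      */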
1236void
1237xfs_qm_exit(void)
1238{
1239        kmem_zone_destroy(xfs_qm_dqtrxzone);
1240        kmem_zone_destroy(xfs_qm_dqzone);
1241}
1242
1243/*
1244 * Iterate every dquot of a particular type.  The caller must ensure that the
1245 * particular quota type is active.  iter_fn can return negative error codes,
1246 * or XFS_BTREE_QUERY_RANGE_ABORT to indicate that it wants to stop iterating.
1247 */
1248int
1249xfs_qm_dqiterate(
1250        struct xfs_mount        *mp,
1251        uint                    dqtype,
1252        xfs_qm_dqiterate_fn     iter_fn,
1253        void                    *priv)
1254{
1255        struct xfs_dquot        *dq;
1256        xfs_dqid_t              id = 0;
1257        int                     error;
1258
1259        do {
1260                error = xfs_qm_dqget_next(mp, id, dqtype, &dq);
1261                if (error == -ENOENT)
1262                        return 0;
1263                if (error)
1264                        return error;
1265
1266                error = iter_fn(dq, dqtype, priv);
1267                id = be32_to_cpu(dq->q_core.d_id);
1268                xfs_qm_dqput(dq);
1269                id++;
1270        } while (error == 0 && id != 0);
1271
1272        return error;
1273}
1274