linux/fs/gfs2/quota.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes and infinite bandwidth) to twice the
 * user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

struct gfs2_quota_change_host {
        u64 qc_change;
        u32 qc_flags; /* GFS2_QCF_... */
        struct kqid qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

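/**
 * gfs2_qd_shrink_scan - reclaim unused quota data under memory pressure
 * @shrink: The registered shrinker
 * @sc: The shrink control (scan target and gfp mask)
 *
 * Walks the global LRU of unused gfs2_quota_data structures, removing
 * each one from its filesystem's quota list and freeing it, until
 * sc->nr_to_scan entries have been reclaimed or the LRU is empty.
 * Refuses to run (returns SHRINK_STOP) if the caller cannot recurse
 * into filesystem code.
 *
 * Returns: the number of objects freed
 */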
unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
                                  struct shrink_control *sc)
{
        struct gfs2_quota_data *qd;
        struct gfs2_sbd *sdp;
        int nr_to_scan = sc->nr_to_scan;
        long freed = 0;

        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        spin_lock(&qd_lru_lock);
        while (nr_to_scan && !list_empty(&qd_lru_list)) {
                qd = list_entry(qd_lru_list.next,
                                struct gfs2_quota_data, qd_reclaim);
                sdp = qd->qd_gl->gl_sbd;

                /* Free from the filesystem-specific list */
                list_del(&qd->qd_list);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_glock_put(qd->qd_gl);
                atomic_dec(&sdp->sd_quota_count);

                /* Delete it from the common reclaim list */
                list_del_init(&qd->qd_reclaim);
                atomic_dec(&qd_lru_count);
                spin_unlock(&qd_lru_lock);
                kmem_cache_free(gfs2_quotad_cachep, qd);
                spin_lock(&qd_lru_lock);
                nr_to_scan--;
                freed++;
        }
        spin_unlock(&qd_lru_lock);
        return freed;
}

unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        return vfs_pressure_ratio(atomic_read(&qd_lru_count));
}

static u64 qd2index(struct gfs2_quota_data *qd)
{
        struct kqid qid = qd->qd_id;
        return (2 * (u64)from_kqid(&init_user_ns, qid)) +
                ((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
        u64 offset;

        offset = qd2index(qd);
        offset *= sizeof(struct gfs2_quota);

        return offset;
}

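/**
 * qd_alloc - allocate and initialise a new quota data structure
 * @sdp: The GFS2 superblock
 * @qid: The user/group ID this structure will track
 * @qdp: Receives the new structure on success
 *
 * The new structure starts with a single reference held, no slot in
 * the quota change file, and a glock keyed by qd2index(qd).
 *
 * Returns: 0 on success, or -ve on error
 */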
static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
                    struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd;
        int error;

        qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
        if (!qd)
                return -ENOMEM;

        atomic_set(&qd->qd_count, 1);
        qd->qd_id = qid;
        qd->qd_slot = -1;
        INIT_LIST_HEAD(&qd->qd_reclaim);

        error = gfs2_glock_get(sdp, qd2index(qd),
                               &gfs2_quota_glops, CREATE, &qd->qd_gl);
        if (error)
                goto fail;

        *qdp = qd;

        return 0;

fail:
        kmem_cache_free(gfs2_quotad_cachep, qd);
        return error;
}

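/**
 * qd_get - look up (or create) the quota data for an ID
 * @sdp: The GFS2 superblock
 * @qid: The user/group ID
 * @qdp: Receives the quota data with a reference held
 *
 * Searches sdp->sd_quota_list for an existing entry, taking it off the
 * reclaim LRU if necessary. If no entry exists, one is allocated
 * outside the spinlock and the search is retried, so a racing insertion
 * by another caller is detected and the spare allocation is discarded.
 *
 * Returns: 0 on success, or -ve on error
 */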
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
        int error, found;

        *qdp = NULL;

        for (;;) {
                found = 0;
                spin_lock(&qd_lru_lock);
                list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                        if (qid_eq(qd->qd_id, qid)) {
                                if (!atomic_read(&qd->qd_count) &&
                                    !list_empty(&qd->qd_reclaim)) {
                                        /* Remove it from reclaim list */
                                        list_del_init(&qd->qd_reclaim);
                                        atomic_dec(&qd_lru_count);
                                }
                                atomic_inc(&qd->qd_count);
                                found = 1;
                                break;
                        }
                }

                if (!found)
                        qd = NULL;

                if (!qd && new_qd) {
                        qd = new_qd;
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        new_qd = NULL;
                }

                spin_unlock(&qd_lru_lock);

                if (qd) {
                        if (new_qd) {
                                gfs2_glock_put(new_qd->qd_gl);
                                kmem_cache_free(gfs2_quotad_cachep, new_qd);
                        }
                        *qdp = qd;
                        return 0;
                }

                error = qd_alloc(sdp, qid, &new_qd);
                if (error)
                        return error;
        }
}

static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        gfs2_assert(sdp, atomic_read(&qd->qd_count));
        atomic_inc(&qd->qd_count);
}

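/**
 * qd_put - release a reference on a quota data structure
 * @qd: The quota data
 *
 * When the last reference is dropped, the structure is placed on the
 * global reclaim LRU where the shrinker may free it.
 */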
static void qd_put(struct gfs2_quota_data *qd)
{
        if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
                /* Add to the reclaim list */
                list_add_tail(&qd->qd_reclaim, &qd_lru_list);
                atomic_inc(&qd_lru_count);
                spin_unlock(&qd_lru_lock);
        }
}

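/**
 * slot_get - assign a slot in the per-node quota change file
 * @qd: The quota data
 *
 * Scans the quota slot bitmap for a free bit and claims it. If the
 * quota data already holds a slot, only the reference count on the
 * slot is incremented.
 *
 * Returns: 0 on success, or -ENOSPC if no free slot could be found
 */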
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        spin_lock(&qd_lru_lock);

        if (qd->qd_slot_count++) {
                spin_unlock(&qd_lru_lock);
                return 0;
        }

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];
                        if (byte != 0xFF)
                                goto found;
                }

        goto fail;

found:
        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))
                        break;
        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if (qd->qd_slot >= sdp->sd_quota_slots)
                goto fail;

        sdp->sd_quota_bitmap[c][o] |= 1 << b;

        spin_unlock(&qd_lru_lock);

        return 0;

fail:
        qd->qd_slot_count--;
        spin_unlock(&qd_lru_lock);
        return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&qd_lru_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
        spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&qd_lru_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
                qd->qd_slot = -1;
        }
        spin_unlock(&qd_lru_lock);
}

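/**
 * bh_get - read and pin the quota change block for a slot
 * @qd: The quota data (which must already hold a slot)
 *
 * Maps the slot to a block in the quota change file, reads it, and
 * points qd->qd_bh_qc at this ID's gfs2_quota_change entry within the
 * buffer. Subsequent calls only bump qd->qd_bh_count.
 *
 * Returns: 0 on success, or -ve on error
 */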
static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int block, offset;
        struct buffer_head *bh;
        int error;
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

        mutex_lock(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                mutex_unlock(&sdp->sd_quota_mutex);
                return 0;
        }

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        bh_map.b_size = 1 << ip->i_inode.i_blkbits;
        error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
        if (error)
                goto fail;
        error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
        if (error)
                goto fail;
        error = -EIO;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
                goto fail_brelse;

        qd->qd_bh = bh;
        qd->qd_bh_qc = (struct gfs2_quota_change *)
                (bh->b_data + sizeof(struct gfs2_meta_header) +
                 offset * sizeof(struct gfs2_quota_change));

        mutex_unlock(&sdp->sd_quota_mutex);

        return 0;

fail_brelse:
        brelse(bh);
fail:
        qd->qd_bh_count--;
        mutex_unlock(&sdp->sd_quota_mutex);
        return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {
                brelse(qd->qd_bh);
                qd->qd_bh = NULL;
                qd->qd_bh_qc = NULL;
        }
        mutex_unlock(&sdp->sd_quota_mutex);
}

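/**
 * qd_fish - find one quota data in need of syncing to the quota file
 * @sdp: The GFS2 superblock
 * @qdp: Receives a locked quota data, or NULL if none was found
 *
 * Picks the first entry on sdp->sd_quota_list that has pending local
 * changes and has not yet been synced in this generation, marks it
 * QDF_LOCKED, and takes the references needed for the sync.
 *
 * Returns: 0 on success (including when nothing was found), or -ve on error
 */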
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;
        int error;
        int found = 0;

        *qdp = NULL;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&qd_lru_lock);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
                    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
                    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
                        continue;

                list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

                set_bit(QDF_LOCKED, &qd->qd_flags);
                gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
                atomic_inc(&qd->qd_count);
                qd->qd_change_sync = qd->qd_change;
                gfs2_assert_warn(sdp, qd->qd_slot_count);
                qd->qd_slot_count++;
                found = 1;

                break;
        }

        if (!found)
                qd = NULL;

        spin_unlock(&qd_lru_lock);

        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
                error = bh_get(qd);
                if (error) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        return error;
                }
        }

        *qdp = qd;

        return 0;
}

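/**
 * qd_trylock - attempt to lock a specific quota data for syncing
 * @qd: The quota data
 *
 * Like qd_fish(), but targets one known entry rather than searching.
 *
 * Returns: 1 if the entry was locked and should be synced, 0 otherwise
 */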
static int qd_trylock(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&qd_lru_lock);

        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                spin_unlock(&qd_lru_lock);
                return 0;
        }

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

        set_bit(QDF_LOCKED, &qd->qd_flags);
        gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
        atomic_inc(&qd->qd_count);
        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;

        spin_unlock(&qd_lru_lock);

        gfs2_assert_warn(sdp, qd->qd_change_sync);
        if (bh_get(qd)) {
                clear_bit(QDF_LOCKED, &qd->qd_flags);
                slot_put(qd);
                qd_put(qd);
                return 0;
        }

        return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
                    struct gfs2_quota_data **qdp)
{
        int error;

        error = qd_get(sdp, qid, qdp);
        if (error)
                return error;

        error = slot_get(*qdp);
        if (error)
                goto fail;

        error = bh_get(*qdp);
        if (error)
                goto fail_slot;

        return 0;

fail_slot:
        slot_put(*qdp);
fail:
        qd_put(*qdp);
        return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data **qd;
        int error;

        if (ip->i_res == NULL) {
                error = gfs2_rs_alloc(ip);
                if (error)
                        return error;
        }

        qd = ip->i_res->rs_qa_qd;

        if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
                return -EIO;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
        if (error)
                goto out;
        ip->i_res->rs_qa_qd_num++;
        qd++;

        error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
        if (error)
                goto out;
        ip->i_res->rs_qa_qd_num++;
        qd++;

        if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
            !uid_eq(uid, ip->i_inode.i_uid)) {
                error = qdsb_get(sdp, make_kqid_uid(uid), qd);
                if (error)
                        goto out;
                ip->i_res->rs_qa_qd_num++;
                qd++;
        }

        if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
            !gid_eq(gid, ip->i_inode.i_gid)) {
                error = qdsb_get(sdp, make_kqid_gid(gid), qd);
                if (error)
                        goto out;
                ip->i_res->rs_qa_qd_num++;
                qd++;
        }

out:
        if (error)
                gfs2_quota_unhold(ip);
        return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        unsigned int x;

        if (ip->i_res == NULL)
                return;
        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
                qdsb_put(ip->i_res->rs_qa_qd[x]);
                ip->i_res->rs_qa_qd[x] = NULL;
        }
        ip->i_res->rs_qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
        const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
        const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

        if (qid_lt(qd_a->qd_id, qd_b->qd_id))
                return -1;
        if (qid_lt(qd_b->qd_id, qd_a->qd_id))
                return 1;
        return 0;
}

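/**
 * do_qc - record a local quota change in the quota change file
 * @qd: The quota data
 * @change: The change in the number of blocks (may be negative)
 *
 * Adds @change to this node's pending total for the ID. When the
 * pending total first becomes non-zero, the entry takes extra slot and
 * reference counts (QDF_CHANGE); when it returns to zero, those are
 * released and the on-disk tag is cleared.
 */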
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        struct gfs2_quota_change *qc = qd->qd_bh_qc;
        s64 x;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
                qc->qc_change = 0;
                qc->qc_flags = 0;
                if (qd->qd_id.type == USRQUOTA)
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
        }

        x = be64_to_cpu(qc->qc_change) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&qd_lru_lock);
        qd->qd_change = x;
        spin_unlock(&qd_lru_lock);

        if (!x) {
                gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
                clear_bit(QDF_CHANGE, &qd->qd_flags);
                qc->qc_flags = 0;
                qc->qc_id = 0;
                slot_put(qd);
                qd_put(qd);
        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
                qd_hold(qd);
                slot_hold(qd);
        }

        mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page, which was
 * in turn mostly borrowed from ext3.
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd,
                             struct fs_disk_quota *fdq)
{
        struct inode *inode = &ip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        unsigned long index = loc >> PAGE_CACHE_SHIFT;
        unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize, iblock, pos;
        struct buffer_head *bh;
        struct page *page;
        void *kaddr, *ptr;
        struct gfs2_quota q, *qp;
        int err, nbytes;
        u64 size;

        if (gfs2_is_stuffed(ip)) {
                err = gfs2_unstuff_dinode(ip, NULL);
                if (err)
                        return err;
        }

        memset(&q, 0, sizeof(struct gfs2_quota));
        err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
        if (err < 0)
                return err;

        err = -EIO;
        qp = &q;
        qp->qu_value = be64_to_cpu(qp->qu_value);
        qp->qu_value += change;
        qp->qu_value = cpu_to_be64(qp->qu_value);
        qd->qd_qb.qb_value = qp->qu_value;
        if (fdq) {
                if (fdq->d_fieldmask & FS_DQ_BSOFT) {
                        qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
                        qd->qd_qb.qb_warn = qp->qu_warn;
                }
                if (fdq->d_fieldmask & FS_DQ_BHARD) {
                        qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
                        qd->qd_qb.qb_limit = qp->qu_limit;
                }
                if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
                        qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
                        qd->qd_qb.qb_value = qp->qu_value;
                }
        }

        /* Write the quota into the quota file on disk */
        ptr = qp;
        nbytes = sizeof(struct gfs2_quota);
get_a_page:
        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page)
                return -ENOMEM;

        blocksize = inode->i_sb->s_blocksize;
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        bh = page_buffers(page);
        pos = blocksize;
        while (offset >= pos) {
                bh = bh->b_this_page;
                iblock++;
                pos += blocksize;
        }

        if (!buffer_mapped(bh)) {
                gfs2_block_map(inode, iblock, bh, 1);
                if (!buffer_mapped(bh))
                        goto unlock_out;
                /* If it's a newly allocated disk block for quota, zero it */
                if (buffer_new(bh))
                        zero_user(page, pos - blocksize, bh->b_size);
        }

        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ | REQ_META, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto unlock_out;
        }

        gfs2_trans_add_data(ip->i_gl, bh);

        kaddr = kmap_atomic(page);
        if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
                nbytes = PAGE_CACHE_SIZE - offset;
        memcpy(kaddr + offset, ptr, nbytes);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);
        unlock_page(page);
        page_cache_release(page);

        /* If the quota straddles a page boundary, we need to update the rest
         * of the quota at the beginning of the next page */
        if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
                ptr = ptr + nbytes;
                nbytes = sizeof(struct gfs2_quota) - nbytes;
                offset = 0;
                index++;
                goto get_a_page;
        }

        size = loc + sizeof(struct gfs2_quota);
        if (size > inode->i_size)
                i_size_write(inode, size);
        inode->i_mtime = inode->i_atime = CURRENT_TIME;
        mark_inode_dirty(inode);
        return 0;

unlock_out:
        unlock_page(page);
        page_cache_release(page);
        return err;
}

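/**
 * do_sync - sync a batch of quota changes to the global quota file
 * @num_qd: The number of entries in @qda
 * @qda: Array of quota data to sync
 *
 * Takes the quota glocks exclusively (in sorted ID order, so that all
 * nodes acquire them in the same order), applies each node-local
 * change to the quota file with gfs2_adjust_quota(), and then
 * subtracts the synced amount from the local quota change file via
 * do_qc().
 *
 * Returns: 0 on success, or -ve on error
 */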
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
        struct gfs2_quota_data *qd;
        unsigned reserved;
        loff_t offset;
        unsigned int nalloc = 0, blocks;
        int error;

        error = gfs2_rs_alloc(ip);
        if (error)
                return error;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                               &data_blocks, &ind_blocks);

        ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
        if (!ghs)
                return -ENOMEM;

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        mutex_lock(&ip->i_inode.i_mutex);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
                if (error)
                        goto out;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out;

        for (x = 0; x < num_qd; x++) {
                offset = qd2offset(qda[x]);
                if (gfs2_write_alloc_required(ip, offset,
                                              sizeof(struct gfs2_quota)))
                        nalloc++;
        }

        /*
         * 1 blk for unstuffing the inode if stuffed. We add this extra
         * block to the reservation unconditionally. If the inode
         * doesn't need unstuffing, the block will be released to the
         * rgrp since it won't be allocated during the transaction.
         */
        /* +3 at the end for the unstuffing block, the inode size update
         * block, and another block in case a quota straddles a page
         * boundary and two blocks need to be updated instead of one */
        blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

        reserved = 1 + (nalloc * (data_blocks + ind_blocks));
        error = gfs2_inplace_reserve(ip, reserved, 0);
        if (error)
                goto out_alloc;

        if (nalloc)
                blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
                goto out_ipres;

        for (x = 0; x < num_qd; x++) {
                qd = qda[x];
                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
                if (error)
                        goto out_end_trans;

                do_qc(qd, -qd->qd_change_sync);
                set_bit(QDF_REFRESH, &qd->qd_flags);
        }

        error = 0;

out_end_trans:
        gfs2_trans_end(sdp);
out_ipres:
        gfs2_inplace_release(ip);
out_alloc:
        gfs2_glock_dq_uninit(&i_gh);
out:
        while (qx--)
                gfs2_glock_dq_uninit(&ghs[qx]);
        mutex_unlock(&ip->i_inode.i_mutex);
        kfree(ghs);
        gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
        return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_quota q;
        struct gfs2_quota_lvb *qlvb;
        loff_t pos;
        int error;

        memset(&q, 0, sizeof(struct gfs2_quota));
        pos = qd2offset(qd);
        error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
        if (error < 0)
                return error;

        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
        qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
        qlvb->__pad = 0;
        qlvb->qb_limit = q.qu_limit;
        qlvb->qb_warn = q.qu_warn;
        qlvb->qb_value = q.qu_value;
        qd->qd_qb = *qlvb;

        return 0;
}

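/**
 * do_glock - acquire a quota glock and refresh the cached LVB
 * @qd: The quota data
 * @force_refresh: If set, reread the quota from disk unconditionally
 * @q_gh: The holder to fill in
 *
 * Normally takes the glock shared and trusts the limits cached in the
 * lock value block. If the LVB is uninitialised (bad magic) or a
 * refresh is forced, the lock is retaken exclusively, the LVB is
 * rebuilt from the quota file, and the shared acquisition is retried.
 *
 * Returns: 0 on success, or -ve on error
 */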
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_holder i_gh;
        int error;

restart:
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
        if (error)
                return error;

        qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

        if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, q_gh);
                if (error)
                        return error;

                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
                if (error)
                        goto fail;

                error = update_qd(sdp, qd);
                if (error)
                        goto fail_gunlock;

                gfs2_glock_dq_uninit(&i_gh);
                gfs2_glock_dq_uninit(q_gh);
                force_refresh = 0;
                goto restart;
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        gfs2_glock_dq_uninit(q_gh);
        return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qd;
        unsigned int x;
        int error = 0;

        error = gfs2_quota_hold(ip, uid, gid);
        if (error)
                return error;

        if (capable(CAP_SYS_RESOURCE) ||
            sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
             sizeof(struct gfs2_quota_data *), sort_qd, NULL);

        for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
                int force = NO_FORCE;
                qd = ip->i_res->rs_qa_qd[x];
                if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
                        force = FORCE;
                error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
                if (error)
                        break;
        }

        if (!error)
                set_bit(GIF_QD_LOCKED, &ip->i_flags);
        else {
                while (x--)
                        gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
                gfs2_quota_unhold(ip);
        }

        return error;
}

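/**
 * need_sync - decide whether a quota change should be synced now
 * @qd: The quota data
 *
 * Implements the "quota_scale" heuristic described at the top of this
 * file: the local change is scaled by the number of journals times
 * quota_scale_num/quota_scale_den and added to the globally known
 * value; a sync is requested only if that estimate would reach the
 * hard limit.
 *
 * Returns: 1 if the entry should be synced, 0 otherwise
 */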
static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;
        s64 value;
        unsigned int num, den;
        int do_sync = 1;

        if (!qd->qd_qb.qb_limit)
                return 0;

        spin_lock(&qd_lru_lock);
        value = qd->qd_change;
        spin_unlock(&qd_lru_lock);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        if (value < 0)
                do_sync = 0;
        else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
                 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                do_sync = 0;
        else {
                value *= gfs2_jindex_size(sdp) * num;
                value = div_s64(value, den);
                value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
                if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                        do_sync = 0;
        }

        return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;
        unsigned int x;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
                goto out;

        for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
                struct gfs2_quota_data *qd;
                int sync;

                qd = ip->i_res->rs_qa_qd[x];
                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);

                if (sync && qd_trylock(qd))
                        qda[count++] = qd;
        }

        if (count) {
                do_sync(count, qda);
                for (x = 0; x < count; x++)
                        qd_unlock(qda[x]);
        }

out:
        gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
               sdp->sd_fsname, type,
               (qd->qd_id.type == USRQUOTA) ? "user" : "group",
               from_kqid(&init_user_ns, qd->qd_id));

        return 0;
}

int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qd;
        s64 value;
        unsigned int x;
        int error = 0;

        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
                return 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
                qd = ip->i_res->rs_qa_qd[x];

                if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
                      qid_eq(qd->qd_id, make_kqid_gid(gid))))
                        continue;

                value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
                spin_lock(&qd_lru_lock);
                value += qd->qd_change;
                spin_unlock(&qd_lru_lock);

                if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
                        print_message(qd, "exceeded");
                        quota_send_warning(qd->qd_id,
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

                        error = -EDQUOT;
                        break;
                } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
                           (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp,
                                                gt_quota_warn_period) * HZ)) {
                        quota_send_warning(qd->qd_id,
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
                }
        }

        return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
                       kuid_t uid, kgid_t gid)
{
        struct gfs2_quota_data *qd;
        unsigned int x;

        if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
                return;
        if (ip->i_diskflags & GFS2_DIF_SYSTEM)
                return;

        for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
                qd = ip->i_res->rs_qa_qd[x];

                if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
                    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
                        do_qc(qd, change);
                }
        }
}

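/**
 * gfs2_quota_sync - sync all pending local quota changes
 * @sb: The VFS superblock
 * @type: Quota type (unused; both types are synced)
 *
 * Bumps the sync generation, then repeatedly fishes out batches of up
 * to quota_simul_sync changed entries and writes them to the quota
 * file with do_sync().
 *
 * Returns: 0 on success, or -ve on error
 */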
int gfs2_quota_sync(struct super_block *sb, int type)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_data **qda;
        unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
        unsigned int num_qd;
        unsigned int x;
        int error = 0;

        sdp->sd_quota_sync_gen++;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
        if (!qda)
                return -ENOMEM;

        do {
                num_qd = 0;

                for (;;) {
                        error = qd_fish(sdp, qda + num_qd);
                        if (error || !qda[num_qd])
                                break;
                        if (++num_qd == max_qd)
                                break;
                }

                if (num_qd) {
                        if (!error)
                                error = do_sync(num_qd, qda);
                        if (!error)
                                for (x = 0; x < num_qd; x++)
                                        qda[x]->qd_sync_gen =
                                                sdp->sd_quota_sync_gen;

                        for (x = 0; x < num_qd; x++)
                                qd_unlock(qda[x]);
                }
        } while (!error && num_qd == max_qd);

        kfree(qda);

        return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        error = qd_get(sdp, qid, &qd);
        if (error)
                return error;

        error = do_glock(qd, FORCE, &q_gh);
        if (!error)
                gfs2_glock_dq_uninit(&q_gh);

        qd_put(qd);
        return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
        const struct gfs2_quota_change *str = buf;

        qc->qc_change = be64_to_cpu(str->qc_change);
        qc->qc_flags = be32_to_cpu(str->qc_flags);
        qc->qc_id = make_kqid(&init_user_ns,
                              (qc->qc_flags & GFS2_QCF_USER) ? USRQUOTA : GRPQUOTA,
                              be32_to_cpu(str->qc_id));
}

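/**
 * gfs2_quota_init - build the in-core quota state at mount time
 * @sdp: The GFS2 superblock
 *
 * Sizes and allocates the quota slot bitmap, then scans every block of
 * the per-node quota change file. Each non-zero change tag left over
 * from before the last unmount (or crash) gets an in-core quota data
 * entry with QDF_CHANGE set, so it will be synced to the quota file in
 * due course.
 *
 * Returns: 0 on success, or -ve on error
 */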
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        u64 size = i_size_read(sdp->sd_qc_inode);
        unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        u64 dblock;
        u32 extlen = 0;
        int error;

        if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
                return -EIO;

        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

        error = -ENOMEM;

        sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
                                       sizeof(unsigned char *), GFP_NOFS);
        if (!sdp->sd_quota_bitmap)
                return error;

        for (x = 0; x < sdp->sd_quota_chunks; x++) {
                sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
                if (!sdp->sd_quota_bitmap[x])
                        goto fail;
        }

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;
                unsigned int y;

                if (!extlen) {
                        int new = 0;
                        error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
                        if (error)
                                goto fail;
                }
                error = -EIO;
                bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
                if (!bh)
                        goto fail;
                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
                        brelse(bh);
                        goto fail;
                }

                for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
                     y++, slot++) {
                        struct gfs2_quota_change_host qc;
                        struct gfs2_quota_data *qd;

                        gfs2_quota_change_in(&qc, bh->b_data +
                                          sizeof(struct gfs2_meta_header) +
                                          y * sizeof(struct gfs2_quota_change));
                        if (!qc.qc_change)
                                continue;

                        error = qd_alloc(sdp, qc.qc_id, &qd);
                        if (error) {
                                brelse(bh);
                                goto fail;
                        }

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc.qc_change;
                        qd->qd_slot = slot;
                        qd->qd_slot_count = 1;

                        spin_lock(&qd_lru_lock);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&qd_lru_lock);

                        found++;
                }

                brelse(bh);
                dblock++;
                extlen--;
        }

        if (found)
                fs_info(sdp, "found %u quota changes\n", found);

        return 0;

fail:
        gfs2_quota_cleanup(sdp);
        return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;
        unsigned int x;

        spin_lock(&qd_lru_lock);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

                if (atomic_read(&qd->qd_count) > 1 ||
                    (atomic_read(&qd->qd_count) &&
                     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        list_move(&qd->qd_list, head);
                        spin_unlock(&qd_lru_lock);
                        schedule();
                        spin_lock(&qd_lru_lock);
                        continue;
                }

                list_del(&qd->qd_list);
                /* Also remove if this qd exists in the reclaim list */
                if (!list_empty(&qd->qd_reclaim)) {
                        list_del_init(&qd->qd_reclaim);
                        atomic_dec(&qd_lru_count);
                }
                atomic_dec(&sdp->sd_quota_count);
                spin_unlock(&qd_lru_lock);

                if (!atomic_read(&qd->qd_count)) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);
                } else
                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_glock_put(qd->qd_gl);
                kmem_cache_free(gfs2_quotad_cachep, qd);

                spin_lock(&qd_lru_lock);
        }
        spin_unlock(&qd_lru_lock);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

        if (sdp->sd_quota_bitmap) {
                for (x = 0; x < sdp->sd_quota_chunks; x++)
                        kfree(sdp->sd_quota_bitmap[x]);
                kfree(sdp->sd_quota_bitmap);
        }
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
        if (error == 0 || error == -EROFS)
                return;
        if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
                               int (*fxn)(struct super_block *sb, int type),
                               unsigned long t, unsigned long *timeo,
                               unsigned int *new_timeo)
{
        if (t >= *timeo) {
                int error = fxn(sdp->sd_vfs, 0);
                quotad_error(sdp, msg, error);
                *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
        } else {
                *timeo -= t;
        }
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        while (1) {
                ip = NULL;
                spin_lock(&sdp->sd_trunc_lock);
                if (!list_empty(&sdp->sd_trunc_list)) {
                        ip = list_entry(sdp->sd_trunc_list.next,
                                        struct gfs2_inode, i_trunc_list);
                        list_del_init(&ip->i_trunc_list);
                }
                spin_unlock(&sdp->sd_trunc_lock);
                if (ip == NULL)
                        return;
                gfs2_glock_finish_truncate(ip);
        }
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
        if (!sdp->sd_statfs_force_sync) {
                sdp->sd_statfs_force_sync = 1;
                wake_up(&sdp->sd_quota_wait);
        }
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
        struct gfs2_sbd *sdp = data;
        struct gfs2_tune *tune = &sdp->sd_tune;
        unsigned long statfs_timeo = 0;
        unsigned long quotad_timeo = 0;
        unsigned long t = 0;
        DEFINE_WAIT(wait);
        int empty;

        while (!kthread_should_stop()) {

                /* Update the master statfs file */
                if (sdp->sd_statfs_force_sync) {
                        int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
                        quotad_error(sdp, "statfs", error);
                        statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
                } else {
                        quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
                                           &statfs_timeo,
                                           &tune->gt_statfs_quantum);
                }

                /* Update the quota file */
                quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
                                   &quotad_timeo, &tune->gt_quota_quantum);

                /* Check for & recover partially truncated inodes */
                quotad_check_trunc_list(sdp);

                try_to_freeze();

                t = min(quotad_timeo, statfs_timeo);

                prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
                spin_lock(&sdp->sd_trunc_lock);
                empty = list_empty(&sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                if (empty && !sdp->sd_statfs_force_sync)
                        t -= schedule_timeout(t);
                else
                        t = 0;
                finish_wait(&sdp->sd_quota_wait, &wait);
        }

        return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
                                 struct fs_quota_stat *fqs)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;

        memset(fqs, 0, sizeof(struct fs_quota_stat));
        fqs->qs_version = FS_QSTAT_VERSION;

        switch (sdp->sd_args.ar_quota) {
        case GFS2_QUOTA_ON:
                fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
                /*FALLTHRU*/
        case GFS2_QUOTA_ACCOUNT:
                fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
                break;
        case GFS2_QUOTA_OFF:
                break;
        }

        if (sdp->sd_quota_inode) {
                fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
                fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
        }
        fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
        fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
        fqs->qs_incoredqs = atomic_read(&qd_lru_count);
        return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
                          struct fs_disk_quota *fdq)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_lvb *qlvb;
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        memset(fdq, 0, sizeof(struct fs_disk_quota));

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */

        if ((qid.type != USRQUOTA) &&
            (qid.type != GRPQUOTA))
                return -EINVAL;

        error = qd_get(sdp, qid, &qd);
        if (error)
                return error;
        error = do_glock(qd, FORCE, &q_gh);
        if (error)
                goto out;

        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
        fdq->d_version = FS_DQUOT_VERSION;
        fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
        fdq->d_id = from_kqid_munged(current_user_ns(), qid);
        fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
        fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
        fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

        gfs2_glock_dq_uninit(&q_gh);
out:
        qd_put(qd);
        return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
                          struct fs_disk_quota *fdq)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh, i_gh;
        unsigned int data_blocks, ind_blocks;
        unsigned int blocks = 0;
        int alloc_required;
        loff_t offset;
        int error;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */

        if ((qid.type != USRQUOTA) &&
            (qid.type != GRPQUOTA))
                return -EINVAL;

        if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
                return -EINVAL;

        error = qd_get(sdp, qid, &qd);
        if (error)
                return error;

        error = gfs2_rs_alloc(ip);
        if (error)
                goto out_put;

        mutex_lock(&ip->i_inode.i_mutex);
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
        if (error)
                goto out_unlockput;
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out_q;

        /* Check for an existing entry; if there is none, alloc new blocks */
        error = update_qd(sdp, qd);
        if (error)
                goto out_i;

        /* If nothing has changed, this is a no-op */
        if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
            ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
                fdq->d_fieldmask ^= FS_DQ_BSOFT;

        if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
            ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
                fdq->d_fieldmask ^= FS_DQ_BHARD;

        if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
            ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
                fdq->d_fieldmask ^= FS_DQ_BCOUNT;

        if (fdq->d_fieldmask == 0)
                goto out_i;

        offset = qd2offset(qd);
        alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
        if (gfs2_is_stuffed(ip))
                alloc_required = 1;
        if (alloc_required) {
                gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                                       &data_blocks, &ind_blocks);
                blocks = 1 + data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip, blocks, 0);
                if (error)
                        goto out_i;
                blocks += gfs2_rg_blocks(ip, blocks);
        }

        /* Some quotas span block boundaries and can update two blocks,
           adding an extra block to the transaction to handle such quotas */
        error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
        if (error)
                goto out_release;

        /* Apply changes */
        error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

        gfs2_trans_end(sdp);
out_release:
        if (alloc_required)
                gfs2_inplace_release(ip);
out_i:
        gfs2_glock_dq_uninit(&i_gh);
out_q:
        gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
        mutex_unlock(&ip->i_inode.i_mutex);
out_put:
        qd_put(qd);
        return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
        .quota_sync     = gfs2_quota_sync,
        .get_xstate     = gfs2_quota_get_xstate,
        .get_dqblk      = gfs2_get_dqblk,
        .set_dqblk      = gfs2_set_dqblk,
};