linux/fs/gfs2/quota.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_change_host {
        u64 qc_change;
        u32 qc_flags; /* GFS2_QCF_... */
        u32 qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

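/**
 * gfs2_shrink_qd_memory - Shrink the per-id quota data cache under memory pressure
 * @shrink: The registered shrinker (unused)
 * @nr: The number of entries to scan, or 0 just to query the cache size
 * @gfp_mask: Allocation context of the caller
 *
 * Unreferenced entries sit on the global qd_lru_list; up to @nr of them
 * are unhooked from their filesystem's list and freed.
 *
 * Returns: the remaining cache size scaled by vfs_cache_pressure, or -1
 * if the caller's context does not allow recursing into filesystem code
 */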
int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
        struct gfs2_quota_data *qd;
        struct gfs2_sbd *sdp;

        if (nr == 0)
                goto out;

        if (!(gfp_mask & __GFP_FS))
                return -1;

        spin_lock(&qd_lru_lock);
        while (nr && !list_empty(&qd_lru_list)) {
                qd = list_entry(qd_lru_list.next,
                                struct gfs2_quota_data, qd_reclaim);
                sdp = qd->qd_gl->gl_sbd;

                /* Free from the filesystem-specific list */
                list_del(&qd->qd_list);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_glock_put(qd->qd_gl);
                atomic_dec(&sdp->sd_quota_count);

                /* Delete it from the common reclaim list */
                list_del_init(&qd->qd_reclaim);
                atomic_dec(&qd_lru_count);
                spin_unlock(&qd_lru_lock);
                kmem_cache_free(gfs2_quotad_cachep, qd);
                spin_lock(&qd_lru_lock);
                nr--;
        }
        spin_unlock(&qd_lru_lock);

out:
        return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}

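/**
 * qd2offset - Compute the byte offset of an id's entry in the quota file
 * @qd: The quota data
 *
 * User and group quotas for the same id are interleaved, so the entry
 * lives at (2 * id + !user) * sizeof(struct gfs2_quota).
 *
 * Returns: the offset in bytes
 */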
static u64 qd2offset(struct gfs2_quota_data *qd)
{
        u64 offset;

        offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
        offset *= sizeof(struct gfs2_quota);

        return offset;
}

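/**
 * qd_alloc - Allocate and initialise a quota data structure
 * @sdp: The GFS2 superblock
 * @user: Non-zero for a user quota, zero for a group quota
 * @id: The user or group id
 * @qdp: Used to return the new structure
 *
 * The structure starts with a single reference held and a glock
 * obtained for the id.
 *
 * Returns: 0 on success, or -ve on error
 */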
static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
                    struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd;
        int error;

        qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
        if (!qd)
                return -ENOMEM;

        atomic_set(&qd->qd_count, 1);
        qd->qd_id = id;
        if (user)
                set_bit(QDF_USER, &qd->qd_flags);
        qd->qd_slot = -1;
        INIT_LIST_HEAD(&qd->qd_reclaim);

        error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
                              &gfs2_quota_glops, CREATE, &qd->qd_gl);
        if (error)
                goto fail;

        *qdp = qd;

        return 0;

fail:
        kmem_cache_free(gfs2_quotad_cachep, qd);
        return error;
}

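/**
 * qd_get - Look up (or create) the quota data for an id
 * @sdp: The GFS2 superblock
 * @user: Non-zero for a user quota, zero for a group quota
 * @id: The user or group id
 * @qdp: Used to return the quota data
 *
 * Searches sd_quota_list for a match, taking a reference and removing
 * the entry from the reclaim list if one is found.  Otherwise a new
 * structure is allocated (with the spinlock dropped) and the search is
 * retried, so a racing insertion of the same id is never duplicated.
 *
 * Returns: 0 on success, or -ve on error
 */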
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
        int error, found;

        *qdp = NULL;

        for (;;) {
                found = 0;
                spin_lock(&qd_lru_lock);
                list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                        if (qd->qd_id == id &&
                            !test_bit(QDF_USER, &qd->qd_flags) == !user) {
                                if (!atomic_read(&qd->qd_count) &&
                                    !list_empty(&qd->qd_reclaim)) {
                                        /* Remove it from reclaim list */
                                        list_del_init(&qd->qd_reclaim);
                                        atomic_dec(&qd_lru_count);
                                }
                                atomic_inc(&qd->qd_count);
                                found = 1;
                                break;
                        }
                }

                if (!found)
                        qd = NULL;

                if (!qd && new_qd) {
                        qd = new_qd;
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        new_qd = NULL;
                }

                spin_unlock(&qd_lru_lock);

                if (qd) {
                        if (new_qd) {
                                gfs2_glock_put(new_qd->qd_gl);
                                kmem_cache_free(gfs2_quotad_cachep, new_qd);
                        }
                        *qdp = qd;
                        return 0;
                }

                error = qd_alloc(sdp, user, id, &new_qd);
                if (error)
                        return error;
        }
}

static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        gfs2_assert(sdp, atomic_read(&qd->qd_count));
        atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
        if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
                /* Add to the reclaim list */
                list_add_tail(&qd->qd_reclaim, &qd_lru_list);
                atomic_inc(&qd_lru_count);
                spin_unlock(&qd_lru_lock);
        }
}

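/**
 * slot_get - Assign a slot in the per-node quota change file
 * @qd: The quota data
 *
 * The first call finds a clear bit in sd_quota_bitmap and records the
 * slot number; later calls just bump qd_slot_count.
 *
 * Returns: 0 on success, or -ENOSPC if no slot is free
 */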
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        spin_lock(&qd_lru_lock);

        if (qd->qd_slot_count++) {
                spin_unlock(&qd_lru_lock);
                return 0;
        }

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];
                        if (byte != 0xFF)
                                goto found;
                }

        goto fail;

found:
        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))
                        break;
        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if (qd->qd_slot >= sdp->sd_quota_slots)
                goto fail;

        sdp->sd_quota_bitmap[c][o] |= 1 << b;

        spin_unlock(&qd_lru_lock);

        return 0;

fail:
        qd->qd_slot_count--;
        spin_unlock(&qd_lru_lock);
        return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&qd_lru_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
        spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&qd_lru_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
                qd->qd_slot = -1;
        }
        spin_unlock(&qd_lru_lock);
}

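/**
 * bh_get - Read in the quota change block backing a slot
 * @qd: The quota data
 *
 * On first use, maps qd_slot to a block in the quota change file, reads
 * it in, and points qd_bh_qc at this id's gfs2_quota_change entry within
 * the buffer.  Further calls only increment qd_bh_count.
 *
 * Returns: 0 on success, or -ve on error
 */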
static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int block, offset;
        struct buffer_head *bh;
        int error;
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

        mutex_lock(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                mutex_unlock(&sdp->sd_quota_mutex);
                return 0;
        }

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        bh_map.b_size = 1 << ip->i_inode.i_blkbits;
        error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
        if (error)
                goto fail;
        error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
        if (error)
                goto fail;
        error = -EIO;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
                goto fail_brelse;

        qd->qd_bh = bh;
        qd->qd_bh_qc = (struct gfs2_quota_change *)
                (bh->b_data + sizeof(struct gfs2_meta_header) +
                 offset * sizeof(struct gfs2_quota_change));

        mutex_unlock(&sdp->sd_quota_mutex);

        return 0;

fail_brelse:
        brelse(bh);
fail:
        qd->qd_bh_count--;
        mutex_unlock(&sdp->sd_quota_mutex);
        return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {
                brelse(qd->qd_bh);
                qd->qd_bh = NULL;
                qd->qd_bh_qc = NULL;
        }
        mutex_unlock(&sdp->sd_quota_mutex);
}

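/**
 * qd_fish - Find a quota data entry in need of syncing to the quota file
 * @sdp: The GFS2 superblock
 * @qdp: Used to return the entry, or NULL if none was found
 *
 * Picks the first unlocked entry with pending changes that has not yet
 * been synced in this generation, marks it QDF_LOCKED, and pins its
 * slot and buffer for the caller.
 *
 * Returns: 0 on success (including when nothing needs syncing), or -ve on error
 */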
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;
        int error;
        int found = 0;

        *qdp = NULL;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&qd_lru_lock);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
                    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
                    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
                        continue;

                list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

                set_bit(QDF_LOCKED, &qd->qd_flags);
                gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
                atomic_inc(&qd->qd_count);
                qd->qd_change_sync = qd->qd_change;
                gfs2_assert_warn(sdp, qd->qd_slot_count);
                qd->qd_slot_count++;
                found = 1;

                break;
        }

        if (!found)
                qd = NULL;

        spin_unlock(&qd_lru_lock);

        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
                error = bh_get(qd);
                if (error) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        return error;
                }
        }

        *qdp = qd;

        return 0;
}

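/**
 * qd_trylock - Try to lock a quota data entry for syncing
 * @qd: The quota data
 *
 * Like qd_fish(), but for one specific entry: if it has pending changes
 * and is not already locked, mark it QDF_LOCKED and pin its slot and
 * buffer.
 *
 * Returns: 1 if the entry was locked, 0 otherwise
 */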
static int qd_trylock(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&qd_lru_lock);

        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                spin_unlock(&qd_lru_lock);
                return 0;
        }

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

        set_bit(QDF_LOCKED, &qd->qd_flags);
        gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
        atomic_inc(&qd->qd_count);
        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;

        spin_unlock(&qd_lru_lock);

        gfs2_assert_warn(sdp, qd->qd_change_sync);
        if (bh_get(qd)) {
                clear_bit(QDF_LOCKED, &qd->qd_flags);
                slot_put(qd);
                qd_put(qd);
                return 0;
        }

        return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
                    struct gfs2_quota_data **qdp)
{
        int error;

        error = qd_get(sdp, user, id, qdp);
        if (error)
                return error;

        error = slot_get(*qdp);
        if (error)
                goto fail;

        error = bh_get(*qdp);
        if (error)
                goto fail_slot;

        return 0;

fail_slot:
        slot_put(*qdp);
fail:
        qd_put(*qdp);
        return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data **qd = al->al_qd;
        int error;

        if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
                return -EIO;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;

        error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;

        if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
                error = qdsb_get(sdp, QUOTA_USER, uid, qd);
                if (error)
                        goto out;
                al->al_qd_num++;
                qd++;
        }

        if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
                error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
                if (error)
                        goto out;
                al->al_qd_num++;
                qd++;
        }

out:
        if (error)
                gfs2_quota_unhold(ip);
        return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        unsigned int x;

        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < al->al_qd_num; x++) {
                qdsb_put(al->al_qd[x]);
                al->al_qd[x] = NULL;
        }
        al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
        const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
        const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

        if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
            !test_bit(QDF_USER, &qd_b->qd_flags)) {
                if (test_bit(QDF_USER, &qd_a->qd_flags))
                        return -1;
                else
                        return 1;
        }
        if (qd_a->qd_id < qd_b->qd_id)
                return -1;
        if (qd_a->qd_id > qd_b->qd_id)
                return 1;

        return 0;
}

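/**
 * do_qc - Record a local quota change in the per-node quota change file
 * @qd: The quota data
 * @change: The change in the number of blocks allocated to this id
 *
 * Adds @change to this id's entry in the node's quota change file as
 * part of the current transaction.  When the accumulated change first
 * becomes non-zero, an extra reference and slot are held; when it
 * returns to zero, the entry is released again.
 */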
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        struct gfs2_quota_change *qc = qd->qd_bh_qc;
        s64 x;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
                qc->qc_change = 0;
                qc->qc_flags = 0;
                if (test_bit(QDF_USER, &qd->qd_flags))
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(qd->qd_id);
        }

        x = be64_to_cpu(qc->qc_change) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&qd_lru_lock);
        qd->qd_change = x;
        spin_unlock(&qd_lru_lock);

        if (!x) {
                gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
                clear_bit(QDF_CHANGE, &qd->qd_flags);
                qc->qc_flags = 0;
                qc->qc_id = 0;
                slot_put(qd);
                qd_put(qd);
        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
                qd_hold(qd);
                slot_hold(qd);
        }

        mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd,
                             struct fs_disk_quota *fdq)
{
        struct inode *inode = &ip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        unsigned long index = loc >> PAGE_CACHE_SHIFT;
        unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize, iblock, pos;
        struct buffer_head *bh, *dibh;
        struct page *page;
        void *kaddr, *ptr;
        struct gfs2_quota q, *qp;
        int err, nbytes;
        u64 size;

        if (gfs2_is_stuffed(ip))
                gfs2_unstuff_dinode(ip, NULL);

        memset(&q, 0, sizeof(struct gfs2_quota));
        err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
        if (err < 0)
                return err;

        err = -EIO;
        qp = &q;
        qp->qu_value = be64_to_cpu(qp->qu_value);
        qp->qu_value += change;
        qp->qu_value = cpu_to_be64(qp->qu_value);
        qd->qd_qb.qb_value = qp->qu_value;
        if (fdq) {
                if (fdq->d_fieldmask & FS_DQ_BSOFT) {
                        qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
                        qd->qd_qb.qb_warn = qp->qu_warn;
                }
                if (fdq->d_fieldmask & FS_DQ_BHARD) {
                        qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
                        qd->qd_qb.qb_limit = qp->qu_limit;
                }
                if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
                        qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
                        qd->qd_qb.qb_value = qp->qu_value;
                }
        }

        /* Write the quota into the quota file on disk */
        ptr = qp;
        nbytes = sizeof(struct gfs2_quota);
get_a_page:
        page = grab_cache_page(mapping, index);
        if (!page)
                return -ENOMEM;

        blocksize = inode->i_sb->s_blocksize;
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        bh = page_buffers(page);
        pos = blocksize;
        while (offset >= pos) {
                bh = bh->b_this_page;
                iblock++;
                pos += blocksize;
        }

        if (!buffer_mapped(bh)) {
                gfs2_block_map(inode, iblock, bh, 1);
                if (!buffer_mapped(bh))
                        goto unlock_out;
                /* If it's a newly allocated disk block for quota, zero it */
                if (buffer_new(bh))
                        zero_user(page, pos - blocksize, bh->b_size);
        }

        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ_META, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto unlock_out;
        }

        gfs2_trans_add_bh(ip->i_gl, bh, 0);

        kaddr = kmap_atomic(page, KM_USER0);
        if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
                nbytes = PAGE_CACHE_SIZE - offset;
        memcpy(kaddr + offset, ptr, nbytes);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
        unlock_page(page);
        page_cache_release(page);

        /* If quota straddles page boundary, we need to update the rest of the
         * quota at the beginning of the next page */
        if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
                ptr = ptr + nbytes;
                nbytes = sizeof(struct gfs2_quota) - nbytes;
                offset = 0;
                index++;
                goto get_a_page;
        }

        /* Update the disk inode timestamp and size (if extended) */
        err = gfs2_meta_inode_buffer(ip, &dibh);
        if (err)
                goto out;

        size = loc + sizeof(struct gfs2_quota);
        if (size > inode->i_size)
                i_size_write(inode, size);
        inode->i_mtime = inode->i_atime = CURRENT_TIME;
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
        gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
        mark_inode_dirty(inode);

out:
        return err;
unlock_out:
        unlock_page(page);
        page_cache_release(page);
        return err;
}

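/**
 * do_sync - Sync a batch of quota changes to the global quota file
 * @num_qd: The number of entries in @qda
 * @qda: The quota data entries to sync, as returned by qd_fish()
 *
 * Takes the quota glocks for all ids exclusively, folds each node-local
 * change into the quota file with gfs2_adjust_quota(), and then zeroes
 * out the local change records with do_qc().
 *
 * Returns: 0 on success, or -ve on error
 */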
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
        struct gfs2_quota_data *qd;
        loff_t offset;
        unsigned int nalloc = 0, blocks;
        struct gfs2_alloc *al = NULL;
        int error;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                              &data_blocks, &ind_blocks);

        ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
        if (!ghs)
                return -ENOMEM;

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
                if (error)
                        goto out;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out;

        for (x = 0; x < num_qd; x++) {
                offset = qd2offset(qda[x]);
                if (gfs2_write_alloc_required(ip, offset,
                                              sizeof(struct gfs2_quota)))
                        nalloc++;
        }

        al = gfs2_alloc_get(ip);
        if (!al) {
                error = -ENOMEM;
                goto out_gunlock;
        }
        /*
         * 1 blk for unstuffing inode if stuffed. We add this extra
         * block to the reservation unconditionally. If the inode
         * doesn't need unstuffing, the block will be released to the
         * rgrp since it won't be allocated during the transaction
         */
        al->al_requested = 1;
        /* +3 in the end for unstuffing block, inode size update block
         * and another block in case quota straddles page boundary and
         * two blocks need to be updated instead of 1 */
        blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

        if (nalloc)
                al->al_requested += nalloc * (data_blocks + ind_blocks);
        error = gfs2_inplace_reserve(ip);
        if (error)
                goto out_alloc;

        if (nalloc)
                blocks += gfs2_rg_blocks(al) + nalloc * ind_blocks + RES_STATFS;

        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
                goto out_ipres;

        for (x = 0; x < num_qd; x++) {
                qd = qda[x];
                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
                if (error)
                        goto out_end_trans;

                do_qc(qd, -qd->qd_change_sync);
        }

        error = 0;

out_end_trans:
        gfs2_trans_end(sdp);
out_ipres:
        gfs2_inplace_release(ip);
out_alloc:
        gfs2_alloc_put(ip);
out_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
out:
        while (qx--)
                gfs2_glock_dq_uninit(&ghs[qx]);
        mutex_unlock(&ip->i_inode.i_mutex);
        kfree(ghs);
        gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
        return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_quota q;
        struct gfs2_quota_lvb *qlvb;
        loff_t pos;
        int error;

        memset(&q, 0, sizeof(struct gfs2_quota));
        pos = qd2offset(qd);
        error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
        if (error < 0)
                return error;

        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
        qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
        qlvb->__pad = 0;
        qlvb->qb_limit = q.qu_limit;
        qlvb->qb_warn = q.qu_warn;
        qlvb->qb_value = q.qu_value;
        qd->qd_qb = *qlvb;

        return 0;
}

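/**
 * do_glock - Acquire a quota glock and make sure the LVB is up to date
 * @qd: The quota data
 * @force_refresh: Re-read the quota file entry even if the LVB looks valid
 * @q_gh: The holder to fill in
 *
 * Normally a shared glock suffices, since the limits are cached in the
 * lock value block.  If the LVB has never been initialised (or a refresh
 * is forced), the lock is upgraded to exclusive, the entry is re-read
 * from the quota file, and the lock is then re-acquired shared.
 *
 * Returns: 0 on success, or -ve on error
 */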
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_holder i_gh;
        int error;

restart:
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
        if (error)
                return error;

        qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

        if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, q_gh);
                if (error)
                        return error;

                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
                if (error)
                        goto fail;

                error = update_qd(sdp, qd);
                if (error)
                        goto fail_gunlock;

                gfs2_glock_dq_uninit(&i_gh);
                gfs2_glock_dq_uninit(q_gh);
                force_refresh = 0;
                goto restart;
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        gfs2_glock_dq_uninit(q_gh);
        return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        unsigned int x;
        int error;

        error = gfs2_quota_hold(ip, uid, gid);
        if (error)
                return error;

        if (capable(CAP_SYS_RESOURCE) ||
            sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
             sort_qd, NULL);

        for (x = 0; x < al->al_qd_num; x++) {
                error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
                if (error)
                        break;
        }

        if (!error)
                set_bit(GIF_QD_LOCKED, &ip->i_flags);
        else {
                while (x--)
                        gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
                gfs2_quota_unhold(ip);
        }

        return error;
}

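/**
 * need_sync - Decide whether a local quota change should be synced early
 * @qd: The quota data
 *
 * A sync is skipped when usage is shrinking, when the id is already over
 * its limit, or when the scaled projection still falls below the limit.
 * The projection assumes every journal (node) could have accumulated a
 * similar local change:
 *
 *   projected = qb_value + (local change * number of journals * scale)
 *
 * where scale is gt_quota_scale_num/gt_quota_scale_den, the "quota_scale"
 * knob described at the top of this file.
 *
 * Returns: 1 if the change should be synced now, 0 otherwise
 */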
static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;
        s64 value;
        unsigned int num, den;
        int do_sync = 1;

        if (!qd->qd_qb.qb_limit)
                return 0;

        spin_lock(&qd_lru_lock);
        value = qd->qd_change;
        spin_unlock(&qd_lru_lock);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        if (value < 0)
                do_sync = 0;
        else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
                 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                do_sync = 0;
        else {
                value *= gfs2_jindex_size(sdp) * num;
                value = div_s64(value, den);
                value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
                if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                        do_sync = 0;
        }

        return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;
        unsigned int x;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
                goto out;

        for (x = 0; x < al->al_qd_num; x++) {
                struct gfs2_quota_data *qd;
                int sync;

                qd = al->al_qd[x];
                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

                if (sync && qd_trylock(qd))
                        qda[count++] = qd;
        }

        if (count) {
                do_sync(count, qda);
                for (x = 0; x < count; x++)
                        qd_unlock(qda[x]);
        }

out:
        gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
               sdp->sd_fsname, type,
               (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
               qd->qd_id);

        return 0;
}

int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data *qd;
        s64 value;
        unsigned int x;
        int error = 0;

        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
                return 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        for (x = 0; x < al->al_qd_num; x++) {
                qd = al->al_qd[x];

                if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
                        continue;

                value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
                spin_lock(&qd_lru_lock);
                value += qd->qd_change;
                spin_unlock(&qd_lru_lock);

                if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
                        print_message(qd, "exceeded");
                        quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
                                           USRQUOTA : GRPQUOTA, qd->qd_id,
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

                        error = -EDQUOT;
                        break;
                } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
                           (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp,
                                                gt_quota_warn_period) * HZ)) {
                        quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
                                           USRQUOTA : GRPQUOTA, qd->qd_id,
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
                }
        }

        return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
                       u32 uid, u32 gid)
{
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_quota_data *qd;
        unsigned int x;

        if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
                return;
        if (ip->i_diskflags & GFS2_DIF_SYSTEM)
                return;

        for (x = 0; x < al->al_qd_num; x++) {
                qd = al->al_qd[x];

                if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
                        do_qc(qd, change);
                }
        }
}

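/**
 * gfs2_quota_sync - Sync all pending quota changes to the quota file
 * @sb: The VFS superblock
 * @type: The quota type (unused; both types are synced)
 * @wait: Unused
 *
 * Bumps the sync generation and repeatedly fishes out batches of up to
 * "quota_simul_sync" changed entries, syncing each batch with do_sync().
 *
 * Returns: 0 on success, or -ve on error
 */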
int gfs2_quota_sync(struct super_block *sb, int type, int wait)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_data **qda;
        unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
        unsigned int num_qd;
        unsigned int x;
        int error = 0;

        sdp->sd_quota_sync_gen++;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
        if (!qda)
                return -ENOMEM;

        do {
                num_qd = 0;

                for (;;) {
                        error = qd_fish(sdp, qda + num_qd);
                        if (error || !qda[num_qd])
                                break;
                        if (++num_qd == max_qd)
                                break;
                }

                if (num_qd) {
                        if (!error)
                                error = do_sync(num_qd, qda);
                        if (!error)
                                for (x = 0; x < num_qd; x++)
                                        qda[x]->qd_sync_gen =
                                                sdp->sd_quota_sync_gen;

                        for (x = 0; x < num_qd; x++)
                                qd_unlock(qda[x]);
                }
        } while (!error && num_qd == max_qd);

        kfree(qda);

        return error;
}

static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
{
        return gfs2_quota_sync(sb, type, 0);
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        error = qd_get(sdp, user, id, &qd);
        if (error)
                return error;

        error = do_glock(qd, FORCE, &q_gh);
        if (!error)
                gfs2_glock_dq_uninit(&q_gh);

        qd_put(qd);
        return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
        const struct gfs2_quota_change *str = buf;

        qc->qc_change = be64_to_cpu(str->qc_change);
        qc->qc_flags = be32_to_cpu(str->qc_flags);
        qc->qc_id = be32_to_cpu(str->qc_id);
}

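/**
 * gfs2_quota_init - Build the in-core quota state at mount time
 * @sdp: The GFS2 superblock
 *
 * Sizes the slot bitmap from the quota change file and then scans that
 * file: every entry with a non-zero change left over from before the
 * last unmount (or crash) gets an in-core gfs2_quota_data so that it
 * will be synced to the quota file in due course.
 *
 * Returns: 0 on success, or -ve on error
 */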
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        u64 size = i_size_read(sdp->sd_qc_inode);
        unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        u64 dblock;
        u32 extlen = 0;
        int error;

        if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
                return -EIO;

        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

        error = -ENOMEM;

        sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
                                       sizeof(unsigned char *), GFP_NOFS);
        if (!sdp->sd_quota_bitmap)
                return error;

        for (x = 0; x < sdp->sd_quota_chunks; x++) {
                sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
                if (!sdp->sd_quota_bitmap[x])
                        goto fail;
        }

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;
                unsigned int y;

                if (!extlen) {
                        int new = 0;
                        error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
                        if (error)
                                goto fail;
                }
                error = -EIO;
                bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
                if (!bh)
                        goto fail;
                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
                        brelse(bh);
                        goto fail;
                }

                for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
                     y++, slot++) {
                        struct gfs2_quota_change_host qc;
                        struct gfs2_quota_data *qd;

                        gfs2_quota_change_in(&qc, bh->b_data +
                                          sizeof(struct gfs2_meta_header) +
                                          y * sizeof(struct gfs2_quota_change));
                        if (!qc.qc_change)
                                continue;

                        error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
                                         qc.qc_id, &qd);
                        if (error) {
                                brelse(bh);
                                goto fail;
                        }

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc.qc_change;
                        qd->qd_slot = slot;
                        qd->qd_slot_count = 1;

                        spin_lock(&qd_lru_lock);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&qd_lru_lock);

                        found++;
                }

                brelse(bh);
                dblock++;
                extlen--;
        }

        if (found)
                fs_info(sdp, "found %u quota changes\n", found);

        return 0;

fail:
        gfs2_quota_cleanup(sdp);
        return error;
}

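/**
 * gfs2_quota_cleanup - Tear down the in-core quota state at unmount time
 * @sdp: The GFS2 superblock
 *
 * Yields the CPU until any busy entries are released, then frees every
 * quota data structure and the slot bitmap.
 */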
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;
        unsigned int x;

        spin_lock(&qd_lru_lock);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

                if (atomic_read(&qd->qd_count) > 1 ||
                    (atomic_read(&qd->qd_count) &&
                     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        list_move(&qd->qd_list, head);
                        spin_unlock(&qd_lru_lock);
                        schedule();
                        spin_lock(&qd_lru_lock);
                        continue;
                }

                list_del(&qd->qd_list);
                /* Also remove if this qd exists in the reclaim list */
                if (!list_empty(&qd->qd_reclaim)) {
                        list_del_init(&qd->qd_reclaim);
                        atomic_dec(&qd_lru_count);
                }
                atomic_dec(&sdp->sd_quota_count);
                spin_unlock(&qd_lru_lock);

                if (!atomic_read(&qd->qd_count)) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);
                } else
                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_glock_put(qd->qd_gl);
                kmem_cache_free(gfs2_quotad_cachep, qd);

                spin_lock(&qd_lru_lock);
        }
        spin_unlock(&qd_lru_lock);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

        if (sdp->sd_quota_bitmap) {
                for (x = 0; x < sdp->sd_quota_chunks; x++)
                        kfree(sdp->sd_quota_bitmap[x]);
                kfree(sdp->sd_quota_bitmap);
        }
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
        if (error == 0 || error == -EROFS)
                return;
        if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
                               int (*fxn)(struct super_block *sb, int type),
                               unsigned long t, unsigned long *timeo,
                               unsigned int *new_timeo)
{
        if (t >= *timeo) {
                int error = fxn(sdp->sd_vfs, 0);
                quotad_error(sdp, msg, error);
                *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
        } else {
                *timeo -= t;
        }
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        while (1) {
                ip = NULL;
                spin_lock(&sdp->sd_trunc_lock);
                if (!list_empty(&sdp->sd_trunc_list)) {
                        ip = list_entry(sdp->sd_trunc_list.next,
                                        struct gfs2_inode, i_trunc_list);
                        list_del_init(&ip->i_trunc_list);
                }
                spin_unlock(&sdp->sd_trunc_lock);
                if (ip == NULL)
                        return;
                gfs2_glock_finish_truncate(ip);
        }
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
        if (!sdp->sd_statfs_force_sync) {
                sdp->sd_statfs_force_sync = 1;
                wake_up(&sdp->sd_quota_wait);
        }
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
        struct gfs2_sbd *sdp = data;
        struct gfs2_tune *tune = &sdp->sd_tune;
        unsigned long statfs_timeo = 0;
        unsigned long quotad_timeo = 0;
        unsigned long t = 0;
        DEFINE_WAIT(wait);
        int empty;

        while (!kthread_should_stop()) {

                /* Update the master statfs file */
                if (sdp->sd_statfs_force_sync) {
                        int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
                        quotad_error(sdp, "statfs", error);
                        statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
                } else {
                        quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
                                           &statfs_timeo,
                                           &tune->gt_statfs_quantum);
                }

                /* Update quota file */
                quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
                                   &quotad_timeo, &tune->gt_quota_quantum);

                /* Check for & recover partially truncated inodes */
                quotad_check_trunc_list(sdp);

                if (freezing(current))
                        refrigerator();
                t = min(quotad_timeo, statfs_timeo);

                prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
                spin_lock(&sdp->sd_trunc_lock);
                empty = list_empty(&sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                if (empty && !sdp->sd_statfs_force_sync)
                        t -= schedule_timeout(t);
                else
                        t = 0;
                finish_wait(&sdp->sd_quota_wait, &wait);
        }

        return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
                                 struct fs_quota_stat *fqs)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;

        memset(fqs, 0, sizeof(struct fs_quota_stat));
        fqs->qs_version = FS_QSTAT_VERSION;

        switch (sdp->sd_args.ar_quota) {
        case GFS2_QUOTA_ON:
                fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
                /*FALLTHRU*/
        case GFS2_QUOTA_ACCOUNT:
                fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
                break;
        case GFS2_QUOTA_OFF:
                break;
        }

        if (sdp->sd_quota_inode) {
                fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
                fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
        }
        fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
        fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
        fqs->qs_incoredqs = atomic_read(&qd_lru_count);
        return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
                          struct fs_disk_quota *fdq)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_lvb *qlvb;
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        memset(fdq, 0, sizeof(struct fs_disk_quota));

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */

        if (type == USRQUOTA)
                type = QUOTA_USER;
        else if (type == GRPQUOTA)
                type = QUOTA_GROUP;
        else
                return -EINVAL;

        error = qd_get(sdp, type, id, &qd);
        if (error)
                return error;
        error = do_glock(qd, FORCE, &q_gh);
        if (error)
                goto out;

        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
        fdq->d_version = FS_DQUOT_VERSION;
        fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
        fdq->d_id = id;
        fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
        fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
        fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

        gfs2_glock_dq_uninit(&q_gh);
out:
        qd_put(qd);
        return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
                          struct fs_disk_quota *fdq)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh, i_gh;
        unsigned int data_blocks, ind_blocks;
        unsigned int blocks = 0;
        int alloc_required;
        struct gfs2_alloc *al;
        loff_t offset;
        int error;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */

        switch (type) {
        case USRQUOTA:
                type = QUOTA_USER;
                if (fdq->d_flags != FS_USER_QUOTA)
                        return -EINVAL;
                break;
        case GRPQUOTA:
                type = QUOTA_GROUP;
                if (fdq->d_flags != FS_GROUP_QUOTA)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
                return -EINVAL;
        if (fdq->d_id != id)
                return -EINVAL;

        error = qd_get(sdp, type, id, &qd);
        if (error)
                return error;

        mutex_lock(&ip->i_inode.i_mutex);
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
        if (error)
                goto out_put;
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out_q;

        /* Check for existing entry, if none then alloc new blocks */
        error = update_qd(sdp, qd);
        if (error)
                goto out_i;

        /* If nothing has changed, this is a no-op */
        if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
            ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
                fdq->d_fieldmask ^= FS_DQ_BSOFT;

        if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
            ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
                fdq->d_fieldmask ^= FS_DQ_BHARD;

        if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
            ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
                fdq->d_fieldmask ^= FS_DQ_BCOUNT;

        if (fdq->d_fieldmask == 0)
                goto out_i;

        offset = qd2offset(qd);
        alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
        if (alloc_required) {
                al = gfs2_alloc_get(ip);
                if (al == NULL) {
                        error = -ENOMEM;
                        goto out_i;
                }
                gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                                       &data_blocks, &ind_blocks);
                blocks = al->al_requested = 1 + data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_alloc;
                blocks += gfs2_rg_blocks(al);
        }

        error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
        if (error)
                goto out_release;

        /* Apply changes */
        error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

        gfs2_trans_end(sdp);
out_release:
        if (alloc_required) {
                gfs2_inplace_release(ip);
out_alloc:
                gfs2_alloc_put(ip);
        }
out_i:
        gfs2_glock_dq_uninit(&i_gh);
out_q:
        gfs2_glock_dq_uninit(&q_gh);
out_put:
        mutex_unlock(&ip->i_inode.i_mutex);
        qd_put(qd);
        return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
        .quota_sync     = gfs2_quota_sync,
        .get_xstate     = gfs2_quota_get_xstate,
        .get_dqblk      = gfs2_get_dqblk,
        .set_dqblk      = gfs2_set_dqblk,
};