linux/fs/gfs2/quota.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes and infinite bandwidth) to twice the
 * user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not constantly read.
 */
  35
  36#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  37
  38#include <linux/sched.h>
  39#include <linux/slab.h>
  40#include <linux/mm.h>
  41#include <linux/spinlock.h>
  42#include <linux/completion.h>
  43#include <linux/buffer_head.h>
  44#include <linux/sort.h>
  45#include <linux/fs.h>
  46#include <linux/bio.h>
  47#include <linux/gfs2_ondisk.h>
  48#include <linux/kthread.h>
  49#include <linux/freezer.h>
  50#include <linux/quota.h>
  51#include <linux/dqblk_xfs.h>
  52#include <linux/lockref.h>
  53#include <linux/list_lru.h>
  54#include <linux/rcupdate.h>
  55#include <linux/rculist_bl.h>
  56#include <linux/bit_spinlock.h>
  57#include <linux/jhash.h>
  58#include <linux/vmalloc.h>
  59
  60#include "gfs2.h"
  61#include "incore.h"
  62#include "bmap.h"
  63#include "glock.h"
  64#include "glops.h"
  65#include "log.h"
  66#include "meta_io.h"
  67#include "quota.h"
  68#include "rgrp.h"
  69#include "super.h"
  70#include "trans.h"
  71#include "inode.h"
  72#include "util.h"
  73
  74#define GFS2_QD_HASH_SHIFT      12
  75#define GFS2_QD_HASH_SIZE       BIT(GFS2_QD_HASH_SHIFT)
  76#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)
  77
  78/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  79/*                     -> sd_bitmap_lock                              */
  80static DEFINE_SPINLOCK(qd_lock);
  81struct list_lru gfs2_qd_lru;
  82
  83static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  84
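/*
 * Hash on both the superblock pointer and the quota ID so that entries
 * for different filesystems land in different buckets of the shared
 * qd_hash_table.
 */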
static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
                                 const struct kqid qid)
{
        unsigned int h;

        h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
        h = jhash(&qid, sizeof(struct kqid), h);

        return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
        hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
        hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
        struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
        kmem_cache_free(gfs2_quotad_cachep, qd);
}

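/*
 * Tear down a list of quota data objects that have already been isolated
 * from the LRU: unlink each one from the per-sbd list and the hash table,
 * drop its glock, and free it after an RCU grace period.
 */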
static void gfs2_qd_dispose(struct list_head *list)
{
        struct gfs2_quota_data *qd;
        struct gfs2_sbd *sdp;

        while (!list_empty(list)) {
                qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
                sdp = qd->qd_gl->gl_name.ln_sbd;

                list_del(&qd->qd_lru);

                /* Free from the filesystem-specific list */
                spin_lock(&qd_lock);
                list_del(&qd->qd_list);
                spin_unlock(&qd_lock);

                spin_lock_bucket(qd->qd_hash);
                hlist_bl_del_rcu(&qd->qd_hlist);
                spin_unlock_bucket(qd->qd_hash);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_glock_put(qd->qd_gl);
                atomic_dec(&sdp->sd_quota_count);

                /* Delete it from the common reclaim list */
                call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
        }
}

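/*
 * LRU walk callback: move quota data objects that are no longer
 * referenced onto the dispose list passed in via @arg.  Objects whose
 * lock cannot be taken without blocking are skipped for this scan.
 */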
static enum lru_status gfs2_qd_isolate(struct list_head *item,
                struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
        struct list_head *dispose = arg;
        struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

        if (!spin_trylock(&qd->qd_lockref.lock))
                return LRU_SKIP;

        if (qd->qd_lockref.count == 0) {
                lockref_mark_dead(&qd->qd_lockref);
                list_lru_isolate_move(lru, &qd->qd_lru, dispose);
        }

        spin_unlock(&qd->qd_lockref.lock);
        return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
                                         struct shrink_control *sc)
{
        LIST_HEAD(dispose);
        unsigned long freed;

        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
                                     gfs2_qd_isolate, &dispose);

        gfs2_qd_dispose(&dispose);

        return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
                                          struct shrink_control *sc)
{
        return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

struct shrinker gfs2_qd_shrinker = {
        .count_objects = gfs2_qd_shrink_count,
        .scan_objects = gfs2_qd_shrink_scan,
        .seeks = DEFAULT_SEEKS,
        .flags = SHRINKER_NUMA_AWARE,
};

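/*
 * Quota file layout: user and group quotas for the same numeric ID are
 * adjacent, with the user entry at the even index and the group entry
 * at the following odd index.
 */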
static u64 qd2index(struct gfs2_quota_data *qd)
{
        struct kqid qid = qd->qd_id;
        return (2 * (u64)from_kqid(&init_user_ns, qid)) +
                ((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
        u64 offset;

        offset = qd2index(qd);
        offset *= sizeof(struct gfs2_quota);

        return offset;
}

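/*
 * Allocate a new quota data object and attach its glock.  The new object
 * is not yet visible in the hash table or on the per-sbd list.
 */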
static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
        struct gfs2_quota_data *qd;
        int error;

        qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
        if (!qd)
                return NULL;

        qd->qd_sbd = sdp;
        qd->qd_lockref.count = 1;
        spin_lock_init(&qd->qd_lockref.lock);
        qd->qd_id = qid;
        qd->qd_slot = -1;
        INIT_LIST_HEAD(&qd->qd_lru);
        qd->qd_hash = hash;

        error = gfs2_glock_get(sdp, qd2index(qd),
                              &gfs2_quota_glops, CREATE, &qd->qd_gl);
        if (error)
                goto fail;

        return qd;

fail:
        kmem_cache_free(gfs2_quotad_cachep, qd);
        return NULL;
}

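/*
 * Look up a quota data object in the given hash bucket.  Runs under
 * rcu_read_lock(); on a successful match, a reference is taken and the
 * object is removed from the reclaim LRU.
 */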
static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
                                                     const struct gfs2_sbd *sdp,
                                                     struct kqid qid)
{
        struct gfs2_quota_data *qd;
        struct hlist_bl_node *h;

        hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
                if (!qid_eq(qd->qd_id, qid))
                        continue;
                if (qd->qd_sbd != sdp)
                        continue;
                if (lockref_get_not_dead(&qd->qd_lockref)) {
                        list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
                        return qd;
                }
        }

        return NULL;
}

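/*
 * Find or create the quota data object for the given ID.  The lockless
 * lookup is repeated under qd_lock and the bucket lock before a newly
 * allocated object is inserted; the loser of a creation race frees its
 * preallocated object again.
 */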
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd, *new_qd;
        unsigned int hash = gfs2_qd_hash(sdp, qid);

        rcu_read_lock();
        *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
        rcu_read_unlock();

        if (qd)
                return 0;

        new_qd = qd_alloc(hash, sdp, qid);
        if (!new_qd)
                return -ENOMEM;

        spin_lock(&qd_lock);
        spin_lock_bucket(hash);
        *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
        if (qd == NULL) {
                *qdp = new_qd;
                list_add(&new_qd->qd_list, &sdp->sd_quota_list);
                hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
                atomic_inc(&sdp->sd_quota_count);
        }
        spin_unlock_bucket(hash);
        spin_unlock(&qd_lock);

        if (qd) {
                gfs2_glock_put(new_qd->qd_gl);
                kmem_cache_free(gfs2_quotad_cachep, new_qd);
        }

        return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
        gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
        lockref_get(&qd->qd_lockref);
}

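/*
 * Drop a reference.  An object whose count reaches zero is parked on the
 * shrinker LRU instead of being freed, so a later lookup can revive it
 * via lockref_get_not_dead().
 */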
static void qd_put(struct gfs2_quota_data *qd)
{
        if (lockref_put_or_lock(&qd->qd_lockref))
                return;

        qd->qd_lockref.count = 0;
        list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
        spin_unlock(&qd->qd_lockref.lock);
}

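/*
 * Reserve a slot in the per-node quota change file for this ID, or take
 * another reference on the slot the object already owns.
 */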
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_sbd;
        unsigned int bit;
        int error = 0;

        spin_lock(&sdp->sd_bitmap_lock);
        if (qd->qd_slot_count != 0)
                goto out;

        error = -ENOSPC;
        bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
        if (bit < sdp->sd_quota_slots) {
                set_bit(bit, sdp->sd_quota_bitmap);
                qd->qd_slot = bit;
                error = 0;
out:
                qd->qd_slot_count++;
        }
        spin_unlock(&sdp->sd_bitmap_lock);

        return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_sbd;

        spin_lock(&sdp->sd_bitmap_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
        spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_sbd;

        spin_lock(&sdp->sd_bitmap_lock);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
                qd->qd_slot = -1;
        }
        spin_unlock(&sdp->sd_bitmap_lock);
}

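/*
 * Read the quota change block that holds this object's slot and point
 * qd_bh_qc at the slot's on-disk gfs2_quota_change record.  Nested
 * calls just bump qd_bh_count.
 */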
static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int block, offset;
        struct buffer_head *bh;
        int error;
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

        mutex_lock(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                mutex_unlock(&sdp->sd_quota_mutex);
                return 0;
        }

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        bh_map.b_size = BIT(ip->i_inode.i_blkbits);
        error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
        if (error)
                goto fail;
        error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
        if (error)
                goto fail;
        error = -EIO;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
                goto fail_brelse;

        qd->qd_bh = bh;
        qd->qd_bh_qc = (struct gfs2_quota_change *)
                (bh->b_data + sizeof(struct gfs2_meta_header) +
                 offset * sizeof(struct gfs2_quota_change));

        mutex_unlock(&sdp->sd_quota_mutex);

        return 0;

fail_brelse:
        brelse(bh);
fail:
        qd->qd_bh_count--;
        mutex_unlock(&sdp->sd_quota_mutex);
        return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {
                brelse(qd->qd_bh);
                qd->qd_bh = NULL;
                qd->qd_bh_qc = NULL;
        }
        mutex_unlock(&sdp->sd_quota_mutex);
}

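/*
 * Check whether a quota data object has local changes that still need
 * syncing to the quota file.  On success, the object is marked
 * QDF_LOCKED, the pending change is snapshotted into qd_change_sync,
 * and object and slot references are taken for the caller.
 */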
static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
                         u64 *sync_gen)
{
        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags) ||
            (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
                return 0;

        if (!lockref_get_not_dead(&qd->qd_lockref))
                return 0;

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
        set_bit(QDF_LOCKED, &qd->qd_flags);
        qd->qd_change_sync = qd->qd_change;
        slot_hold(qd);
        return 1;
}

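/* Find one quota data object that is due for syncing, if any. */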
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;
        int error;
        int found = 0;

        *qdp = NULL;

        if (sb_rdonly(sdp->sd_vfs))
                return 0;

        spin_lock(&qd_lock);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
                if (found)
                        break;
        }

        if (!found)
                qd = NULL;

        spin_unlock(&qd_lock);

        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
                error = bh_get(qd);
                if (error) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        return error;
                }
        }

        *qdp = qd;

        return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
                    struct gfs2_quota_data **qdp)
{
        int error;

        error = qd_get(sdp, qid, qdp);
        if (error)
                return error;

        error = slot_get(*qdp);
        if (error)
                goto fail;

        error = bh_get(*qdp);
        if (error)
                goto fail_slot;

        return 0;

fail_slot:
        slot_put(*qdp);
fail:
        qd_put(*qdp);
        return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

/**
 * gfs2_qa_get - make sure we have a quota allocation data structure,
 *               if necessary
 * @ip: the inode for this reservation
 */
int gfs2_qa_get(struct gfs2_inode *ip)
{
        int error = 0;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        down_write(&ip->i_rw_mutex);
        if (ip->i_qadata == NULL) {
                ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
                if (!ip->i_qadata) {
                        error = -ENOMEM;
                        goto out;
                }
        }
        ip->i_qadata->qa_ref++;
out:
        up_write(&ip->i_rw_mutex);
        return error;
}

void gfs2_qa_put(struct gfs2_inode *ip)
{
        down_write(&ip->i_rw_mutex);
        if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
                kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
                ip->i_qadata = NULL;
        }
        up_write(&ip->i_rw_mutex);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data **qd;
        int error;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        error = gfs2_qa_get(ip);
        if (error)
                return error;

        qd = ip->i_qadata->qa_qd;

        if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
                error = -EIO;
                goto out;
        }

        error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
        if (error)
                goto out_unhold;
        ip->i_qadata->qa_qd_num++;
        qd++;

        error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
        if (error)
                goto out_unhold;
        ip->i_qadata->qa_qd_num++;
        qd++;

        if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
            !uid_eq(uid, ip->i_inode.i_uid)) {
                error = qdsb_get(sdp, make_kqid_uid(uid), qd);
                if (error)
                        goto out_unhold;
                ip->i_qadata->qa_qd_num++;
                qd++;
        }

        if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
            !gid_eq(gid, ip->i_inode.i_gid)) {
                error = qdsb_get(sdp, make_kqid_gid(gid), qd);
                if (error)
                        goto out_unhold;
                ip->i_qadata->qa_qd_num++;
                qd++;
        }

out_unhold:
        if (error)
                gfs2_quota_unhold(ip);
out:
        return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        u32 x;

        if (ip->i_qadata == NULL)
                return;

        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
                qdsb_put(ip->i_qadata->qa_qd[x]);
                ip->i_qadata->qa_qd[x] = NULL;
        }
        ip->i_qadata->qa_qd_num = 0;
        gfs2_qa_put(ip);
}

static int sort_qd(const void *a, const void *b)
{
        const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
        const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

        if (qid_lt(qd_a->qd_id, qd_b->qd_id))
                return -1;
        if (qid_lt(qd_b->qd_id, qd_a->qd_id))
                return 1;
        return 0;
}

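/*
 * Record a local quota change in this node's quota change file.  A
 * change that brings the total back to zero releases the slot and the
 * object reference; a first nonzero change pins both until the change
 * is synced.
 */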
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        struct gfs2_quota_change *qc = qd->qd_bh_qc;
        s64 x;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
                qc->qc_change = 0;
                qc->qc_flags = 0;
                if (qd->qd_id.type == USRQUOTA)
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
        }

        x = be64_to_cpu(qc->qc_change) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&qd_lock);
        qd->qd_change = x;
        spin_unlock(&qd_lock);

        if (!x) {
                gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
                clear_bit(QDF_CHANGE, &qd->qd_flags);
                qc->qc_flags = 0;
                qc->qc_id = 0;
                slot_put(qd);
                qd_put(qd);
        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
                qd_hold(qd);
                slot_hold(qd);
        }

        if (change < 0) /* Reset quiet flag if we freed some blocks */
                clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
        mutex_unlock(&sdp->sd_quota_mutex);
}

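/*
 * Write @bytes from @buf into the quota file page at @index, mapping
 * and reading the underlying buffers by hand so the write is either
 * journaled (jdata) or added to the ordered write list.
 */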
static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
                                  unsigned off, void *buf, unsigned bytes)
{
        struct inode *inode = &ip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct buffer_head *bh;
        void *kaddr;
        u64 blk;
        unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
        unsigned to_write = bytes, pg_off = off;
        int done = 0;

        blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
        boff = off % bsize;

        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page)
                return -ENOMEM;
        if (!page_has_buffers(page))
                create_empty_buffers(page, bsize, 0);

        bh = page_buffers(page);
        while (!done) {
                /* Find the beginning block within the page */
                if (pg_off >= ((bnum * bsize) + bsize)) {
                        bh = bh->b_this_page;
                        bnum++;
                        blk++;
                        continue;
                }
                if (!buffer_mapped(bh)) {
                        gfs2_block_map(inode, blk, bh, 1);
                        if (!buffer_mapped(bh))
                                goto unlock_out;
                        /* If it's a newly allocated disk block, zero it */
                        if (buffer_new(bh))
                                zero_user(page, bnum * bsize, bh->b_size);
                }
                if (PageUptodate(page))
                        set_buffer_uptodate(bh);
                if (!buffer_uptodate(bh)) {
                        ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh))
                                goto unlock_out;
                }
                if (gfs2_is_jdata(ip))
                        gfs2_trans_add_data(ip->i_gl, bh);
                else
                        gfs2_ordered_add_inode(ip);

                /* If we need to write to the next block as well */
                if (to_write > (bsize - boff)) {
                        pg_off += (bsize - boff);
                        to_write -= (bsize - boff);
                        boff = pg_off % bsize;
                        continue;
                }
                done = 1;
        }

        /* Write to the page, now that we have setup the buffer(s) */
        kaddr = kmap_atomic(page);
        memcpy(kaddr + off, buf, bytes);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);
        unlock_page(page);
        put_page(page);

        return 0;

unlock_out:
        unlock_page(page);
        put_page(page);
        return -EIO;
}

static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
                                 loff_t loc)
{
        unsigned long pg_beg;
        unsigned pg_off, nbytes, overflow = 0;
        int pg_oflow = 0, error;
        void *ptr;

        nbytes = sizeof(struct gfs2_quota);

        pg_beg = loc >> PAGE_SHIFT;
        pg_off = offset_in_page(loc);

        /* If the quota straddles a page boundary, split the write in two */
        if ((pg_off + nbytes) > PAGE_SIZE) {
                pg_oflow = 1;
                overflow = (pg_off + nbytes) - PAGE_SIZE;
        }

        ptr = qp;
        error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
                                       nbytes - overflow);
        /* If there's an overflow, write the remaining bytes to the next page */
        if (!error && pg_oflow)
                error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
                                               ptr + nbytes - overflow,
                                               overflow);
        return error;
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd,
                             struct qc_dqblk *fdq)
{
        struct inode *inode = &ip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_quota q;
        int err;
        u64 size;

        if (gfs2_is_stuffed(ip)) {
                err = gfs2_unstuff_dinode(ip);
                if (err)
                        return err;
        }

        memset(&q, 0, sizeof(struct gfs2_quota));
        err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
        if (err < 0)
                return err;

        loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
        err = -EIO;
        be64_add_cpu(&q.qu_value, change);
        if (((s64)be64_to_cpu(q.qu_value)) < 0)
                q.qu_value = 0; /* Never go negative on quota usage */
        qd->qd_qb.qb_value = q.qu_value;
        if (fdq) {
                if (fdq->d_fieldmask & QC_SPC_SOFT) {
                        q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_warn = q.qu_warn;
                }
                if (fdq->d_fieldmask & QC_SPC_HARD) {
                        q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_limit = q.qu_limit;
                }
                if (fdq->d_fieldmask & QC_SPACE) {
                        q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_value = q.qu_value;
                }
        }

        err = gfs2_write_disk_quota(ip, &q, loc);
        if (!err) {
                size = loc + sizeof(struct gfs2_quota);
                if (size > inode->i_size)
                        i_size_write(inode, size);
                inode->i_mtime = inode->i_atime = current_time(inode);
                mark_inode_dirty(inode);
                set_bit(QDF_REFRESH, &qd->qd_flags);
        }

        return err;
}

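/*
 * Sync a batch of quota changes to the quota file.  The per-ID glocks
 * are acquired in sorted order (see sort_qd()) before the quota inode
 * is locked, and each synced change is then backed out of the local
 * quota change file via do_qc().
 */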
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
        struct gfs2_quota_data *qd;
        unsigned reserved;
        loff_t offset;
        unsigned int nalloc = 0, blocks;
        int error;

        error = gfs2_qa_get(ip);
        if (error)
                return error;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                              &data_blocks, &ind_blocks);

        ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
        if (!ghs) {
                error = -ENOMEM;
                goto out;
        }

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        inode_lock(&ip->i_inode);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
                if (error)
                        goto out_dq;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out_dq;

        for (x = 0; x < num_qd; x++) {
                offset = qd2offset(qda[x]);
                if (gfs2_write_alloc_required(ip, offset,
                                              sizeof(struct gfs2_quota)))
                        nalloc++;
        }

        /*
         * 1 blk for unstuffing the inode if stuffed.  We add this extra
         * block to the reservation unconditionally.  If the inode doesn't
         * need unstuffing, the block will be released to the rgrp since it
         * won't be allocated during the transaction.
         *
         * The +3 at the end covers the unstuffing block, the inode size
         * update block, and one more block in case a quota straddles a page
         * boundary and two blocks need to be updated instead of one.
         */
        blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

        reserved = 1 + (nalloc * (data_blocks + ind_blocks));
        ap.target = reserved;
        error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_alloc;

        if (nalloc)
                blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
                goto out_ipres;

        for (x = 0; x < num_qd; x++) {
                qd = qda[x];
                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
                if (error)
                        goto out_end_trans;

                do_qc(qd, -qd->qd_change_sync);
                set_bit(QDF_REFRESH, &qd->qd_flags);
        }

        error = 0;

out_end_trans:
        gfs2_trans_end(sdp);
out_ipres:
        gfs2_inplace_release(ip);
out_alloc:
        gfs2_glock_dq_uninit(&i_gh);
out_dq:
        while (qx--)
                gfs2_glock_dq_uninit(&ghs[qx]);
        inode_unlock(&ip->i_inode);
        kfree(ghs);
        gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
                       GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
out:
        gfs2_qa_put(ip);
        return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_quota q;
        struct gfs2_quota_lvb *qlvb;
        loff_t pos;
        int error;

        memset(&q, 0, sizeof(struct gfs2_quota));
        pos = qd2offset(qd);
        error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
        if (error < 0)
                return error;

        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
        qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
        qlvb->__pad = 0;
        qlvb->qb_limit = q.qu_limit;
        qlvb->qb_warn = q.qu_warn;
        qlvb->qb_value = q.qu_value;
        qd->qd_qb = *qlvb;

        return 0;
}

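/*
 * Acquire the glock of a quota ID and make sure the cached LVB copy of
 * its limits is valid, re-reading the quota file under an exclusive
 * lock when a refresh is forced or the LVB has not been initialized.
 */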
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_holder i_gh;
        int error;

restart:
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
        if (error)
                return error;

        if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
                force_refresh = FORCE;

        qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

        if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, q_gh);
                if (error)
                        return error;

                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
                if (error)
                        goto fail;

                error = update_qd(sdp, qd);
                if (error)
                        goto fail_gunlock;

                gfs2_glock_dq_uninit(&i_gh);
                gfs2_glock_dq_uninit(q_gh);
                force_refresh = 0;
                goto restart;
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        gfs2_glock_dq_uninit(q_gh);
        return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qd;
        u32 x;
        int error = 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        error = gfs2_quota_hold(ip, uid, gid);
        if (error)
                return error;

        sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
             sizeof(struct gfs2_quota_data *), sort_qd, NULL);

        for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
                qd = ip->i_qadata->qa_qd[x];
                error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
                if (error)
                        break;
        }

        if (!error)
                set_bit(GIF_QD_LOCKED, &ip->i_flags);
        else {
                while (x--)
                        gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
                gfs2_quota_unhold(ip);
        }

        return error;
}

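/*
 * Early-sync heuristic: scale the local pending change by the number of
 * journals (nodes) and by quota_scale, and request a sync if the result
 * would push the cached usage over the hard limit.
 */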
static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;
        s64 value;
        unsigned int num, den;
        int do_sync = 1;

        if (!qd->qd_qb.qb_limit)
                return 0;

        spin_lock(&qd_lock);
        value = qd->qd_change;
        spin_unlock(&qd_lock);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        if (value < 0)
                do_sync = 0;
        else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
                 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                do_sync = 0;
        else {
                value *= gfs2_jindex_size(sdp) * num;
                value = div_s64(value, den);
                value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
                if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                        do_sync = 0;
        }

        return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;
        u32 x;
        int found;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
                return;

        for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
                struct gfs2_quota_data *qd;
                int sync;

                qd = ip->i_qadata->qa_qd[x];
                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
                if (!sync)
                        continue;

                spin_lock(&qd_lock);
                found = qd_check_sync(sdp, qd, NULL);
                spin_unlock(&qd_lock);

                if (!found)
                        continue;

                gfs2_assert_warn(sdp, qd->qd_change_sync);
                if (bh_get(qd)) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        continue;
                }

                qda[count++] = qd;
        }

        if (count) {
                do_sync(count, qda);
                for (x = 0; x < count; x++)
                        qd_unlock(qda[x]);
        }

        gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;

        fs_info(sdp, "quota %s for %s %u\n",
                type,
                (qd->qd_id.type == USRQUOTA) ? "user" : "group",
                from_kqid(&init_user_ns, qd->qd_id));

        return 0;
}

/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip:  The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap:  The allocation parameters. ap->target contains the requested
 *       blocks. ap->min_target, if set, contains the minimum blks
 *       requested.
 *
 * Returns: 0 on success.
 *                  min_req = ap->min_target ? ap->min_target : ap->target;
 *                  quota must allow at least min_req blks for success and
 *                  ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *                  of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
                     struct gfs2_alloc_parms *ap)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qd;
        s64 value, warn, limit;
        u32 x;
        int error = 0;

        ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
                return 0;

        for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
                qd = ip->i_qadata->qa_qd[x];

                if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
                      qid_eq(qd->qd_id, make_kqid_gid(gid))))
                        continue;

                warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
                limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
                value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
                spin_lock(&qd_lock);
                value += qd->qd_change;
                spin_unlock(&qd_lock);

                if (limit > 0 && (limit - value) < ap->allowed)
                        ap->allowed = limit - value;
                /* If we can't meet the target */
                if (limit && limit < (value + (s64)ap->target)) {
                        /* If no min_target specified or we don't meet
                         * min_target, return -EDQUOT */
                        if (!ap->min_target || ap->min_target > ap->allowed) {
                                if (!test_and_set_bit(QDF_QMSG_QUIET,
                                                      &qd->qd_flags)) {
                                        print_message(qd, "exceeded");
                                        quota_send_warning(qd->qd_id,
                                                           sdp->sd_vfs->s_dev,
                                                           QUOTA_NL_BHARDWARN);
                                }
                                error = -EDQUOT;
                                break;
                        }
                } else if (warn && warn < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp, gt_quota_warn_period)
                                         * HZ)) {
                        quota_send_warning(qd->qd_id,
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
                }
        }
        return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
                       kuid_t uid, kgid_t gid)
{
        struct gfs2_quota_data *qd;
        u32 x;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
            gfs2_assert_warn(sdp, change))
                return;
        if (ip->i_diskflags & GFS2_DIF_SYSTEM)
                return;

        if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
                                 ip->i_qadata->qa_ref > 0))
                return;
        for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
                qd = ip->i_qadata->qa_qd[x];

                if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
                    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
                        do_qc(qd, change);
                }
        }
}

int gfs2_quota_sync(struct super_block *sb, int type)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_data **qda;
        unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
        unsigned int num_qd;
        unsigned int x;
        int error = 0;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
        if (!qda)
                return -ENOMEM;

        mutex_lock(&sdp->sd_quota_sync_mutex);
        sdp->sd_quota_sync_gen++;

        do {
                num_qd = 0;

                for (;;) {
                        error = qd_fish(sdp, qda + num_qd);
                        if (error || !qda[num_qd])
                                break;
                        if (++num_qd == max_qd)
                                break;
                }

                if (num_qd) {
                        if (!error)
                                error = do_sync(num_qd, qda);
                        if (!error)
                                for (x = 0; x < num_qd; x++)
                                        qda[x]->qd_sync_gen =
                                                sdp->sd_quota_sync_gen;

                        for (x = 0; x < num_qd; x++)
                                qd_unlock(qda[x]);
                }
        } while (!error && num_qd == max_qd);

        mutex_unlock(&sdp->sd_quota_sync_mutex);
        kfree(qda);

        return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        error = qd_get(sdp, qid, &qd);
        if (error)
                return error;

        error = do_glock(qd, FORCE, &q_gh);
        if (!error)
                gfs2_glock_dq_uninit(&q_gh);

        qd_put(qd);
        return error;
}

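/*
 * Build the in-memory quota state at mount time: size the slot bitmap
 * after the quota change file and instantiate a quota data object for
 * every nonzero change left behind by a previous mount.
 */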
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        u64 size = i_size_read(sdp->sd_qc_inode);
        unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        unsigned int hash;
        unsigned int bm_size;
        u64 dblock;
        u32 extlen = 0;
        int error;

        if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
                return -EIO;

        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
        bm_size *= sizeof(unsigned long);
        error = -ENOMEM;
        sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
        if (sdp->sd_quota_bitmap == NULL)
                sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
                                                 __GFP_ZERO);
        if (!sdp->sd_quota_bitmap)
                return error;

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;
                const struct gfs2_quota_change *qc;
                unsigned int y;

                if (!extlen) {
                        extlen = 32;
                        error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen);
                        if (error)
                                goto fail;
                }
                error = -EIO;
                bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
                if (!bh)
                        goto fail;
                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
                        brelse(bh);
                        goto fail;
                }

                qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
                for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
                     y++, slot++) {
                        struct gfs2_quota_data *qd;
                        s64 qc_change = be64_to_cpu(qc->qc_change);
                        u32 qc_flags = be32_to_cpu(qc->qc_flags);
                        enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
                                                USRQUOTA : GRPQUOTA;
                        struct kqid qc_id = make_kqid(&init_user_ns, qtype,
                                                      be32_to_cpu(qc->qc_id));
                        qc++;
                        if (!qc_change)
                                continue;

                        hash = gfs2_qd_hash(sdp, qc_id);
                        qd = qd_alloc(hash, sdp, qc_id);
                        if (qd == NULL) {
                                brelse(bh);
                                goto fail;
                        }

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc_change;
                        qd->qd_slot = slot;
                        qd->qd_slot_count = 1;

                        spin_lock(&qd_lock);
                        BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&qd_lock);

                        spin_lock_bucket(hash);
                        hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
                        spin_unlock_bucket(hash);

                        found++;
                }

                brelse(bh);
                dblock++;
                extlen--;
        }

        if (found)
                fs_info(sdp, "found %u quota changes\n", found);

        return 0;

fail:
        gfs2_quota_cleanup(sdp);
        return error;
}

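/* Free all quota data objects and the slot bitmap, e.g. at unmount. */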
1446void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1447{
1448        struct list_head *head = &sdp->sd_quota_list;
1449        struct gfs2_quota_data *qd;
1450
1451        spin_lock(&qd_lock);
1452        while (!list_empty(head)) {
1453                qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
1454
1455                list_del(&qd->qd_list);
1456
1457                /* Also remove if this qd exists in the reclaim list */
1458                list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
1459                atomic_dec(&sdp->sd_quota_count);
1460                spin_unlock(&qd_lock);
1461
1462                spin_lock_bucket(qd->qd_hash);
1463                hlist_bl_del_rcu(&qd->qd_hlist);
1464                spin_unlock_bucket(qd->qd_hash);
1465
1466                gfs2_assert_warn(sdp, !qd->qd_change);
1467                gfs2_assert_warn(sdp, !qd->qd_slot_count);
1468                gfs2_assert_warn(sdp, !qd->qd_bh_count);
1469
1470                gfs2_glock_put(qd->qd_gl);
1471                call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
1472
1473                spin_lock(&qd_lock);
1474        }
1475        spin_unlock(&qd_lock);
1476
1477        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1478
1479        kvfree(sdp->sd_quota_bitmap);
1480        sdp->sd_quota_bitmap = NULL;
1481}
1482
1483static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1484{
1485        if (error == 0 || error == -EROFS)
1486                return;
1487        if (!gfs2_withdrawn(sdp)) {
1488                if (!cmpxchg(&sdp->sd_log_error, 0, error))
1489                        fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1490                wake_up(&sdp->sd_logd_waitq);
1491        }
1492}
1493
1494static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1495                               int (*fxn)(struct super_block *sb, int type),
1496                               unsigned long t, unsigned long *timeo,
1497                               unsigned int *new_timeo)
1498{
1499        if (t >= *timeo) {
1500                int error = fxn(sdp->sd_vfs, 0);
1501                quotad_error(sdp, msg, error);
1502                *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1503        } else {
1504                *timeo -= t;
1505        }
1506}
1507
1508static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1509{
1510        struct gfs2_inode *ip;
1511
1512        while(1) {
1513                ip = NULL;
1514                spin_lock(&sdp->sd_trunc_lock);
1515                if (!list_empty(&sdp->sd_trunc_list)) {
1516                        ip = list_first_entry(&sdp->sd_trunc_list,
1517                                        struct gfs2_inode, i_trunc_list);
1518                        list_del_init(&ip->i_trunc_list);
1519                }
1520                spin_unlock(&sdp->sd_trunc_lock);
1521                if (ip == NULL)
1522                        return;
1523                gfs2_glock_finish_truncate(ip);
1524        }
1525}
1526
1527void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1528        if (!sdp->sd_statfs_force_sync) {
1529                sdp->sd_statfs_force_sync = 1;
1530                wake_up(&sdp->sd_quota_wait);
1531        }
1532}
1533
1534
1535/**
1536 * gfs2_quotad - Write cached quota changes into the quota file
1537 * @data: Pointer to GFS2 superblock
1538 *
1539 */
1540
1541int gfs2_quotad(void *data)
1542{
1543        struct gfs2_sbd *sdp = data;
1544        struct gfs2_tune *tune = &sdp->sd_tune;
1545        unsigned long statfs_timeo = 0;
1546        unsigned long quotad_timeo = 0;
1547        unsigned long t = 0;
1548        DEFINE_WAIT(wait);
1549        int empty;
1550
1551        while (!kthread_should_stop()) {
1552
1553                if (gfs2_withdrawn(sdp))
1554                        goto bypass;
1555                /* Update the master statfs file */
1556                if (sdp->sd_statfs_force_sync) {
1557                        int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1558                        quotad_error(sdp, "statfs", error);
1559                        statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1560                }
1561                else
1562                        quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1563                                           &statfs_timeo,
1564                                           &tune->gt_statfs_quantum);
1565
1566                /* Update quota file */
1567                quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1568                                   &quotad_timeo, &tune->gt_quota_quantum);
1569
1570                /* Check for & recover partially truncated inodes */
1571                quotad_check_trunc_list(sdp);
1572
1573                try_to_freeze();
1574
1575bypass:
1576                t = min(quotad_timeo, statfs_timeo);
1577
1578                prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
1579                spin_lock(&sdp->sd_trunc_lock);
1580                empty = list_empty(&sdp->sd_trunc_list);
1581                spin_unlock(&sdp->sd_trunc_lock);
1582                if (empty && !sdp->sd_statfs_force_sync)
1583                        t -= schedule_timeout(t);
1584                else
1585                        t = 0;
1586                finish_wait(&sdp->sd_quota_wait, &wait);
1587        }
1588
1589        return 0;
1590}
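/*
 * Note how t feeds back into both countdowns: schedule_timeout(t)
 * returns the jiffies still remaining on an early wakeup, so after
 * "t -= schedule_timeout(t)" t holds the jiffies actually slept, and
 * the next quotad_check_timeo() calls debit exactly that much.  On an
 * early wakeup for statfs or truncate work, t is forced to 0 so no
 * countdown credit is consumed.  Worked example (made-up numbers):
 * with quotad_timeo = 6000 and statfs_timeo = 3000, the daemon sleeps
 * t = 3000; a wakeup after 1000 jiffies makes schedule_timeout()
 * return 2000, leaving t = 1000.
 *
 * The daemon itself is started from the mount path roughly as below
 * (a sketch; see init_threads() in fs/gfs2/ops_fstype.c):
 *
 *      struct task_struct *p;
 *
 *      p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
 *      if (IS_ERR(p))
 *              return PTR_ERR(p);
 *      sdp->sd_quotad_process = p;
 */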
1591
1592static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
1593{
1594        struct gfs2_sbd *sdp = sb->s_fs_info;
1595
1596        memset(state, 0, sizeof(*state));
1597
1598        switch (sdp->sd_args.ar_quota) {
1599        case GFS2_QUOTA_ON:
1600                state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
1601                state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
1602                fallthrough;
1603        case GFS2_QUOTA_ACCOUNT:
1604                state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
1605                                                  QCI_SYSFILE;
1606                state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
1607                                                  QCI_SYSFILE;
1608                break;
1609        case GFS2_QUOTA_OFF:
1610                break;
1611        }
1612        if (sdp->sd_quota_inode) {
1613                state->s_state[USRQUOTA].ino =
1614                                        GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1615                state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
1616        }
1617        state->s_state[USRQUOTA].nextents = 1;  /* unsupported */
1618        state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
1619        state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
1620        return 0;
1621}
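/*
 * How ->get_state() surfaces to userspace: the VFS maps it onto the
 * XFS-style Q_XGETQSTAT quotactl.  A hedged sketch (the device path
 * is hypothetical; error handling trimmed):
 */
#include <stdio.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>

int main(void)
{
        struct fs_quota_stat qs;

        if (quotactl(QCMD(Q_XGETQSTAT, USRQUOTA), "/dev/mapper/gfs2lv",
                     0, (void *)&qs) != 0) {
                perror("quotactl");
                return 1;
        }
        printf("accounting: %s, enforcement: %s\n",
               (qs.qs_flags & FS_QUOTA_UDQ_ACCT) ? "on" : "off",
               (qs.qs_flags & FS_QUOTA_UDQ_ENFD) ? "on" : "off");
        return 0;
}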
1622
1623static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
1624                          struct qc_dqblk *fdq)
1625{
1626        struct gfs2_sbd *sdp = sb->s_fs_info;
1627        struct gfs2_quota_lvb *qlvb;
1628        struct gfs2_quota_data *qd;
1629        struct gfs2_holder q_gh;
1630        int error;
1631
1632        memset(fdq, 0, sizeof(*fdq));
1633
1634        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1635                return -ESRCH; /* Crazy XFS error code */
1636
1637        if ((qid.type != USRQUOTA) &&
1638            (qid.type != GRPQUOTA))
1639                return -EINVAL;
1640
1641        error = qd_get(sdp, qid, &qd);
1642        if (error)
1643                return error;
1644        error = do_glock(qd, FORCE, &q_gh);
1645        if (error)
1646                goto out;
1647
1648        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
1649        fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
1650        fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
1651        fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
1652
1653        gfs2_glock_dq_uninit(&q_gh);
1654out:
1655        qd_put(qd);
1656        return error;
1657}
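/*
 * Unit conversion note: qb_limit/qb_warn/qb_value are cached in the
 * LVB in filesystem blocks, while struct qc_dqblk carries bytes, hence
 * the "<< sdp->sd_sb.sb_bsize_shift" above.  Worked example (made-up
 * numbers): with 4 KiB blocks (sb_bsize_shift == 12), a qb_limit of
 * 25600 blocks reports as 25600 << 12 == 104857600 bytes (100 MiB).
 */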
1658
1659/* GFS2 only supports a subset of the XFS fields */
1660#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
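/*
 * The mask above means gfs2_set_dqblk() below accepts only the space
 * limits from an XFS-style Q_XSETQLIM request.  A hedged userspace
 * sketch (hypothetical device path; per the XFS quotactl ABI the
 * d_blk_* limits are given in 512-byte basic blocks):
 */
#include <string.h>
#include <sys/quota.h>
#include <linux/dqblk_xfs.h>

static int set_space_limits(const char *dev, unsigned int uid,
                            unsigned long long soft_bb,
                            unsigned long long hard_bb)
{
        struct fs_disk_quota dq;

        memset(&dq, 0, sizeof(dq));
        dq.d_version = FS_DQUOT_VERSION;
        dq.d_flags = FS_USER_QUOTA;
        dq.d_id = uid;
        dq.d_fieldmask = FS_DQ_BSOFT | FS_DQ_BHARD;
        dq.d_blk_softlimit = soft_bb;
        dq.d_blk_hardlimit = hard_bb;
        return quotactl(QCMD(Q_XSETQLIM, USRQUOTA), dev, uid,
                        (void *)&dq);
}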
1661
1662static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
1663                          struct qc_dqblk *fdq)
1664{
1665        struct gfs2_sbd *sdp = sb->s_fs_info;
1666        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1667        struct gfs2_quota_data *qd;
1668        struct gfs2_holder q_gh, i_gh;
1669        unsigned int data_blocks, ind_blocks;
1670        unsigned int blocks = 0;
1671        int alloc_required;
1672        loff_t offset;
1673        int error;
1674
1675        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1676                return -ESRCH; /* Crazy XFS error code */
1677
1678        if ((qid.type != USRQUOTA) &&
1679            (qid.type != GRPQUOTA))
1680                return -EINVAL;
1681
1682        if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1683                return -EINVAL;
1684
1685        error = qd_get(sdp, qid, &qd);
1686        if (error)
1687                return error;
1688
1689        error = gfs2_qa_get(ip);
1690        if (error)
1691                goto out_put;
1692
1693        inode_lock(&ip->i_inode);
1694        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1695        if (error)
1696                goto out_unlockput;
1697        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1698        if (error)
1699                goto out_q;
1700
1701        /* Check for an existing entry; if none, alloc new blocks */
1702        error = update_qd(sdp, qd);
1703        if (error)
1704                goto out_i;
1705
1706        /* If nothing has changed, this is a no-op */
1707        if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
1708            ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1709                fdq->d_fieldmask ^= QC_SPC_SOFT;
1710
1711        if ((fdq->d_fieldmask & QC_SPC_HARD) &&
1712            ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1713                fdq->d_fieldmask ^= QC_SPC_HARD;
1714
1715        if ((fdq->d_fieldmask & QC_SPACE) &&
1716            ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1717                fdq->d_fieldmask ^= QC_SPACE;
1718
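        /*
         * The three masked tests above cleared any QC_* bit whose value
         * already matches the on-disk qb_* field (note the byte-to-block
         * ">> sb_bsize_shift" conversion), so a fully redundant request
         * falls out below before any block reservation or transaction.
         */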
1719        if (fdq->d_fieldmask == 0)
1720                goto out_i;
1721
1722        offset = qd2offset(qd);
1723        alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1724        if (gfs2_is_stuffed(ip))
1725                alloc_required = 1;
1726        if (alloc_required) {
1727                struct gfs2_alloc_parms ap = { .aflags = 0, };
1728                gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1729                                       &data_blocks, &ind_blocks);
1730                blocks = 1 + data_blocks + ind_blocks;
1731                ap.target = blocks;
1732                error = gfs2_inplace_reserve(ip, &ap);
1733                if (error)
1734                        goto out_i;
1735                blocks += gfs2_rg_blocks(ip, blocks);
1736        }
1737
1738        /* Some quotas span block boundaries and can update two blocks,
1739           so add an extra block to the transaction to handle such quotas */
1740        error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1741        if (error)
1742                goto out_release;
1743
1744        /* Apply changes */
1745        error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1746        if (!error)
1747                clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
1748
1749        gfs2_trans_end(sdp);
1750out_release:
1751        if (alloc_required)
1752                gfs2_inplace_release(ip);
1753out_i:
1754        gfs2_glock_dq_uninit(&i_gh);
1755out_q:
1756        gfs2_glock_dq_uninit(&q_gh);
1757out_unlockput:
1758        gfs2_qa_put(ip);
1759        inode_unlock(&ip->i_inode);
1760out_put:
1761        qd_put(qd);
1762        return error;
1763}
1764
1765const struct quotactl_ops gfs2_quotactl_ops = {
1766        .quota_sync     = gfs2_quota_sync,
1767        .get_state      = gfs2_quota_get_state,
1768        .get_dqblk      = gfs2_get_dqblk,
1769        .set_dqblk      = gfs2_set_dqblk,
1770};
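/*
 * This table is what the VFS quotactl(2) front end dispatches through.
 * There is no ->quota_on/->quota_off here because GFS2 quota state is
 * fixed at mount time by the "quota=" mount option (sd_args.ar_quota
 * above).  The hookup is done from the fill_super path, roughly (a
 * sketch; see fs/gfs2/ops_fstype.c):
 *
 *      sb->s_qcop = &gfs2_quotactl_ops;
 */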
1771
1772void __init gfs2_quota_hash_init(void)
1773{
1774        unsigned i;
1775
1776        for (i = 0; i < GFS2_QD_HASH_SIZE; i++)
1777                INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
1778}
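/*
 * A hedged sketch of how a bucket in qd_hash_table is picked (assumed
 * shape based on the jhash include and GFS2_QD_HASH_MASK; the helper
 * name is hypothetical).  The superblock pointer is mixed in as a salt
 * so identical IDs on different mounts land in different chains:
 */
static inline struct hlist_bl_head *
qd_hash_bucket(const struct gfs2_sbd *sdp, const struct kqid qid)
{
        unsigned int h;

        h = jhash(&sdp, sizeof(sdp), 0);        /* per-sb salt */
        h = jhash(&qid, sizeof(qid), h);        /* mix in the quota id */
        return &qd_hash_table[h & GFS2_QD_HASH_MASK];
}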
1779