linux/fs/gfs2/log.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */
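/*
 * For example, if the descriptor block has room for 500 structures and each
 * continuation block has room for 508, then 1200 structures need
 * 1 + DIV_ROUND_UP(1200 - 500, 508) = 3 blocks.  (The exact per-block counts
 * depend on the filesystem block size; the figures here are illustrative.)
 */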

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_ROUND_UP(nstruct - first, second);
        }

        return blks;
}

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
        bd->bd_tr = NULL;
        list_del_init(&bd->bd_ail_st_list);
        list_del_init(&bd->bd_ail_gl_list);
        atomic_dec(&bd->bd_gl->gl_ail_count);
        brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction (AIL entry) being written back
 *
 * Returns: 1 if the ail lock was dropped and the caller should restart its
 *          list walk, 0 otherwise
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
                               struct writeback_control *wbc,
                               struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
        struct gfs2_glock *gl = NULL;
        struct address_space *mapping;
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

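        /*
         * Walk the transaction's ail1 list, oldest entries first.  Buffers
         * that are no longer busy have completed their I/O and are moved to
         * the ail2 list; dirty buffers are written back through the address
         * space they belong to, which means dropping the ail lock around
         * generic_writepages().  Returning 1 tells the caller to restart its
         * walk, since the lists may have changed while the lock was dropped.
         */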
        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_tr == tr);

                if (!buffer_busy(bh)) {
                        if (!buffer_uptodate(bh))
                                gfs2_io_error_bh(sdp, bh);
                        list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
                        continue;
                }

                if (!buffer_dirty(bh))
                        continue;
                if (gl == bd->bd_gl)
                        continue;
                gl = bd->bd_gl;
                list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
                mapping = bh->b_page->mapping;
                if (!mapping)
                        continue;
                spin_unlock(&sdp->sd_ail_lock);
                generic_writepages(mapping, wbc);
                spin_lock(&sdp->sd_ail_lock);
                if (wbc->nr_to_write <= 0)
                        break;
                return 1;
        }

        return 0;
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
        struct list_head *head = &sdp->sd_ail1_list;
        struct gfs2_trans *tr;
        struct blk_plug plug;

        trace_gfs2_ail_flush(sdp, wbc, 1);
        blk_start_plug(&plug);
        spin_lock(&sdp->sd_ail_lock);
restart:
        list_for_each_entry_reverse(tr, head, tr_list) {
                if (wbc->nr_to_write <= 0)
                        break;
                if (gfs2_ail1_start_one(sdp, wbc, tr))
                        goto restart;
        }
        spin_unlock(&sdp->sd_ail_lock);
        blk_finish_plug(&plug);
        trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = LONG_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction (AIL entry)
 *
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;
                gfs2_assert(sdp, bd->bd_tr == tr);
                if (buffer_busy(bh))
                        continue;
                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);
                list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
        }
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 *
 * Returns: 1 if the ail1 list is now empty, 0 otherwise
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr, *s;
        int oldest_tr = 1;
        int ret;

        spin_lock(&sdp->sd_ail_lock);
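        /*
         * Transactions can only leave the ail1 list in order: once a
         * transaction is found that still has unwritten ail1 buffers,
         * every newer transaction must stay on the list as well, even
         * if its own buffers have all been written.
         */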
        list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
                gfs2_ail1_empty_one(sdp, tr);
                if (list_empty(&tr->tr_ail1_list) && oldest_tr)
                        list_move(&tr->tr_list, &sdp->sd_ail2_list);
                else
                        oldest_tr = 0;
        }
        ret = list_empty(&sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
                        bh = bd->bd_bh;
                        if (!buffer_locked(bh))
                                continue;
                        get_bh(bh);
                        spin_unlock(&sdp->sd_ail_lock);
                        wait_on_buffer(bh);
                        brelse(bh);
                        return;
                }
        }
        spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail2_empty_one - Remove all entries from a transaction's ail2 list
 * @sdp: the filesystem
 * @tr: the transaction (AIL entry)
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &tr->tr_ail2_list;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_tr == tr);
                gfs2_remove_from_ail(bd);
        }
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_trans *tr, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        spin_lock(&sdp->sd_ail_lock);

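        /*
         * Remove any transaction whose first block now lies behind the new
         * tail, i.e. tr_first falls in the (possibly wrapped) interval
         * [old_tail, new_tail).  For example, with old_tail = 90 and
         * new_tail = 10 on a journal that has wrapped, transactions starting
         * at block 95 or block 5 are removed, while one starting at block 20
         * is kept.
         */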
        list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
                a = (old_tail <= tr->tr_first);
                b = (tr->tr_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, tr);
                list_del(&tr->tr_list);
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
                kfree(tr);
        }

        spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
        atomic_add(blks, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, blks);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                                  sdp->sd_jdesc->jd_blocks);
        up_read(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        int ret = 0;
        unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
        unsigned wanted = blks + reserved_blks;
        DEFINE_WAIT(wait);
        int did_wait = 0;
        unsigned int free_blocks;

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;
retry:
        free_blocks = atomic_read(&sdp->sd_log_blks_free);
        if (unlikely(free_blocks <= wanted)) {
                do {
                        prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        wake_up(&sdp->sd_logd_waitq);
                        did_wait = 1;
                        if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
                                io_schedule();
                        free_blocks = atomic_read(&sdp->sd_log_blks_free);
                } while(free_blocks <= wanted);
                finish_wait(&sdp->sd_log_waitq, &wait);
        }
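        /*
         * Claim the blocks with a cmpxchg so that a racing reservation
         * cannot take the free-block count below zero; if someone else
         * changed the count in the meantime, drop the claim and retry.
         */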
        atomic_inc(&sdp->sd_reserving_log);
        if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
                                free_blocks - blks) != free_blocks) {
                if (atomic_dec_and_test(&sdp->sd_reserving_log))
                        wake_up(&sdp->sd_reserving_log_wait);
                goto retry;
        }
        trace_gfs2_log_blocks(sdp, -blks);

        /*
         * If we waited, then so might others, wake them up _after_ we get
         * our share of the log.
         */
        if (unlikely(did_wait))
                wake_up(&sdp->sd_log_waitq);

        down_read(&sdp->sd_log_flush_lock);
        if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
                gfs2_log_release(sdp, blks);
                ret = -EROFS;
        }
        if (atomic_dec_and_test(&sdp->sd_reserving_log))
                wake_up(&sdp->sd_reserving_log_wait);
        return ret;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */
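/*
 * For example, on a 1000-block journal the distance from block 950 (older)
 * to block 10 (newer) is (10 - 950) + 1000 = 60 blocks, because the journal
 * wraps around.
 */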

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
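/*
 * For example, a transaction with 600 new metadata buffers and no journaled
 * data buffers on a 4K-block filesystem reserves 600 + DIV_ROUND_UP(600, 502)
 * = 602 blocks, plus one block for the overall header, giving 603.
 */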
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
        unsigned int reserved = 0;
        unsigned int mbuf;
        unsigned int dbuf;
        struct gfs2_trans *tr = sdp->sd_log_tr;

        if (tr) {
                mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
                dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
                reserved = mbuf + dbuf;
                /* Account for header blocks */
                reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
                reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
        }

        if (sdp->sd_log_commited_revoke > 0)
                reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                          sizeof(u64));
        /* One for the overall header */
        if (reserved)
                reserved++;
        return reserved;
}

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        unsigned int tail;

        spin_lock(&sdp->sd_ail_lock);

        if (list_empty(&sdp->sd_ail1_list)) {
                tail = sdp->sd_log_head;
        } else {
                tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
                                tr_list);
                tail = tr->tr_first;
        }

        spin_unlock(&sdp->sd_ail_lock);

        return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        atomic_add(dist, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, dist);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_tail = new_tail;
}

static void log_flush_wait(struct gfs2_sbd *sdp)
{
        DEFINE_WAIT(wait);

        if (atomic_read(&sdp->sd_log_in_flight)) {
                do {
                        prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&sdp->sd_log_in_flight))
                                io_schedule();
                } while(atomic_read(&sdp->sd_log_in_flight));
                finish_wait(&sdp->sd_log_flush_wait, &wait);
        }
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_inode *ipa, *ipb;

        ipa = list_entry(a, struct gfs2_inode, i_ordered);
        ipb = list_entry(b, struct gfs2_inode, i_ordered);

        if (ipa->i_no_addr < ipb->i_no_addr)
                return -1;
        if (ipa->i_no_addr > ipb->i_no_addr)
                return 1;
        return 0;
}

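/*
 * Ordered-mode data is written out before the log itself is committed.  The
 * ordered inodes are sorted by disk address (ip_cmp above) so that writeback
 * is issued in roughly ascending block order.
 */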
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;
        LIST_HEAD(written);

        spin_lock(&sdp->sd_ordered_lock);
        list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
                list_move(&ip->i_ordered, &written);
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawrite(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        list_splice(&written, &sdp->sd_log_le_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        spin_lock(&sdp->sd_ordered_lock);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
                list_del(&ip->i_ordered);
                WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawait(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        spin_lock(&sdp->sd_ordered_lock);
        if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
                list_del(&ip->i_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
        struct buffer_head *bh = bd->bd_bh;
        struct gfs2_glock *gl = bd->bd_gl;

        bh->b_private = NULL;
        bd->bd_blkno = bh->b_blocknr;
        gfs2_remove_from_ail(bd); /* drops ref on bh */
        bd->bd_bh = NULL;
        bd->bd_ops = &gfs2_revoke_lops;
        sdp->sd_log_num_revoke++;
        atomic_inc(&gl->gl_revokes);
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}

void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd, *tmp;
        int have_revokes = 0;
        int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

        gfs2_ail1_empty(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
                        if (list_empty(&bd->bd_list)) {
                                have_revokes = 1;
                                goto done;
                        }
                }
        }
done:
        spin_unlock(&sdp->sd_ail_lock);
        if (have_revokes == 0)
                return;
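        /*
         * Work out how many more revoke entries fit into the blocks that the
         * already-queued revokes will occupy anyway: the first block carries
         * a full log descriptor, each additional block only a meta header.
         */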
        while (sdp->sd_log_num_revoke > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
        max_revokes -= sdp->sd_log_num_revoke;
        if (!sdp->sd_log_num_revoke) {
                atomic_dec(&sdp->sd_log_blks_free);
                /* If no blocks have been reserved, we need to also
                 * reserve a block for the header */
                if (!sdp->sd_log_blks_reserved)
                        atomic_dec(&sdp->sd_log_blks_free);
        }
        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
                        if (max_revokes == 0)
                                goto out_of_blocks;
                        if (!list_empty(&bd->bd_list))
                                continue;
                        gfs2_add_revoke(sdp, bd);
                        max_revokes--;
                }
        }
out_of_blocks:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (!sdp->sd_log_num_revoke) {
                atomic_inc(&sdp->sd_log_blks_free);
                if (!sdp->sd_log_blks_reserved)
                        atomic_inc(&sdp->sd_log_blks_free);
        }
}

/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: The log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
 *
 * Writes the log header at the current flush head and, if the tail has
 * moved, pulls the log tail forward.
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;
        int rw = WRITE_FLUSH_FUA | REQ_META;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
        lh = page_address(page);
        clear_page(lh);

        gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));

        tail = current_tail(sdp);

        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
        lh->lh_header.__pad0 = cpu_to_be64(0);
        lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
        lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
        lh->lh_hash = cpu_to_be32(hash);

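        /*
         * Without barrier/FUA support we cannot rely on the device to order
         * the header after the data it commits, so wait for all ordered data
         * and in-flight log I/O to complete before writing the header.
         */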
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
                gfs2_ordered_wait(sdp);
                log_flush_wait(sdp);
                rw = WRITE_SYNC | REQ_META | REQ_PRIO;
        }

        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        gfs2_log_write_page(sdp, page);
        gfs2_log_flush_bio(sdp, rw);
        log_flush_wait(sdp);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @type: The type of log flush (NORMAL_FLUSH, SHUTDOWN_FLUSH or FREEZE_FLUSH)
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
                    enum gfs2_flush_type type)
{
        struct gfs2_trans *tr;
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

        down_write(&sdp->sd_log_flush_lock);

        /* Log might have been flushed while we waited for the flush lock */
        if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
                up_write(&sdp->sd_log_flush_lock);
                return;
        }
        trace_gfs2_log_flush(sdp, 1);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;
        tr = sdp->sd_log_tr;
        if (tr) {
                sdp->sd_log_tr = NULL;
                INIT_LIST_HEAD(&tr->tr_ail1_list);
                INIT_LIST_HEAD(&tr->tr_ail2_list);
                tr->tr_first = sdp->sd_log_flush_head;
                if (unlikely (state == SFS_FROZEN))
                        gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
        }

        if (unlikely(state == SFS_FROZEN))
                gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp,
                        sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

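        /*
         * Write ordered data first, then the log descriptors and metadata
         * blocks for this transaction, and finally the log header that
         * commits them.
         */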
        gfs2_ordered_write(sdp);
        lops_before_commit(sdp, tr);
        gfs2_log_flush_bio(sdp, WRITE);

        if (sdp->sd_log_head != sdp->sd_log_flush_head) {
                log_flush_wait(sdp);
                log_write_header(sdp, 0);
        } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
                atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                trace_gfs2_log_blocks(sdp, -1);
                log_write_header(sdp, 0);
        }
        lops_after_commit(sdp, tr);

        gfs2_log_lock(sdp);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_blks_reserved = 0;
        sdp->sd_log_commited_revoke = 0;

        spin_lock(&sdp->sd_ail_lock);
        if (tr && !list_empty(&tr->tr_ail1_list)) {
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
                tr = NULL;
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        if (type != NORMAL_FLUSH) {
                if (!sdp->sd_log_idle) {
                        for (;;) {
                                gfs2_ail1_start(sdp);
                                gfs2_ail1_wait(sdp);
                                if (gfs2_ail1_empty(sdp))
                                        break;
                        }
                        atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                        trace_gfs2_log_blocks(sdp, -1);
                        sdp->sd_log_flush_wrapped = 0;
                        log_write_header(sdp, 0);
                        sdp->sd_log_head = sdp->sd_log_flush_head;
                }
                if (type == SHUTDOWN_FLUSH || type == FREEZE_FLUSH)
                        gfs2_log_shutdown(sdp);
                if (type == FREEZE_FLUSH)
                        atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }

        trace_gfs2_log_flush(sdp, 0);
        up_write(&sdp->sd_log_flush_lock);

        kfree(tr);
}

/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
        WARN_ON_ONCE(old->tr_attached != 1);

        old->tr_num_buf_new     += new->tr_num_buf_new;
        old->tr_num_databuf_new += new->tr_num_databuf_new;
        old->tr_num_buf_rm      += new->tr_num_buf_rm;
        old->tr_num_databuf_rm  += new->tr_num_databuf_rm;
        old->tr_num_revoke      += new->tr_num_revoke;
        old->tr_num_revoke_rm   += new->tr_num_revoke_rm;

        list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
        list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}

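/**
 * log_refund - Attach a transaction to the log and adjust the reservation
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * Merges @tr into the transaction already attached to the log (or attaches
 * it if there is none), recalculates how many blocks are actually needed,
 * and returns any surplus of the original reservation to the free pool.
 */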
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved;
        unsigned int unused;
        unsigned int maxres;

        gfs2_log_lock(sdp);

        if (sdp->sd_log_tr) {
                gfs2_merge_trans(sdp->sd_log_tr, tr);
        } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
                gfs2_assert_withdraw(sdp, tr->tr_alloced);
                sdp->sd_log_tr = tr;
                tr->tr_attached = 1;
        }

        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        reserved = calc_reserved(sdp);
        maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
        gfs2_assert_withdraw(sdp, maxres >= reserved);
        unused = maxres - reserved;
        atomic_add(unused, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, unused);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);
        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);

        if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
            ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
            atomic_read(&sdp->sd_log_thresh2)))
                wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);

        gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_tail = sdp->sd_log_head;
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
        return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
        unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
        return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Flushes the log when the amount of pinned or in-use journal space crosses
 * the logd thresholds, and wakes up anyone waiting for log space.
 */

int gfs2_logd(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t = 1;
        DEFINE_WAIT(wait);

        while (!kthread_should_stop()) {

                if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
                }

                if (gfs2_ail_flush_reqd(sdp)) {
                        gfs2_ail1_start(sdp);
                        gfs2_ail1_wait(sdp);
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
                }

                if (!gfs2_ail_flush_reqd(sdp))
                        wake_up(&sdp->sd_log_waitq);

                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

                try_to_freeze();

                do {
                        prepare_to_wait(&sdp->sd_logd_waitq, &wait,
                                        TASK_INTERRUPTIBLE);
                        if (!gfs2_ail_flush_reqd(sdp) &&
                            !gfs2_jrnl_flush_reqd(sdp) &&
                            !kthread_should_stop())
                                t = schedule_timeout(t);
                } while(t && !gfs2_ail_flush_reqd(sdp) &&
                        !gfs2_jrnl_flush_reqd(sdp) &&
                        !kthread_should_stop());
                finish_wait(&sdp->sd_logd_waitq, &wait);
        }

        return 0;
}