/*
 * linux/fs/jbd/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
        BUFFER_TRACE(bh, "");
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
}

/*
 * When an ext3-ordered file is truncated, it is possible that many pages
 * are not successfully freed, because they are attached to a committing
 * transaction.  After the transaction commits, these pages are left on
 * the LRU, with no ->mapping, and with attached buffers.  These pages are
 * trivially reclaimable by the VM, but their apparent absence upsets the
 * VM accounting, and it makes the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under journal->j_list_lock.  The caller provided us with a ref
 * against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
        struct page *page;

        if (buffer_dirty(bh))
                goto nope;
        if (atomic_read(&bh->b_count) != 1)
                goto nope;
        page = bh->b_page;
        if (!page)
                goto nope;
        if (page->mapping)
                goto nope;

        /* OK, it's a truncated page */
        if (!trylock_page(page))
                goto nope;

        page_cache_get(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
        page_cache_release(page);
        return;

nope:
        __brelse(bh);
}

/*
 * Decrement the reference count on a data buffer.  If it has been marked
 * 'BH_Freed', release it and the page to which it belongs if possible.
 */
static void release_data_buffer(struct buffer_head *bh)
{
        if (buffer_freed(bh)) {
                clear_buffer_freed(bh);
                release_buffer_page(bh);
        } else
                put_bh(bh);
}

/*
 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
 * held.  For lock-ranking reasons we must trylock.  If we lose, schedule away
 * and return 0.  j_list_lock is dropped in this case.
 */
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
        if (!jbd_trylock_bh_state(bh)) {
                spin_unlock(&journal->j_list_lock);
                schedule();
                return 0;
        }
        return 1;
}

/* Done it all: now write the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_write_commit_record(journal_t *journal,
                                        transaction_t *commit_transaction)
{
        struct journal_head *descriptor;
        struct buffer_head *bh;
        journal_header_t *header;
        int ret;

        if (is_journal_aborted(journal))
                return 0;

        descriptor = journal_get_descriptor_buffer(journal);
        if (!descriptor)
                return 1;

        bh = jh2bh(descriptor);

        header = (journal_header_t *)(bh->b_data);
        header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
        header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
        header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

        JBUFFER_TRACE(descriptor, "write commit block");
        set_buffer_dirty(bh);

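        /*
         * With barriers enabled, issue the commit block with an explicit
         * flush + FUA, so it reaches stable storage only after all the
         * journal blocks submitted before it have done so.
         */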
        if (journal->j_flags & JFS_BARRIER)
                ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA);
        else
                ret = sync_dirty_buffer(bh);

        put_bh(bh);             /* One for getblk() */
        journal_put_journal_head(descriptor);

        return (ret == -EIO);
}

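/*
 * Submit the first 'bufs' entries of wbuf[] for write-out.  The
 * per-buffer safety reference taken by the caller is consumed by
 * submit_bh(), so the caller must reset its count and refill the
 * array before reusing it.
 */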
static void journal_do_submit_data(struct buffer_head **wbuf, int bufs,
                                   int write_op)
{
        int i;

        for (i = 0; i < bufs; i++) {
                wbuf[i]->b_end_io = end_buffer_write_sync;
                /* We use up our safety reference in submit_bh() */
                submit_bh(write_op, wbuf[i]);
        }
}

/*
 * Submit all the data buffers to disk
 */
static int journal_submit_data_buffers(journal_t *journal,
                                       transaction_t *commit_transaction,
                                       int write_op)
{
        struct journal_head *jh;
        struct buffer_head *bh;
        int locked;
        int bufs = 0;
        struct buffer_head **wbuf = journal->j_wbuf;
        int err = 0;

        /*
         * Whenever we unlock the journal and sleep, things can get added
         * onto ->t_sync_datalist, so we have to keep looping back to
         * write_out_data until we *know* that the list is empty.
         *
         * Cleanup any flushed data buffers from the data list.  Even in
         * abort mode, we want to flush this out as soon as possible.
         */
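        /*
         * wbuf[] is the journal's scratch array (j_wbufsize entries):
         * dirty buffers are batched into it and submitted together
         * whenever it fills up or a lock has to be dropped.
         */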
write_out_data:
        cond_resched();
        spin_lock(&journal->j_list_lock);

        while (commit_transaction->t_sync_datalist) {
                jh = commit_transaction->t_sync_datalist;
                bh = jh2bh(jh);
                locked = 0;

                /* Get reference just to make sure buffer does not disappear
                 * when we are forced to drop various locks */
                get_bh(bh);
                /* If the buffer is dirty, we need to submit IO and hence
                 * we need the buffer lock. We try to lock the buffer without
                 * blocking. If we fail, we need to drop j_list_lock and do
                 * blocking lock_buffer().
                 */
                if (buffer_dirty(bh)) {
                        if (!trylock_buffer(bh)) {
                                BUFFER_TRACE(bh, "needs blocking lock");
                                spin_unlock(&journal->j_list_lock);
                                /* Write out all data to prevent deadlocks */
                                journal_do_submit_data(wbuf, bufs, write_op);
                                bufs = 0;
                                lock_buffer(bh);
                                spin_lock(&journal->j_list_lock);
                        }
                        locked = 1;
                }
                /* We have to get bh_state lock. Again out of order, sigh. */
                if (!inverted_lock(journal, bh)) {
                        jbd_lock_bh_state(bh);
                        spin_lock(&journal->j_list_lock);
                }
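                /*
                 * If inverted_lock() failed it dropped j_list_lock and
                 * slept; we now hold both locks again (taken in the
                 * correct order this time), but the buffer may have been
                 * refiled or cleaned up meanwhile, so revalidate jh.
                 */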
                /* Someone already cleaned up the buffer? */
                if (!buffer_jbd(bh) || bh2jh(bh) != jh
                        || jh->b_transaction != commit_transaction
                        || jh->b_jlist != BJ_SyncData) {
                        jbd_unlock_bh_state(bh);
                        if (locked)
                                unlock_buffer(bh);
                        BUFFER_TRACE(bh, "already cleaned up");
                        release_data_buffer(bh);
                        continue;
                }
                if (locked && test_clear_buffer_dirty(bh)) {
                        BUFFER_TRACE(bh, "needs writeout, adding to array");
                        wbuf[bufs++] = bh;
                        __journal_file_buffer(jh, commit_transaction,
                                                BJ_Locked);
                        jbd_unlock_bh_state(bh);
                        if (bufs == journal->j_wbufsize) {
                                spin_unlock(&journal->j_list_lock);
                                journal_do_submit_data(wbuf, bufs, write_op);
                                bufs = 0;
                                goto write_out_data;
                        }
                } else if (!locked && buffer_locked(bh)) {
                        __journal_file_buffer(jh, commit_transaction,
                                                BJ_Locked);
                        jbd_unlock_bh_state(bh);
                        put_bh(bh);
                } else {
                        BUFFER_TRACE(bh, "writeout complete: unfile");
                        if (unlikely(!buffer_uptodate(bh)))
                                err = -EIO;
                        __journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                        if (locked)
                                unlock_buffer(bh);
                        journal_remove_journal_head(bh);
                        /* One for our safety reference, other for
                         * journal_remove_journal_head() */
                        put_bh(bh);
                        release_data_buffer(bh);
                }

                if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
                        spin_unlock(&journal->j_list_lock);
                        goto write_out_data;
                }
        }
        spin_unlock(&journal->j_list_lock);
        journal_do_submit_data(wbuf, bufs, write_op);

        return err;
}

/*
 * journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
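 *
 * A rough map of the commit phases (matching the jbd_debug markers
 * below):
 *   1: switch to a new revoke table and mark the transaction T_FLUSH
 *   2: submit the transaction's data buffers and wait for them;
 *      write the revoke records
 *   3: write the metadata buffers to the log behind descriptor blocks
 *   4: wait for the temporary (BJ_IO) metadata buffers
 *   5: wait for the control (descriptor and revoke) buffers
 *   6: write the commit record
 *   7: file surviving buffers for checkpointing
 *   8: mark the transaction T_FINISHED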
 */
void journal_commit_transaction(journal_t *journal)
{
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
        int bufs;
        int flags;
        int err;
        unsigned int blocknr;
        ktime_t start_time;
        u64 commit_time;
        char *tagp = NULL;
        journal_header_t *header;
        journal_block_tag_t *tag = NULL;
        int space_left = 0;
        int first_tag = 0;
        int tag_flag;
        int i;
        int write_op = WRITE_SYNC;

        /*
         * First job: lock down the current transaction and wait for
         * all outstanding updates to complete.
         */

#ifdef COMMIT_STATS
        spin_lock(&journal->j_list_lock);
        summarise_journal_usage(journal);
        spin_unlock(&journal->j_list_lock);
#endif

        /* Do we need to erase the effects of a prior journal_flush? */
        if (journal->j_flags & JFS_FLUSHED) {
                jbd_debug(3, "super block updated\n");
                journal_update_superblock(journal, 1);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }

        J_ASSERT(journal->j_running_transaction != NULL);
        J_ASSERT(journal->j_committing_transaction == NULL);

        commit_transaction = journal->j_running_transaction;
        J_ASSERT(commit_transaction->t_state == T_RUNNING);

        jbd_debug(1, "JBD: starting commit of transaction %d\n",
                        commit_transaction->t_tid);

        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;

        /*
         * Use plugged writes here, since we want to submit several before
         * we unplug the device. We don't do explicit unplugging in here,
         * instead we rely on sync_buffer() doing the unplug for us.
         */
        if (commit_transaction->t_synchronous_commit)
                write_op = WRITE_SYNC_PLUG;
        spin_lock(&commit_transaction->t_handle_lock);
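        /*
         * Standard wait-queue pattern: queue ourselves before rechecking
         * t_updates, so a wakeup that arrives between the check and the
         * schedule() cannot be lost.
         */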
        while (commit_transaction->t_updates) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_updates, &wait,
                                        TASK_UNINTERRUPTIBLE);
                if (commit_transaction->t_updates) {
                        spin_unlock(&commit_transaction->t_handle_lock);
                        spin_unlock(&journal->j_state_lock);
                        schedule();
                        spin_lock(&journal->j_state_lock);
                        spin_lock(&commit_transaction->t_handle_lock);
                }
                finish_wait(&journal->j_wait_updates, &wait);
        }
        spin_unlock(&commit_transaction->t_handle_lock);

        J_ASSERT(commit_transaction->t_outstanding_credits <=
                        journal->j_max_transaction_buffers);

        /*
         * First thing we are allowed to do is to discard any remaining
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
         * transactions, then it may try to do a journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * In this case, the filesystem must still reserve write access
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
         * that multiple journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
                 * A journal_get_undo_access()+journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);

                        jbd_lock_bh_state(bh);
                        jbd_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
                journal_refile_buffer(journal, jh);
        }

        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it potentially
         * frees some memory
         */
        spin_lock(&journal->j_list_lock);
        __journal_clean_checkpoint_list(journal);
        spin_unlock(&journal->j_list_lock);

        jbd_debug(3, "JBD: commit phase 1\n");

        /*
         * Switch to a new revoke table.
         */
        journal_switch_revoke_table(journal);

        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
        start_time = ktime_get();
        commit_transaction->t_log_start = journal->j_head;
        wake_up(&journal->j_wait_transaction_locked);
        spin_unlock(&journal->j_state_lock);

        jbd_debug(3, "JBD: commit phase 2\n");

        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
        err = journal_submit_data_buffers(journal, commit_transaction,
                                          write_op);

        /*
         * Wait for all previously submitted IO to complete.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_locked_list) {
                struct buffer_head *bh;

                jh = commit_transaction->t_locked_list->b_tprev;
                bh = jh2bh(jh);
                get_bh(bh);
                if (buffer_locked(bh)) {
                        spin_unlock(&journal->j_list_lock);
                        wait_on_buffer(bh);
                        spin_lock(&journal->j_list_lock);
                }
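                /*
                 * A write error on ordered data: lock the page and mark
                 * its mapping with AS_EIO so a later fsync() on the file
                 * reports the failure, then remember it so we can abort
                 * the journal below if JFS_ABORT_ON_SYNCDATA_ERR is set.
                 */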
                if (unlikely(!buffer_uptodate(bh))) {
                        if (!trylock_page(bh->b_page)) {
                                spin_unlock(&journal->j_list_lock);
                                lock_page(bh->b_page);
                                spin_lock(&journal->j_list_lock);
                        }
                        if (bh->b_page->mapping)
                                set_bit(AS_EIO, &bh->b_page->mapping->flags);

                        unlock_page(bh->b_page);
                        SetPageError(bh->b_page);
                        err = -EIO;
                }
                if (!inverted_lock(journal, bh)) {
                        put_bh(bh);
                        spin_lock(&journal->j_list_lock);
                        continue;
                }
                if (buffer_jbd(bh) && bh2jh(bh) == jh &&
                    jh->b_transaction == commit_transaction &&
                    jh->b_jlist == BJ_Locked) {
                        __journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                        journal_remove_journal_head(bh);
                        put_bh(bh);
                } else {
                        jbd_unlock_bh_state(bh);
                }
                release_data_buffer(bh);
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);

        if (err) {
                char b[BDEVNAME_SIZE];

                printk(KERN_WARNING
                        "JBD: Detected IO errors while flushing file data "
                        "on %s\n", bdevname(journal->j_fs_dev, b));
                if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
                        journal_abort(journal, err);
                err = 0;
        }

        journal_write_revoke_records(journal, commit_transaction, write_op);

        /*
         * If we found any dirty or locked buffers, then we should have
         * looped back up to the write_out_data label.  If there weren't
         * any then journal_clean_data_list should have wiped the list
         * clean by now, so check that it is in fact empty.
         */
        J_ASSERT(commit_transaction->t_sync_datalist == NULL);

        jbd_debug(3, "JBD: commit phase 3\n");

        /*
         * Way to go: we have now written out all of the data for a
         * transaction!  Now comes the tricky part: we need to write out
         * metadata.  Loop over the transaction's entire buffer list:
         */
        spin_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_COMMIT;
        spin_unlock(&journal->j_state_lock);

        J_ASSERT(commit_transaction->t_nr_buffers <=
                 commit_transaction->t_outstanding_credits);

        descriptor = NULL;
        bufs = 0;
        while (commit_transaction->t_buffers) {

                /* Find the next buffer to be journaled... */

                jh = commit_transaction->t_buffers;

                /* If we're in abort mode, we just un-journal the buffer and
                   release it. */

                if (is_journal_aborted(journal)) {
                        clear_buffer_jbddirty(jh2bh(jh));
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
                        journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
                         * aborting. */
                        if (!commit_transaction->t_buffers)
                                goto start_journal_io;
                        continue;
                }

                /* Make sure we have a descriptor block in which to
                   record the metadata buffer. */

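                /*
                 * Descriptor block layout (one journal block): a
                 * journal_header_t, then a packed array of
                 * journal_block_tag_t entries.  The 16-byte journal UUID
                 * follows the first tag (later tags set JFS_FLAG_SAME_UUID
                 * and omit it), and the final tag carries
                 * JFS_FLAG_LAST_TAG.
                 */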
                if (!descriptor) {
                        struct buffer_head *bh;

                        J_ASSERT(bufs == 0);

                        jbd_debug(4, "JBD: get descriptor\n");

                        descriptor = journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
                                journal_abort(journal, -EIO);
                                continue;
                        }

                        bh = jh2bh(descriptor);
                        jbd_debug(4, "JBD: got buffer %llu (%p)\n",
                                (unsigned long long)bh->b_blocknr, bh->b_data);
                        header = (journal_header_t *)&bh->b_data[0];
                        header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
                        header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
                        header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        space_left = bh->b_size - sizeof(journal_header_t);
                        first_tag = 1;
                        set_buffer_jwrite(bh);
                        set_buffer_dirty(bh);
                        wbuf[bufs++] = bh;

                        /* Record it so that we can wait for IO
                           completion later */
                        BUFFER_TRACE(bh, "ph3: file as descriptor");
                        journal_file_buffer(descriptor, commit_transaction,
                                        BJ_LogCtl);
                }

                /* Where is the buffer to be written? */

                err = journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
                        journal_abort(journal, err);
                        continue;
                }

                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log, but this counter is changed
                 * by journal_next_log_block() also.
                 */
                commit_transaction->t_outstanding_credits--;

                /* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
                get_bh(jh2bh(jh));

                /* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO. */

                set_buffer_jwrite(jh2bh(jh));
                /*
                 * akpm: journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
                 * We need to clean this up before we release new_bh
                 * (which is of type BJ_IO)
                 */
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
                set_buffer_jwrite(jh2bh(new_jh));
                wbuf[bufs++] = jh2bh(new_jh);

                /* Record the new block's tag in the current descriptor
                   buffer */

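                /*
                 * A returned flag of 1 means the buffer was "escaped":
                 * its first four bytes matched JFS_MAGIC_NUMBER and were
                 * zeroed in the on-disk copy so recovery cannot mistake
                 * the data for a journal block; JFS_FLAG_ESCAPE tells
                 * replay to restore them.
                 */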
                tag_flag = 0;
                if (flags & 1)
                        tag_flag |= JFS_FLAG_ESCAPE;
                if (!first_tag)
                        tag_flag |= JFS_FLAG_SAME_UUID;

                tag = (journal_block_tag_t *) tagp;
                tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be32(tag_flag);
                tagp += sizeof(journal_block_tag_t);
                space_left -= sizeof(journal_block_tag_t);

                if (first_tag) {
                        memcpy(tagp, journal->j_uuid, 16);
                        tagp += 16;
                        space_left -= 16;
                        first_tag = 0;
                }

                /* If there's no more to do, or if the descriptor is full,
                   let the IO rip! */

                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
                    space_left < sizeof(journal_block_tag_t) + 16) {

                        jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

                        /* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

                        tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);

start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
                                submit_bh(write_op, bh);
                        }
                        cond_resched();

                        /* Force a new descriptor to be generated next
                           time round the loop. */
                        descriptor = NULL;
                        bufs = 0;
                }
        }

        /* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
        */

        jbd_debug(3, "JBD: commit phase 4\n");

        /*
         * akpm: these are BJ_IO, and j_list_lock is not needed.
         * See __journal_try_to_free_buffer.
         */
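        /*
         * The BJ_IO and BJ_Shadow lists are kept in lockstep by
         * journal_write_metadata_buffer(), so each temporary buffer taken
         * off the tail of t_iobuf_list below pairs with the original
         * metadata buffer at the tail of t_shadow_list.
         */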
wait_for_iobuf:
        while (commit_transaction->t_iobuf_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_iobuf_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_iobuf;
                }
                if (cond_resched())
                        goto wait_for_iobuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                clear_buffer_jwrite(bh);

                JBUFFER_TRACE(jh, "ph4: unfile after journal write");
                journal_unfile_buffer(journal, jh);

                /*
                 * ->t_iobuf_list should contain only dummy buffer_heads
                 * which were created by journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
                journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);

                /* We also have to unlock and free the corresponding
                   shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
                clear_buffer_jwrite(bh);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));

                /* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                journal_file_buffer(jh, commit_transaction, BJ_Forget);
                /* Wake up any transactions which were waiting for this
                   IO to complete */
                wake_up_bit(&bh->b_state, BH_Unshadow);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }

        J_ASSERT(commit_transaction->t_shadow_list == NULL);

        jbd_debug(3, "JBD: commit phase 5\n");

        /* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
        while (commit_transaction->t_log_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_log_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_ctlbuf;
                }
                if (cond_resched())
                        goto wait_for_ctlbuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
                journal_unfile_buffer(journal, jh);
                journal_put_journal_head(jh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }

        if (err)
                journal_abort(journal, err);

        jbd_debug(3, "JBD: commit phase 6\n");

        /* All metadata is written; now write the commit record and clean up */
        spin_lock(&journal->j_state_lock);
        J_ASSERT(commit_transaction->t_state == T_COMMIT);
        commit_transaction->t_state = T_COMMIT_RECORD;
        spin_unlock(&journal->j_state_lock);

        if (journal_write_commit_record(journal, commit_transaction))
                err = -EIO;

        if (err)
                journal_abort(journal, err);

        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list they
           were on before. */

        jbd_debug(3, "JBD: commit phase 7\n");

        J_ASSERT(commit_transaction->t_sync_datalist == NULL);
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
        J_ASSERT(commit_transaction->t_iobuf_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);
        J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
        /*
         * As there are other places (journal_unmap_buffer()) adding buffers
         * to this list we have to be careful and hold the j_list_lock.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;

                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
                        jh->b_transaction == journal->j_running_transaction);

                /*
                 * If there is undo-protected committed data against
                 * this buffer, then we can remove it now.  If it is a
                 * buffer needing such protection, the old frozen_data
                 * field now points to a committed version of the
                 * buffer, so rotate that field to the new committed
                 * data.
                 *
                 * Otherwise, we can just throw away the frozen data now.
                 */
                if (jh->b_committed_data) {
                        jbd_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                        }
                } else if (jh->b_frozen_data) {
                        jbd_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                }

                spin_lock(&journal->j_list_lock);
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
                        __journal_remove_checkpoint(jh);
                }

                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
                 * by journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */

                /* A buffer which has been freed while still being
                 * journaled by a previous transaction may end up still
                 * being dirty here, but we want to avoid writing that
                 * buffer back after the "add to orphan" operation has
                 * been committed.  That's not only a performance gain,
                 * it also stops aliasing problems if the buffer is left
                 * behind for writeback and gets reallocated for another
                 * use in a different page. */
                if (buffer_freed(bh) && !jh->b_next_transaction) {
                        clear_buffer_freed(bh);
                        clear_buffer_jbddirty(bh);
                }

                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
                        __journal_insert_checkpoint(jh, commit_transaction);
                        if (is_journal_aborted(journal))
                                clear_buffer_jbddirty(bh);
                        JBUFFER_TRACE(jh, "refile for checkpoint writeback");
                        __journal_refile_buffer(jh);
                        jbd_unlock_bh_state(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                        /* A buffer on the BJ_Forget list that is not
                         * jbddirty has been freed by this transaction and
                         * hence could not have been reallocated until this
                         * transaction committed.  *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on the
                         * BJ_Forget list. */
                        JBUFFER_TRACE(jh, "refile or unfile freed buffer");
                        __journal_refile_buffer(jh);
                        if (!jh->b_transaction) {
                                jbd_unlock_bh_state(bh);
                                /* needs a brelse */
                                journal_remove_journal_head(bh);
                                release_buffer_page(bh);
                        } else
                                jbd_unlock_bh_state(bh);
                }
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
        /*
         * This is a bit sleazy.  We use j_list_lock to protect transition
         * of a transaction into T_FINISHED state and calling
         * __journal_drop_transaction(). Otherwise we could race with
         * other checkpointing code processing the transaction...
         */
        spin_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        /*
         * Now recheck if some buffers did not get attached to the transaction
         * while the lock was dropped...
         */
        if (commit_transaction->t_forget) {
                spin_unlock(&journal->j_list_lock);
                spin_unlock(&journal->j_state_lock);
                goto restart_loop;
        }

        /* Done with this transaction! */

        jbd_debug(3, "JBD: commit phase 8\n");

        J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);

        commit_transaction->t_state = T_FINISHED;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

        /*
         * Fold this commit into the running average of commit times,
         * giving the most recent commit a 3/4 weight so the estimate
         * adapts quickly while still smoothing out single outliers.
         */
        if (likely(journal->j_average_commit_time))
                journal->j_average_commit_time = (commit_time*3 +
                                journal->j_average_commit_time) / 4;
        else
                journal->j_average_commit_time = commit_time;

        spin_unlock(&journal->j_state_lock);

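        /*
         * If the transaction still owns buffers that need checkpointing,
         * link it into the journal's circular list of checkpoint
         * transactions; otherwise it can be dropped immediately.
         */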
        if (commit_transaction->t_checkpoint_list == NULL &&
            commit_transaction->t_checkpoint_io_list == NULL) {
                __journal_drop_transaction(journal, commit_transaction);
        } else {
                if (journal->j_checkpoint_transactions == NULL) {
                        journal->j_checkpoint_transactions = commit_transaction;
                        commit_transaction->t_cpnext = commit_transaction;
                        commit_transaction->t_cpprev = commit_transaction;
                } else {
                        commit_transaction->t_cpnext =
                                journal->j_checkpoint_transactions;
                        commit_transaction->t_cpprev =
                                commit_transaction->t_cpnext->t_cpprev;
                        commit_transaction->t_cpnext->t_cpprev =
                                commit_transaction;
                        commit_transaction->t_cpprev->t_cpnext =
                                commit_transaction;
                }
        }
        spin_unlock(&journal->j_list_lock);

        jbd_debug(1, "JBD: commit %d complete, head %d\n",
                  journal->j_commit_sequence, journal->j_tail_sequence);

        wake_up(&journal->j_wait_done_commit);
}
 955