linux/fs/jbd2/commit.c
/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>
#include <asm/system.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
        BUFFER_TRACE(bh, "");
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers.  These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
        struct page *page;

        if (buffer_dirty(bh))
                goto nope;
        if (atomic_read(&bh->b_count) != 1)
                goto nope;
        page = bh->b_page;
        if (!page)
                goto nope;
        if (page->mapping)
                goto nope;

        /* OK, it's a truncated page */
        if (!trylock_page(page))
                goto nope;
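        /*
         * Trade our buffer reference for a page reference: once
         * __brelse() drops the last buffer reference,
         * try_to_free_buffers() can strip the page, while the page
         * reference keeps the page itself pinned until we unlock and
         * release it.
         */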
        page_cache_get(page);
        __brelse(bh);
        try_to_free_buffers(page);
        unlock_page(page);
        page_cache_release(page);
        return;

nope:
        __brelse(bh);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
                                        transaction_t *commit_transaction,
                                        struct buffer_head **cbh,
                                        __u32 crc32_sum)
{
        struct journal_head *descriptor;
        struct commit_header *tmp;
        struct buffer_head *bh;
        int ret;
        struct timespec now = current_kernel_time();

        *cbh = NULL;

        if (is_journal_aborted(journal))
                return 0;

        descriptor = jbd2_journal_get_descriptor_buffer(journal);
        if (!descriptor)
                return 1;

        bh = jh2bh(descriptor);

        tmp = (struct commit_header *)bh->b_data;
        tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
        tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
        tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
        tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
        tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

        if (JBD2_HAS_COMPAT_FEATURE(journal,
                                    JBD2_FEATURE_COMPAT_CHECKSUM)) {
                tmp->h_chksum_type      = JBD2_CRC32_CHKSUM;
                tmp->h_chksum_size      = JBD2_CRC32_CHKSUM_SIZE;
                tmp->h_chksum[0]        = cpu_to_be32(crc32_sum);
        }

        JBUFFER_TRACE(descriptor, "submit commit block");
        lock_buffer(bh);
        clear_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        bh->b_end_io = journal_end_buffer_io_sync;

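        /*
         * With barriers enabled and a synchronous commit, the commit
         * block must not reach stable storage before the blocks it
         * commits, so it is issued as a flush + FUA write.  An async
         * commit instead relies on checksums for recovery, so a plain
         * WRITE_SYNC suffices.
         */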
        if (journal->j_flags & JBD2_BARRIER &&
            !JBD2_HAS_INCOMPAT_FEATURE(journal,
                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
                ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
        else
                ret = submit_bh(WRITE_SYNC, bh);

        *cbh = bh;
        return ret;
}

/*
 * This function, together with journal_submit_commit_record(),
 * allows the commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
                                         struct buffer_head *bh)
{
        int ret = 0;

        clear_buffer_dirty(bh);
        wait_on_buffer(bh);

        if (unlikely(!buffer_uptodate(bh)))
                ret = -EIO;
        put_bh(bh);            /* One for getblk() */
        jbd2_journal_put_journal_head(bh2jh(bh));

        return ret;
}

/*
 * Write the filemap data using writepage() address_space_operations.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = mapping->nrpages * 2,
                .range_start = 0,
                .range_end = i_size_read(mapping->host),
        };

        ret = generic_writepages(mapping, &wbc);
        return ret;
}

/*
 * Submit all the data buffers of the inodes associated with the
 * transaction to disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * are currently operating on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
        struct jbd2_inode *jinode;
        int err, ret = 0;
        struct address_space *mapping;

        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                mapping = jinode->i_vfs_inode->i_mapping;
                set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                spin_unlock(&journal->j_list_lock);
                /*
                 * Submit the inode data buffers. We use writepage
                 * instead of writepages because writepages can do
                 * block allocation with delalloc, and we need to write
                 * only allocated blocks here.
                 */
                trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
                err = journal_submit_inode_data_buffers(mapping);
                if (!ret)
                        ret = err;
                spin_lock(&journal->j_list_lock);
                J_ASSERT(jinode->i_transaction == commit_transaction);
                clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                smp_mb__after_clear_bit();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }
        spin_unlock(&journal->j_list_lock);
        return ret;
}

/*
 * Wait for data submitted for writeout, and refile inodes to the
 * proper transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
                transaction_t *commit_transaction)
{
        struct jbd2_inode *jinode, *next_i;
        int err, ret = 0;

        /* For locking, see the comment in journal_submit_data_buffers() */
        spin_lock(&journal->j_list_lock);
        list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
                set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                spin_unlock(&journal->j_list_lock);
                err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
                if (err) {
                        /*
                         * Because AS_EIO is cleared by
                         * filemap_fdatawait_range(), set it again so
                         * that the user process can get -EIO from fsync().
                         */
                        set_bit(AS_EIO,
                                &jinode->i_vfs_inode->i_mapping->flags);

                        if (!ret)
                                ret = err;
                }
                spin_lock(&journal->j_list_lock);
                clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                smp_mb__after_clear_bit();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
        }

        /* Now refile inodes to the proper lists */
        list_for_each_entry_safe(jinode, next_i,
                                 &commit_transaction->t_inode_list, i_list) {
                list_del(&jinode->i_list);
                if (jinode->i_next_transaction) {
                        jinode->i_transaction = jinode->i_next_transaction;
                        jinode->i_next_transaction = NULL;
                        list_add(&jinode->i_list,
                                &jinode->i_transaction->t_inode_list);
                } else {
                        jinode->i_transaction = NULL;
                }
        }
        spin_unlock(&journal->j_list_lock);

        return ret;
}

static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
        struct page *page = bh->b_page;
        char *addr;
        __u32 checksum;

        addr = kmap_atomic(page, KM_USER0);
        checksum = crc32_be(crc32_sum,
                (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
        kunmap_atomic(addr, KM_USER0);

        return checksum;
}

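/*
 * Record @block in a descriptor tag.  The low 32 bits always go in
 * t_blocknr; journals using 64-bit block numbers store the high bits
 * in t_blocknr_high.  The shift is written as (block >> 31) >> 1
 * rather than >> 32, presumably to avoid shift-count warnings on
 * configurations where block numbers are only 32 bits wide.
 */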
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
                                   unsigned long long block)
{
        tag->t_blocknr = cpu_to_be32(block & (u32)~0);
        if (tag_bytes > JBD2_TAG_SIZE32)
                tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
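/*
 * Broadly, the commit runs in the phases flagged by the jbd_debug()
 * markers below: lock down the running transaction and wait for its
 * updates to drain; drop written-back checkpoint buffers and switch
 * revoke tables (phase 1); submit the data and revoke records
 * (phase 2a), then the metadata blocks with their descriptors
 * (phase 2b); wait for the metadata IO (phase 3) and the control
 * buffer IO (phase 4); write and wait on the commit record (phase 5);
 * finally move buffers to the checkpoint lists and retire the
 * transaction (phases 6 and 7).
 */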
void jbd2_journal_commit_transaction(journal_t *journal)
{
        struct transaction_stats_s stats;
        transaction_t *commit_transaction;
        struct journal_head *jh, *new_jh, *descriptor;
        struct buffer_head **wbuf = journal->j_wbuf;
        int bufs;
        int flags;
        int err;
        unsigned long long blocknr;
        ktime_t start_time;
        u64 commit_time;
        char *tagp = NULL;
        journal_header_t *header;
        journal_block_tag_t *tag = NULL;
        int space_left = 0;
        int first_tag = 0;
        int tag_flag;
        int i, to_free = 0;
        int tag_bytes = journal_tag_bytes(journal);
        struct buffer_head *cbh = NULL; /* For transactional checksums */
        __u32 crc32_sum = ~0;
        struct blk_plug plug;

        /*
         * First job: lock down the current transaction and wait for
         * all outstanding updates to complete.
         */

        /* Do we need to erase the effects of a prior jbd2_journal_flush? */
        if (journal->j_flags & JBD2_FLUSHED) {
                jbd_debug(3, "super block updated\n");
                jbd2_journal_update_superblock(journal, 1);
        } else {
                jbd_debug(3, "superblock not updated\n");
        }

        J_ASSERT(journal->j_running_transaction != NULL);
        J_ASSERT(journal->j_committing_transaction == NULL);

        commit_transaction = journal->j_running_transaction;
        J_ASSERT(commit_transaction->t_state == T_RUNNING);

        trace_jbd2_start_commit(journal, commit_transaction);
        jbd_debug(1, "JBD: starting commit of transaction %d\n",
                        commit_transaction->t_tid);

        write_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_LOCKED;

        trace_jbd2_commit_locking(journal, commit_transaction);
        stats.run.rs_wait = commit_transaction->t_max_wait;
        stats.run.rs_locked = jiffies;
        stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
                                              stats.run.rs_locked);

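        /*
         * Open-coded wait for running handles to drain: re-check
         * t_updates after prepare_to_wait(), and drop both
         * t_handle_lock and j_state_lock before scheduling so that the
         * handles can actually complete and wake us.
         */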
        spin_lock(&commit_transaction->t_handle_lock);
        while (atomic_read(&commit_transaction->t_updates)) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_updates, &wait,
                                        TASK_UNINTERRUPTIBLE);
                if (atomic_read(&commit_transaction->t_updates)) {
                        spin_unlock(&commit_transaction->t_handle_lock);
                        write_unlock(&journal->j_state_lock);
                        schedule();
                        write_lock(&journal->j_state_lock);
                        spin_lock(&commit_transaction->t_handle_lock);
                }
                finish_wait(&journal->j_wait_updates, &wait);
        }
        spin_unlock(&commit_transaction->t_handle_lock);

        J_ASSERT(atomic_read(&commit_transaction->t_outstanding_credits) <=
                        journal->j_max_transaction_buffers);

        /*
         * First thing we are allowed to do is to discard any remaining
         * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
         * that there are no such buffers: if a large filesystem
         * operation like a truncate needs to split itself over multiple
         * transactions, then it may try to do a jbd2_journal_restart() while
         * there are still BJ_Reserved buffers outstanding.  These must
         * be released cleanly from the current transaction.
         *
         * In this case, the filesystem must still reserve write access
         * again before modifying the buffer in the new transaction, but
         * we do not require it to remember exactly which old buffers it
         * has reserved.  This is consistent with the existing behaviour
         * that multiple jbd2_journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
                JBUFFER_TRACE(jh, "reserved, unused: refile");
                /*
                 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
                 * leave undo-committed data.
                 */
                if (jh->b_committed_data) {
                        struct buffer_head *bh = jh2bh(jh);

                        jbd_lock_bh_state(bh);
                        jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        jbd_unlock_bh_state(bh);
                }
                jbd2_journal_refile_buffer(journal, jh);
        }

        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it potentially
         * frees some memory.
         */
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_clean_checkpoint_list(journal);
        spin_unlock(&journal->j_list_lock);

        jbd_debug(3, "JBD: commit phase 1\n");

        /*
         * Switch to a new revoke table.
         */
        jbd2_journal_switch_revoke_table(journal);

        trace_jbd2_commit_flushing(journal, commit_transaction);
        stats.run.rs_flushing = jiffies;
        stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
                                             stats.run.rs_flushing);

        commit_transaction->t_state = T_FLUSH;
        journal->j_committing_transaction = commit_transaction;
        journal->j_running_transaction = NULL;
        start_time = ktime_get();
        commit_transaction->t_log_start = journal->j_head;
        wake_up(&journal->j_wait_transaction_locked);
        write_unlock(&journal->j_state_lock);

        jbd_debug(3, "JBD: commit phase 2a\n");

        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
         */
        err = journal_submit_data_buffers(journal, commit_transaction);
        if (err)
                jbd2_journal_abort(journal, err);

        blk_start_plug(&plug);
        jbd2_journal_write_revoke_records(journal, commit_transaction,
                                          WRITE_SYNC);
        blk_finish_plug(&plug);

        jbd_debug(3, "JBD: commit phase 2b\n");

        /*
         * Way to go: we have now written out all of the data for a
         * transaction!  Now comes the tricky part: we need to write out
         * metadata.  Loop over the transaction's entire buffer list:
         */
        write_lock(&journal->j_state_lock);
        commit_transaction->t_state = T_COMMIT;
        write_unlock(&journal->j_state_lock);

        trace_jbd2_commit_logging(journal, commit_transaction);
        stats.run.rs_logging = jiffies;
        stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
                                               stats.run.rs_logging);
        stats.run.rs_blocks =
                atomic_read(&commit_transaction->t_outstanding_credits);
        stats.run.rs_blocks_logged = 0;

        J_ASSERT(commit_transaction->t_nr_buffers <=
                 atomic_read(&commit_transaction->t_outstanding_credits));

        err = 0;
        descriptor = NULL;
        bufs = 0;
        blk_start_plug(&plug);
        while (commit_transaction->t_buffers) {

                /* Find the next buffer to be journaled... */

                jh = commit_transaction->t_buffers;

                /* If we're in abort mode, we just un-journal the buffer and
                   release it. */

                if (is_journal_aborted(journal)) {
                        clear_buffer_jbddirty(jh2bh(jh));
                        JBUFFER_TRACE(jh, "journal is aborting: refile");
                        jbd2_buffer_abort_trigger(jh,
                                                  jh->b_frozen_data ?
                                                  jh->b_frozen_triggers :
                                                  jh->b_triggers);
                        jbd2_journal_refile_buffer(journal, jh);
                        /* If that was the last one, we need to clean up
                         * any descriptor buffers which may have been
                         * already allocated, even if we are now
                         * aborting. */
                        if (!commit_transaction->t_buffers)
                                goto start_journal_io;
                        continue;
                }

                /* Make sure we have a descriptor block in which to
                   record the metadata buffer. */

                if (!descriptor) {
                        struct buffer_head *bh;

                        J_ASSERT(bufs == 0);

                        jbd_debug(4, "JBD: get descriptor\n");

                        descriptor = jbd2_journal_get_descriptor_buffer(journal);
                        if (!descriptor) {
                                jbd2_journal_abort(journal, -EIO);
                                continue;
                        }

                        bh = jh2bh(descriptor);
                        jbd_debug(4, "JBD: got buffer %llu (%p)\n",
                                (unsigned long long)bh->b_blocknr, bh->b_data);
                        header = (journal_header_t *)&bh->b_data[0];
                        header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
                        header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
                        header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        space_left = bh->b_size - sizeof(journal_header_t);
                        first_tag = 1;
                        set_buffer_jwrite(bh);
                        set_buffer_dirty(bh);
                        wbuf[bufs++] = bh;

                        /* Record it so that we can wait for IO
                           completion later */
                        BUFFER_TRACE(bh, "ph3: file as descriptor");
                        jbd2_journal_file_buffer(descriptor, commit_transaction,
                                        BJ_LogCtl);
                }

                /* Where is the buffer to be written? */

                err = jbd2_journal_next_log_block(journal, &blocknr);
                /* If the block mapping failed, just abandon the buffer
                   and repeat this loop: we'll fall into the
                   refile-on-abort condition above. */
                if (err) {
                        jbd2_journal_abort(journal, err);
                        continue;
                }

                /*
                 * start_this_handle() uses t_outstanding_credits to determine
                 * the free space in the log, but this counter is changed
                 * by jbd2_journal_next_log_block() also.
                 */
                atomic_dec(&commit_transaction->t_outstanding_credits);

                /* Bump b_count to prevent truncate from stumbling over
                   the shadowed buffer!  @@@ This can go if we ever get
                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
                atomic_inc(&jh2bh(jh)->b_count);

                /* Make a temporary IO buffer with which to write it out
                   (this will requeue both the metadata buffer and the
                   temporary IO buffer). new_bh goes on BJ_IO */

                set_bit(BH_JWrite, &jh2bh(jh)->b_state);
                /*
                 * akpm: jbd2_journal_write_metadata_buffer() sets
                 * new_bh->b_transaction to commit_transaction.
                 * We need to clean this up before we release new_bh
                 * (which is of type BJ_IO)
                 */
                JBUFFER_TRACE(jh, "ph3: write metadata");
                flags = jbd2_journal_write_metadata_buffer(commit_transaction,
                                                      jh, &new_jh, blocknr);
                if (flags < 0) {
                        jbd2_journal_abort(journal, flags);
                        continue;
                }
                set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
                wbuf[bufs++] = jh2bh(new_jh);

                /* Record the new block's tag in the current descriptor
                   buffer */

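                /*
                 * The low bit of the flags returned by
                 * jbd2_journal_write_metadata_buffer() indicates that the
                 * buffer was escaped: its first four bytes matched the
                 * journal magic number and were zeroed in the copy, so
                 * recovery cannot mistake the block for a descriptor.
                 */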
                tag_flag = 0;
                if (flags & 1)
                        tag_flag |= JBD2_FLAG_ESCAPE;
                if (!first_tag)
                        tag_flag |= JBD2_FLAG_SAME_UUID;

                tag = (journal_block_tag_t *) tagp;
                write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
                tag->t_flags = cpu_to_be32(tag_flag);
                tagp += tag_bytes;
                space_left -= tag_bytes;

                if (first_tag) {
                        memcpy(tagp, journal->j_uuid, 16);
                        tagp += 16;
                        space_left -= 16;
                        first_tag = 0;
                }

                /* If there's no more to do, or if the descriptor is full,
                   let the IO rip! */

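                /*
                 * The 16 extra bytes account for the journal UUID that
                 * follows a tag without JBD2_FLAG_SAME_UUID; only the
                 * first tag in a descriptor carries it here, so this
                 * check is slightly conservative.
                 */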
                if (bufs == journal->j_wbufsize ||
                    commit_transaction->t_buffers == NULL ||
                    space_left < tag_bytes + 16) {

                        jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

                        /* Write an end-of-descriptor marker before
                           submitting the IOs.  "tag" still points to
                           the last tag we set up. */

                        tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
                        for (i = 0; i < bufs; i++) {
                                struct buffer_head *bh = wbuf[i];
                                /*
                                 * Compute checksum.
                                 */
                                if (JBD2_HAS_COMPAT_FEATURE(journal,
                                        JBD2_FEATURE_COMPAT_CHECKSUM)) {
                                        crc32_sum =
                                            jbd2_checksum_data(crc32_sum, bh);
                                }

                                lock_buffer(bh);
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
                                submit_bh(WRITE_SYNC, bh);
                        }
                        cond_resched();
                        stats.run.rs_blocks_logged += bufs;

                        /* Force a new descriptor to be generated next
                           time round the loop. */
                        descriptor = NULL;
                        bufs = 0;
                }
        }

        err = journal_finish_inode_data_buffers(journal, commit_transaction);
        if (err) {
                printk(KERN_WARNING
                       "JBD2: Detected IO errors while flushing file data "
                       "on %s\n", journal->j_devname);
                if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
                        jbd2_journal_abort(journal, err);
                err = 0;
        }

        write_lock(&journal->j_state_lock);
        J_ASSERT(commit_transaction->t_state == T_COMMIT);
        commit_transaction->t_state = T_COMMIT_DFLUSH;
        write_unlock(&journal->j_state_lock);
        /*
         * If the journal is not located on the file system device,
         * then we must flush the file system device before we issue
         * the commit record
         */
        if (commit_transaction->t_need_data_flush &&
            (journal->j_fs_dev != journal->j_dev) &&
            (journal->j_flags & JBD2_BARRIER))
                blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);

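        /*
         * Async commit is an incompat feature which implies journal
         * checksums: the commit block may be submitted before the
         * metadata writes finish, because on recovery the checksum
         * exposes any transaction whose blocks did not all reach disk.
         */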
        /* Done it all: now write the commit record asynchronously. */
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                                      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                err = journal_submit_commit_record(journal, commit_transaction,
                                                 &cbh, crc32_sum);
                if (err)
                        __jbd2_journal_abort_hard(journal);
        }

        blk_finish_plug(&plug);

        /* Lo and behold: we have just managed to send a transaction to
           the log.  Before we can commit it, wait for the IO so far to
           complete.  Control buffers being written are on the
           transaction's t_log_list queue, and metadata buffers are on
           the t_iobuf_list queue.

           Wait for the buffers in reverse order.  That way we are
           less likely to be woken up until all IOs have completed, and
           so we incur less scheduling load.
        */

        jbd_debug(3, "JBD: commit phase 3\n");

        /*
         * akpm: these are BJ_IO, and j_list_lock is not needed.
         * See __journal_try_to_free_buffer.
         */
wait_for_iobuf:
        while (commit_transaction->t_iobuf_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_iobuf_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_iobuf;
                }
                if (cond_resched())
                        goto wait_for_iobuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                clear_buffer_jwrite(bh);

                JBUFFER_TRACE(jh, "ph4: unfile after journal write");
                jbd2_journal_unfile_buffer(journal, jh);

                /*
                 * ->t_iobuf_list should contain only dummy buffer_heads
                 * which were created by jbd2_journal_write_metadata_buffer().
                 */
                BUFFER_TRACE(bh, "dumping temporary bh");
                jbd2_journal_put_journal_head(jh);
                __brelse(bh);
                J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
                free_buffer_head(bh);

                /* We also have to unlock and free the corresponding
                   shadowed buffer */
                jh = commit_transaction->t_shadow_list->b_tprev;
                bh = jh2bh(jh);
                clear_bit(BH_JWrite, &bh->b_state);
                J_ASSERT_BH(bh, buffer_jbddirty(bh));

                /* The metadata is now released for reuse, but we need
                   to remember it against this transaction so that when
                   we finally commit, we can do any checkpointing
                   required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
                /*
                 * Wake up any transactions which were waiting for this IO to
                 * complete. The barrier must be here so that changes by
                 * jbd2_journal_file_buffer() take effect before wake_up_bit()
                 * does the waitqueue check.
                 */
                smp_mb();
                wake_up_bit(&bh->b_state, BH_Unshadow);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
        }

        J_ASSERT(commit_transaction->t_shadow_list == NULL);

        jbd_debug(3, "JBD: commit phase 4\n");

        /* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
        while (commit_transaction->t_log_list != NULL) {
                struct buffer_head *bh;

                jh = commit_transaction->t_log_list->b_tprev;
                bh = jh2bh(jh);
                if (buffer_locked(bh)) {
                        wait_on_buffer(bh);
                        goto wait_for_ctlbuf;
                }
                if (cond_resched())
                        goto wait_for_ctlbuf;

                if (unlikely(!buffer_uptodate(bh)))
                        err = -EIO;

                BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
                clear_buffer_jwrite(bh);
                jbd2_journal_unfile_buffer(journal, jh);
                jbd2_journal_put_journal_head(jh);
                __brelse(bh);           /* One for getblk */
                /* AKPM: bforget here */
        }

        if (err)
                jbd2_journal_abort(journal, err);

        jbd_debug(3, "JBD: commit phase 5\n");
        write_lock(&journal->j_state_lock);
        J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
        commit_transaction->t_state = T_COMMIT_JFLUSH;
        write_unlock(&journal->j_state_lock);

        if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
                err = journal_submit_commit_record(journal, commit_transaction,
                                                &cbh, crc32_sum);
                if (err)
                        __jbd2_journal_abort_hard(journal);
        }
        if (cbh)
                err = journal_wait_on_commit_record(journal, cbh);
        if (JBD2_HAS_INCOMPAT_FEATURE(journal,
                                      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
            journal->j_flags & JBD2_BARRIER) {
                blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
        }

        if (err)
                jbd2_journal_abort(journal, err);

        /* End of a transaction!  Finally, we can do checkpoint
           processing: any buffers committed as a result of this
           transaction can be removed from any checkpoint list they were
           on before. */

        jbd_debug(3, "JBD: commit phase 6\n");

        J_ASSERT(list_empty(&commit_transaction->t_inode_list));
        J_ASSERT(commit_transaction->t_buffers == NULL);
        J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
        J_ASSERT(commit_transaction->t_iobuf_list == NULL);
        J_ASSERT(commit_transaction->t_shadow_list == NULL);
        J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
        /*
         * As there are other places (journal_unmap_buffer()) adding buffers
         * to this list we have to be careful and hold the j_list_lock.
         */
        spin_lock(&journal->j_list_lock);
        while (commit_transaction->t_forget) {
                transaction_t *cp_transaction;
                struct buffer_head *bh;
                int try_to_free = 0;

                jh = commit_transaction->t_forget;
                spin_unlock(&journal->j_list_lock);
                bh = jh2bh(jh);
                /*
                 * Get a reference so that bh cannot be freed before we are
                 * done with it.
                 */
                get_bh(bh);
                jbd_lock_bh_state(bh);
                J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);

                /*
                 * If there is undo-protected committed data against
                 * this buffer, then we can remove it now.  If it is a
                 * buffer needing such protection, the old frozen_data
                 * field now points to a committed version of the
                 * buffer, so rotate that field to the new committed
                 * data.
                 *
                 * Otherwise, we can just throw away the frozen data now.
                 *
                 * We also know that the frozen data has already fired
                 * its triggers if they exist, so we can clear that too.
                 */
                if (jh->b_committed_data) {
                        jbd2_free(jh->b_committed_data, bh->b_size);
                        jh->b_committed_data = NULL;
                        if (jh->b_frozen_data) {
                                jh->b_committed_data = jh->b_frozen_data;
                                jh->b_frozen_data = NULL;
                                jh->b_frozen_triggers = NULL;
                        }
                } else if (jh->b_frozen_data) {
                        jbd2_free(jh->b_frozen_data, bh->b_size);
                        jh->b_frozen_data = NULL;
                        jh->b_frozen_triggers = NULL;
                }

                spin_lock(&journal->j_list_lock);
                cp_transaction = jh->b_cp_transaction;
                if (cp_transaction) {
                        JBUFFER_TRACE(jh, "remove from old cp transaction");
                        cp_transaction->t_chp_stats.cs_dropped++;
                        __jbd2_journal_remove_checkpoint(jh);
                }

                /* Only re-checkpoint the buffer_head if it is marked
                 * dirty.  If the buffer was added to the BJ_Forget list
                 * by jbd2_journal_forget, it may no longer be dirty and
                 * there's no point in keeping a checkpoint record for
                 * it. */

                /* A buffer which has been freed while still being
                 * journaled by a previous transaction may end up still
                 * being dirty here, but we want to avoid writing back
                 * that buffer in the future after the "add to orphan"
                 * operation has been committed.  That's not only a
                 * performance gain, it also stops aliasing problems if
                 * the buffer is left behind for writeback and gets
                 * reallocated for another use in a different page. */
                if (buffer_freed(bh) && !jh->b_next_transaction) {
                        clear_buffer_freed(bh);
                        clear_buffer_jbddirty(bh);
                }

                if (buffer_jbddirty(bh)) {
                        JBUFFER_TRACE(jh, "add to new checkpointing trans");
                        __jbd2_journal_insert_checkpoint(jh, commit_transaction);
                        if (is_journal_aborted(journal))
                                clear_buffer_jbddirty(bh);
                } else {
                        J_ASSERT_BH(bh, !buffer_dirty(bh));
                        /*
                         * A buffer which is on the BJ_Forget list and not
                         * jbddirty has been freed by this transaction and
                         * hence could not have been reallocated until this
                         * transaction has committed. *BUT* it could be
                         * reallocated once we have written all the data to
                         * disk and before we process the buffer on the
                         * BJ_Forget list.
                         */
                        if (!jh->b_next_transaction)
                                try_to_free = 1;
                }
                JBUFFER_TRACE(jh, "refile or unfile buffer");
                __jbd2_journal_refile_buffer(jh);
                jbd_unlock_bh_state(bh);
                if (try_to_free)
                        release_buffer_page(bh);        /* Drops bh reference */
                else
                        __brelse(bh);
                cond_resched_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
        /*
         * This is a bit sleazy.  We use j_list_lock to protect the transition
         * of a transaction into the T_FINISHED state and the calling of
         * __jbd2_journal_drop_transaction(). Otherwise we could race with
         * other checkpointing code processing the transaction...
         */
        write_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        /*
         * Now recheck if some buffers did not get attached to the transaction
         * while the lock was dropped...
         */
        if (commit_transaction->t_forget) {
                spin_unlock(&journal->j_list_lock);
                write_unlock(&journal->j_state_lock);
                goto restart_loop;
        }

        /* Done with this transaction! */

        jbd_debug(3, "JBD: commit phase 7\n");

        J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

        commit_transaction->t_start = jiffies;
        stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
                                              commit_transaction->t_start);

        /*
         * File the transaction statistics
         */
        stats.ts_tid = commit_transaction->t_tid;
        stats.run.rs_handle_count =
                atomic_read(&commit_transaction->t_handle_count);
        trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
                             commit_transaction->t_tid, &stats.run);

        /*
         * Calculate overall stats
         */
        spin_lock(&journal->j_history_lock);
        journal->j_stats.ts_tid++;
        journal->j_stats.run.rs_wait += stats.run.rs_wait;
        journal->j_stats.run.rs_running += stats.run.rs_running;
        journal->j_stats.run.rs_locked += stats.run.rs_locked;
        journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
        journal->j_stats.run.rs_logging += stats.run.rs_logging;
        journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
        journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
        journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
        spin_unlock(&journal->j_history_lock);

        commit_transaction->t_state = T_FINISHED;
        J_ASSERT(commit_transaction == journal->j_committing_transaction);
        journal->j_commit_sequence = commit_transaction->t_tid;
        journal->j_committing_transaction = NULL;
        commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

        /*
         * Weight the commit time higher than the average time so we don't
         * react too strongly to vast changes in the commit time.
         */
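        /* i.e. new_avg = (commit_time + 3 * old_avg) / 4, a simple EWMA */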
        if (likely(journal->j_average_commit_time))
                journal->j_average_commit_time = (commit_time +
                                journal->j_average_commit_time*3) / 4;
        else
                journal->j_average_commit_time = commit_time;
        write_unlock(&journal->j_state_lock);

        if (commit_transaction->t_checkpoint_list == NULL &&
            commit_transaction->t_checkpoint_io_list == NULL) {
                __jbd2_journal_drop_transaction(journal, commit_transaction);
                to_free = 1;
        } else {
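                /*
                 * Link this transaction into the journal's circular,
                 * doubly-linked list of transactions waiting to be
                 * checkpointed.
                 */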
                if (journal->j_checkpoint_transactions == NULL) {
                        journal->j_checkpoint_transactions = commit_transaction;
                        commit_transaction->t_cpnext = commit_transaction;
                        commit_transaction->t_cpprev = commit_transaction;
                } else {
                        commit_transaction->t_cpnext =
                                journal->j_checkpoint_transactions;
                        commit_transaction->t_cpprev =
                                commit_transaction->t_cpnext->t_cpprev;
                        commit_transaction->t_cpnext->t_cpprev =
                                commit_transaction;
                        commit_transaction->t_cpprev->t_cpnext =
                                commit_transaction;
                }
        }
        spin_unlock(&journal->j_list_lock);

        if (journal->j_commit_callback)
                journal->j_commit_callback(journal, commit_transaction);

        trace_jbd2_end_commit(journal, commit_transaction);
        jbd_debug(1, "JBD: commit %d complete, head %d\n",
                  journal->j_commit_sequence, journal->j_tail_sequence);
        if (to_free)
                kfree(commit_transaction);

        wake_up(&journal->j_wait_done_commit);
}