/*
 * linux/fs/jbd2/commit.c
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */
16#include <linux/time.h>
17#include <linux/fs.h>
18#include <linux/jbd2.h>
19#include <linux/errno.h>
20#include <linux/slab.h>
21#include <linux/mm.h>
22#include <linux/pagemap.h>
23#include <linux/jiffies.h>
24#include <linux/crc32.h>
25#include <linux/writeback.h>
26#include <linux/backing-dev.h>
27#include <linux/bio.h>
28#include <linux/blkdev.h>
29#include <linux/bitops.h>
30#include <trace/events/jbd2.h>
31#include <asm/system.h>
32
33
34
35
/*
 * IO completion handler for journal block writes: record the IO result
 * in the buffer's uptodate flag and drop the buffer lock that was taken
 * before submit_bh().
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (!uptodate)
		clear_buffer_uptodate(bh);
	else
		set_buffer_uptodate(bh);
	unlock_buffer(bh);
}
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60static void release_buffer_page(struct buffer_head *bh)
61{
62 struct page *page;
63
64 if (buffer_dirty(bh))
65 goto nope;
66 if (atomic_read(&bh->b_count) != 1)
67 goto nope;
68 page = bh->b_page;
69 if (!page)
70 goto nope;
71 if (page->mapping)
72 goto nope;
73
74
75 if (!trylock_page(page))
76 goto nope;
77
78 page_cache_get(page);
79 __brelse(bh);
80 try_to_free_buffers(page);
81 unlock_page(page);
82 page_cache_release(page);
83 return;
84
85nope:
86 __brelse(bh);
87}
88
89
90
91
92
93
94
95
96
/*
 * Build and submit the commit record for @commit_transaction.
 *
 * Allocates a descriptor block, fills in a struct commit_header (magic,
 * block type, transaction tid and current wall-clock timestamp) and, if the
 * journal has the COMPAT_CHECKSUM feature, the accumulated crc32 of the
 * transaction's blocks.  The block is submitted for write and *cbh is set so
 * the caller can later wait on it via journal_wait_on_commit_record().
 *
 * Returns 0 on successful submission (or when the journal is already
 * aborted, in which case *cbh stays NULL and nothing is written), 1 when no
 * descriptor buffer could be allocated, or the return value of submit_bh().
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec now = current_kernel_time();

	*cbh = NULL;

	/* An aborted journal never writes a commit record. */
	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	/* Fill in the on-disk commit header in the descriptor block. */
	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
	}

	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	/*
	 * With JBD2_BARRIER and no async-commit feature the commit block
	 * must not be reordered before the preceding journal writes, so it
	 * is issued with a cache flush + FUA.  With async commit a plain
	 * synchronous write is used instead — presumably the commit-block
	 * checksum lets recovery detect a torn commit (NOTE(review):
	 * confirm against the recovery code).
	 */
	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
	else
		ret = submit_bh(WRITE_SYNC, bh);

	*cbh = bh;
	return ret;
}
149
150
151
152
153
154static int journal_wait_on_commit_record(journal_t *journal,
155 struct buffer_head *bh)
156{
157 int ret = 0;
158
159 clear_buffer_dirty(bh);
160 wait_on_buffer(bh);
161
162 if (unlikely(!buffer_uptodate(bh)))
163 ret = -EIO;
164 put_bh(bh);
165 jbd2_journal_put_journal_head(bh2jh(bh));
166
167 return ret;
168}
169
170
171
172
173
174
175
176static int journal_submit_inode_data_buffers(struct address_space *mapping)
177{
178 int ret;
179 struct writeback_control wbc = {
180 .sync_mode = WB_SYNC_ALL,
181 .nr_to_write = mapping->nrpages * 2,
182 .range_start = 0,
183 .range_end = i_size_read(mapping->host),
184 };
185
186 ret = generic_writepages(mapping, &wbc);
187 return ret;
188}
189
190
191
192
193
194
195
196
197
/*
 * Submit the data pages of every inode attached to @commit_transaction.
 *
 * Walks t_inode_list under j_list_lock.  Before dropping the lock for the
 * (blocking) submission, each inode is flagged __JI_COMMIT_RUNNING —
 * presumably this pins the entry on the list while the lock is dropped
 * (NOTE(review): confirm against the code removing entries).  Waiters on
 * that bit are woken once the inode's submission completes.
 *
 * Returns the first error encountered; later inodes are still submitted.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		mapping = jinode->i_vfs_inode->i_mapping;
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		/* Submit the inode's dirty pages; may block, so the
		 * list lock is not held here. */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;	/* keep only the first error */
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		/* Order the clear before the waitqueue check in wake_up_bit(). */
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}
229
230
231
232
233
234
/*
 * Wait for the data writeback started by journal_submit_data_buffers() to
 * complete, then detach each inode from the committing transaction —
 * moving it to its next transaction when one is pending.
 *
 * Returns the first error reported by filemap_fdatawait(); the walk
 * continues through the remaining inodes regardless.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* Wait for IO on the already-submitted data buffers. */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		/* __JI_COMMIT_RUNNING marks the inode busy while the list
		 * lock is dropped around the blocking wait below. */
		set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Re-set AS_EIO on the mapping — presumably
			 * filemap_fdatawait() consumed the error flag, and
			 * a later fsync() must still observe the failure
			 * (NOTE(review): confirm against filemap code).
			 */
			set_bit(AS_EIO,
				&jinode->i_vfs_inode->i_mapping->flags);

			if (!ret)
				ret = err;	/* first error wins */
		}
		spin_lock(&journal->j_list_lock);
		clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
		/* Order the clear before the waitqueue check in wake_up_bit(). */
		smp_mb__after_clear_bit();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile each inode: onto its next transaction, or off the list. */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}
282
283static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
284{
285 struct page *page = bh->b_page;
286 char *addr;
287 __u32 checksum;
288
289 addr = kmap_atomic(page, KM_USER0);
290 checksum = crc32_be(crc32_sum,
291 (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
292 kunmap_atomic(addr, KM_USER0);
293
294 return checksum;
295}
296
297static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
298 unsigned long long block)
299{
300 tag->t_blocknr = cpu_to_be32(block & (u32)~0);
301 if (tag_bytes > JBD2_TAG_SIZE32)
302 tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
303}
304
305
306
307
308
309
310
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  Called
 * by the journal thread to begin a complete commit: it locks down the
 * running transaction, flushes data and metadata to the log, writes the
 * commit record, and finally moves the transaction onto the checkpoint
 * list (or frees it outright if nothing needs checkpointing).
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i, to_free = 0;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL;	/* commit record, if submitted */
	__u32 crc32_sum = ~0;		/* running transaction checksum */
	struct blk_plug plug;

	/*
	 * First job: lock down the current transaction and wait for all
	 * outstanding updates to complete.
	 */

	/* If a prior jbd2_journal_flush() left the journal flagged FLUSHED,
	 * rewrite the superblock before reusing the log. */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_locked = jiffies;
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	/* Wait for all running handles against this transaction to drain,
	 * dropping both locks around each schedule(). */
	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		/* Re-check after queueing on the waitqueue to avoid a
		 * lost wakeup. */
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * Discard any remaining BJ_Reserved buffers.  It is not safe to
	 * assume there are none: an operation that restarts its handle
	 * (e.g. a multi-transaction truncate) may leave reserved buffers
	 * behind, and they must be released from this transaction cleanly.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/* Free any leftover undo copy attached via
		 * b_committed_data before refiling the buffer. */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Drop any already-written-back buffers from the journal's
	 * checkpoint lists before committing: this can free memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug (3, "JBD: commit phase 1\n");

	/* Switch revoke tables so new handles record revokes against the
	 * next transaction while we write out this one's. */
	jbd2_journal_switch_revoke_table(journal);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	/* From here on the transaction is committing; a new running
	 * transaction may be started by others. */
	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug (3, "JBD: commit phase 2\n");

	/* Data buffers of attached inodes are submitted first. */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	/* Revoke records go out under a block-layer plug so the writes
	 * batch into fewer requests. */
	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(journal, commit_transaction,
					  WRITE_SYNC);
	blk_finish_plug(&plug);

	jbd_debug(3, "JBD: commit phase 2\n");

	/* All data is on its way; now write the metadata to the log. */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	/*
	 * Main metadata loop: peel buffers off t_buffers, copy each into a
	 * temporary IO buffer placed at the next log block, and record a
	 * tag for it in the current descriptor block.  IO is submitted in
	 * batches of up to j_wbufsize buffers (the wbuf[] array).
	 */
	err = 0;
	descriptor = NULL;
	bufs = 0;
	blk_start_plug(&plug);
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, just un-journal the buffer and
		 * release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, still submit any
			 * descriptor buffers already staged in wbuf[],
			 * even though we are aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to record
		 * the metadata buffer's tag. */

		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* File the descriptor so that we can wait for its
			 * IO completion later (phase 4). */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
					BJ_LogCtl);
		}

		/* Where in the log is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, abort; the refile-on-abort
		 * branch at the top of the loop will clean up. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * t_outstanding_credits is used elsewhere to size free log
		 * space, and jbd2_journal_next_log_block() consumed a
		 * block, so account for it here.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to keep the shadowed buffer alive until the
		 * IO-wait phase drops it. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Create the temporary IO buffer (new_jh) that actually
		 * goes to disk; jh itself becomes the shadow copy. */

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						      jh, &new_jh, blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
		 * buffer. */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		/* The first tag in a descriptor also carries the 16-byte
		 * journal UUID. */
		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		 * let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			 * submitting the IOs; "tag" still points to the
			 * last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/* Fold this block into the transaction
				 * checksum if the feature is enabled. */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE_SYNC, bh);
			}
			cond_resched();
			stats.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next time
			 * round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	/* Wait for the inode data writeback submitted earlier. */
	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/*
	 * If the journal is not located on the file system device, flush
	 * the file system device's cache before issuing the commit record,
	 * so the data is durable before the commit claims it is.
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);

	/* With async commit the commit record may be written without
	 * waiting for the preceding journal IO to finish. */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	blk_finish_plug(&plug);

	/*
	 * The transaction has been sent to the log.  Before finishing the
	 * commit, wait for the IO so far to complete: control buffers in
	 * the log (descriptors, revokes) and the metadata writes.
	 */

	jbd_debug(3, "JBD: commit phase 3\n");

	/*
	 * Walk t_iobuf_list: wait for each temporary IO buffer, free it,
	 * and refile its shadow counterpart as BJ_Forget.  The list is
	 * restarted after any wait/resched since it may have changed.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * t_iobuf_list holds only the temporary buffer_heads
		 * created for the journal writes; they are torn down
		 * completely here.
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* Also unlock and release the corresponding shadowed
		 * buffer (taken from the paired t_shadow_list entry). */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but remember it
		 * against this transaction so that checkpointing can be
		 * done at final commit. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/*
		 * Wake anyone waiting for this IO to complete.  The barrier
		 * must sit between jbd2_journal_file_buffer() and
		 * wake_up_bit() so the refile is visible before the
		 * waitqueue check.
		 */
		smp_mb();
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 4\n");

	/* Wait for the descriptor/revoke control buffers (BJ_LogCtl). */
 wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);

	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	/* Without async commit, the commit record is only written now,
	 * after all journal IO has completed. */
	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	/* Async commit used a plain write for the commit record, so a
	 * cache flush on the journal device is still required here. */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	/*
	 * End of a transaction!  Finally, checkpoint processing: buffers
	 * committed by this transaction can be removed from whatever
	 * checkpoint list they were on before.
	 */

	jbd_debug(3, "JBD: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * Other code paths may still add buffers to t_forget, so the list
	 * is walked under j_list_lock, dropping the lock per buffer.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Take a reference so bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);

		/*
		 * The undo copy (b_committed_data) for this buffer is no
		 * longer needed; if a frozen copy exists it becomes the
		 * new committed-data image, otherwise both are freed.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/*
		 * A buffer marked freed (and with no next transaction
		 * claiming it) needs no further journaling: clear both
		 * freed and jbddirty so it is not re-checkpointed.
		 */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * Clean and not claimed by the next transaction:
			 * try to free the underlying page as well.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		__jbd2_journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);	/* drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * Take both locks for the final state transition; other code may
	 * still be touching t_forget until we recheck it below.
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Recheck whether buffers got attached to t_forget while the
	 * locks were dropped above; if so, process them too.
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics.
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);

	/*
	 * Fold this run into the journal-wide statistics.
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * Weight the average commit time higher than this commit's time so
	 * the estimate doesn't react too strongly to outliers.
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;
	write_unlock(&journal->j_state_lock);

	/* Still holding j_list_lock (taken before the t_forget recheck):
	 * either drop the transaction entirely or link it into the
	 * circular list of checkpoint transactions. */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		to_free = 1;
	} else {
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);
	if (to_free)
		kfree(commit_transaction);

	wake_up(&journal->j_wait_done_commit);
}
1049