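/*
 * Journal commit routines for the generic filesystem journaling code
 * (jbd2): this file implements jbd2_journal_commit_transaction() and the
 * helpers that write descriptor, metadata, revoke and commit blocks to
 * the log.
 */
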
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>
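
/*
 * IO end handler for the temporary buffer_heads used to write blocks to
 * the journal: record whether the write succeeded, clear the shadow bit
 * on the original metadata buffer (if any) and wake up anyone waiting on
 * it, then unlock the temporary buffer.
 */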
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *orig_bh = bh->b_private;

	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	if (orig_bh) {
		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
		smp_mb__after_atomic();
		wake_up_bit(&orig_bh->b_state, BH_Shadow);
	}
	unlock_buffer(bh);
}
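
/*
 * release_buffer_page() is called on buffers coming off the forget list.
 * It drops the reference the caller holds on the buffer and, if the
 * buffer is clean, otherwise unused (b_count == 1) and its page is no
 * longer attached to a mapping, tries to strip all buffers from the page
 * so that the page itself becomes freeable.
 */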
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	if (!trylock_page(page))
		goto nope;

	get_page(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	put_page(page);
	return;

nope:
	__brelse(bh);
}
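
/* Compute and store the checksum of a commit block (csum v2/v3 journals). */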
static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct commit_header *h;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	h = (struct commit_header *)(bh->b_data);
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}
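
/*
 * Allocate and submit the commit record for this transaction.  If the
 * journal has already been aborted we skip the write entirely; with
 * JBD2_BARRIER set (and no async commit) the block is submitted with
 * PREFLUSH|FUA so the commit record reaches stable storage only after
 * the preceding journal blocks.
 */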
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec64 now;

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
						JBD2_COMMIT_BLOCK);
	if (!bh)
		return 1;

	tmp = (struct commit_header *)bh->b_data;
	ktime_get_coarse_real_ts64(&now);
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (jbd2_has_feature_checksum(journal)) {
		tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, bh);

	BUFFER_TRACE(bh, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_has_feature_async_commit(journal))
		ret = submit_bh(REQ_OP_WRITE,
				REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
	else
		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);

	*cbh = bh;
	return ret;
}
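
/*
 * Wait for the commit record buffer submitted by
 * journal_submit_commit_record() to complete and report any IO error.
 */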
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);

	return ret;
}
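
/*
 * Write back the dirty data range tracked for a journalled inode through
 * the inode's address_space.  This is the default callback used for
 * inodes on the committing transaction's inode list.
 */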
int jbd2_journal_submit_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = jinode->i_dirty_start,
		.range_end = jinode->i_dirty_end,
	};

	return generic_writepages(mapping, &wbc);
}
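
/* Send all the data buffers related to an inode. */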
int jbd2_submit_inode_data(struct jbd2_inode *jinode)
{
	if (!jinode || !(jinode->i_flags & JI_WRITE_DATA))
		return 0;

	trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
	return jbd2_journal_submit_inode_data_buffers(jinode);
}
EXPORT_SYMBOL(jbd2_submit_inode_data);

int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode)
{
	if (!jinode || !(jinode->i_flags & JI_WAIT_DATA) ||
	    !jinode->i_vfs_inode || !jinode->i_vfs_inode->i_mapping)
		return 0;
	return filemap_fdatawait_range_keep_errors(
		jinode->i_vfs_inode->i_mapping, jinode->i_dirty_start,
		jinode->i_dirty_end);
}
EXPORT_SYMBOL(jbd2_wait_inode_data);
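
/*
 * Submit all the data buffers of inodes associated with the transaction
 * to disk.  We are in a committing transaction, so no new inode can be
 * added to the inode list; JI_COMMIT_RUNNING protects the inode we are
 * currently working on from being released while j_list_lock is dropped.
 */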
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		if (!(jinode->i_flags & JI_WRITE_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);

		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		if (journal->j_submit_inode_data_buffers) {
			err = journal->j_submit_inode_data_buffers(jinode);
			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

int jbd2_journal_finish_inode_data_buffers(struct jbd2_inode *jinode)
{
	struct address_space *mapping = jinode->i_vfs_inode->i_mapping;

	return filemap_fdatawait_range_keep_errors(mapping,
						   jinode->i_dirty_start,
						   jinode->i_dirty_end);
}
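
/*
 * Wait for the data submitted for writeout above, then refile each inode
 * to the proper transaction (or drop it from the list) once the wait is
 * done.
 */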
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		if (!(jinode->i_flags & JI_WAIT_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);

		if (journal->j_finish_inode_data_buffers) {
			err = journal->j_finish_inode_data_buffers(jinode);
			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				 &jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
			jinode->i_dirty_start = 0;
			jinode->i_dirty_end = 0;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}

static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr);

	return checksum;
}

static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
			    unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (jbd2_has_feature_64bit(j))
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
	struct page *page = bh->b_page;
	__u8 *addr;
	__u32 csum32;
	__be32 seq;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	seq = cpu_to_be32(sequence);
	addr = kmap_atomic(page);
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
			     bh->b_size);
	kunmap_atomic(addr);

	if (jbd2_has_feature_csum3(j))
		tag3->t_checksum = cpu_to_be32(csum32);
	else
		tag->t_checksum = cpu_to_be16(csum32);
}
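
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */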
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh;
	struct buffer_head *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL;
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;
	LIST_HEAD(io_bufs);
	LIST_HEAD(log_bufs);

	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_block_tail);
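
	/*
	 * First job: lock down the current transaction and wait for all
	 * outstanding updates to complete.
	 *
	 * If a prior jbd2_journal_flush() left the journal flagged as
	 * flushed, rewrite the superblock log tail first.
	 */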
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		mutex_lock_io(&journal->j_checkpoint_mutex);
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						REQ_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	write_lock(&journal->j_state_lock);
	journal->j_flags |= JBD2_FULL_COMMIT_ONGOING;
	while (journal->j_flags & JBD2_FAST_COMMIT_ONGOING) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_fc_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		write_unlock(&journal->j_state_lock);
		schedule();
		write_lock(&journal->j_state_lock);
		finish_wait(&journal->j_fc_wait, &wait);
	}
	write_unlock(&journal->j_state_lock);

	commit_transaction = journal->j_running_transaction;

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	journal->j_fc_off = 0;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_request_delay = 0;
	stats.run.rs_locked = jiffies;
	if (commit_transaction->t_requested)
		stats.run.rs_request_delay =
			jbd2_time_diff(commit_transaction->t_requested,
				       stats.run.rs_locked);
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);
	commit_transaction->t_state = T_SWITCH;
	write_unlock(&journal->j_state_lock);

	J_ASSERT(atomic_read(&commit_transaction->t_outstanding_credits) <=
		 journal->j_max_transaction_buffers);
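
	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Such buffers can exist if a filesystem
	 * operation spanning several transactions restarted its handle
	 * while reserved buffers were still outstanding; they must be
	 * released cleanly from the committing transaction.
	 */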
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			spin_lock(&jh->b_state_lock);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			spin_unlock(&jh->b_state_lock);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Drop any already written-back buffers from the checkpoint lists
	 * before we start the commit; this can free some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal, false);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD2: commit phase 1\n");

	/* Clear revoked flags left over from the previous transaction. */
	jbd2_clear_buffer_revoked_flags(journal);

	/* Switch to a fresh revoke table for the next transaction. */
	jbd2_journal_switch_revoke_table(journal);

	/* Reserved credits can no longer be claimed; return them. */
	atomic_sub(atomic_read(&journal->j_reserved_credits),
		   &commit_transaction->t_outstanding_credits);

	write_lock(&journal->j_state_lock);
	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD2: commit phase 2a\n");
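
	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */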
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);

	jbd_debug(3, "JBD2: commit phase 2b\n");
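
	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */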
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks = commit_transaction->t_nr_buffers;
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	bufs = 0;
	descriptor = NULL;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled. */
		jh = commit_transaction->t_buffers;

		/*
		 * If we're in abort mode, just un-journal the buffer and
		 * release it.
		 */
		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/*
			 * If that was the last one, any descriptor buffer
			 * already set up still has to be dealt with, so
			 * fall through to the submission path.
			 */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/*
		 * Make sure we have a descriptor block in which to
		 * record the metadata buffer.
		 */
		if (!descriptor) {
			J_ASSERT(bufs == 0);

			jbd_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(
							commit_transaction,
							JBD2_DESCRIPTOR_BLOCK);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)descriptor->b_blocknr,
				descriptor->b_data);
			tagp = &descriptor->b_data[sizeof(journal_header_t)];
			space_left = descriptor->b_size -
						sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(descriptor);
			set_buffer_dirty(descriptor);
			wbuf[bufs++] = descriptor;

			/* File it so that we can wait for its IO later. */
			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
			jbd2_file_log_bh(&log_bufs, descriptor);
		}

		/* Where is the buffer to be written? */
		err = jbd2_journal_next_log_block(journal, &blocknr);
		/*
		 * If the block mapping failed, just abandon the buffer
		 * and repeat this loop: we'll fall into the
		 * refile-on-abort condition above.
		 */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to
		 * determine the free space in the log, but this counter is
		 * changed by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/*
		 * Bump b_count to prevent truncate from stumbling over
		 * the shadowed buffer.
		 */
		atomic_inc(&jh2bh(jh)->b_count);

		/*
		 * Make a temporary IO buffer with which to write the
		 * actual data.  jh goes onto the shadow list.
		 */
		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &wbuf[bufs], blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

		/*
		 * Record the new block's tag in the current descriptor
		 * buffer.
		 */
		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;
		bufs++;

		if (first_tag) {
			memcpy(tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/*
		 * If there's no more to do, or if the descriptor is full,
		 * let the IO rip!
		 */
		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/*
			 * Write an end-of-descriptor marker before
			 * submitting the IOs.  "tag" still points to
			 * the last tag we set up.
			 */
			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
start_journal_io:
			if (descriptor)
				jbd2_descriptor_block_csum_set(journal,
							descriptor);

			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];

				/* Compute the running commit checksum. */
				if (jbd2_has_feature_checksum(journal)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			}
			cond_resched();

			/*
			 * Force a new descriptor to be generated next
			 * time round the loop.
			 */
			descriptor = NULL;
			bufs = 0;
		}
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
			"on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}
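
	/*
	 * Get the current oldest transaction in the log before we issue a
	 * flush to the filesystem device.  After the flush we can be sure
	 * that blocks of all older transactions are checkpointed to
	 * persistent storage and it is safe to update the journal start
	 * in the superblock with the numbers we get here.
	 */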
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free significant amount of space */
		if (freed < jbd2_journal_get_max_txn_bufs(journal))
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record.
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev);

	/* Done it all: now write the commit record asynchronously. */
	if (jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}

	blk_finish_plug(&plug);
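
	/*
	 * Lo and behold: we have just managed to send a transaction to the
	 * log.  Before we can commit it, wait for the IO so far to
	 * complete.  Control buffers being written are on the log_bufs
	 * list, metadata buffers are on the io_bufs list; wait for the
	 * buffers in reverse order so we are less likely to be woken up
	 * before all IOs have completed.
	 */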
	jbd_debug(3, "JBD2: commit phase 3\n");

	while (!list_empty(&io_bufs)) {
		struct buffer_head *bh = list_entry(io_bufs.prev,
						    struct buffer_head,
						    b_assoc_buffers);

		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;

		/*
		 * The list contains temporary buffer heads created by
		 * jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to refile the corresponding shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));
		J_ASSERT_BH(bh, !buffer_shadow(bh));

		/*
		 * The metadata is now released for reuse, but we need
		 * to remember it against this transaction so that when
		 * we finally commit, we can do any checkpointing
		 * required.
		 */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT(commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
	while (!list_empty(&log_bufs)) {
		struct buffer_head *bh;

		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;
		__brelse(bh);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	if (!jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	stats.run.rs_blocks_logged++;
	if (jbd2_has_feature_async_commit(journal) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	WARN_ON_ONCE(
		atomic_read(&commit_transaction->t_outstanding_credits) < 0);

	/*
	 * Now disk caches for the filesystem device are flushed, so we are
	 * safe to erase checkpointed transactions from the log by updating
	 * the journal superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);
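
	/*
	 * End of a transaction!  Finally, we can do checkpoint processing:
	 * any buffers committed as a result of this transaction can be
	 * removed from any checkpoint list they were on before.
	 */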
	jbd_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;
		bool drop_ref;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		spin_lock(&jh->b_state_lock);
		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against this
		 * buffer, then we can remove it now.  If it is a buffer
		 * needing such protection, the old frozen_data field now
		 * points to a committed version of the buffer, so rotate
		 * that field to the new committed data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/*
		 * A buffer which has been freed while still being journaled:
		 * if no other transaction is going to use it, we can
		 * invalidate it here so the block can be reused.
		 */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			struct address_space *mapping;

			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);

			/*
			 * Block device buffers need to stay mapped all the
			 * time, so clearing buffer_jbddirty and buffer_freed
			 * is enough for them.  For file mapping buffers
			 * (i.e. journalled data) we also unmap the buffer
			 * and clear more bits.  The page mapping can get
			 * cleared under our hands, hence the READ_ONCE().
			 */
			mapping = READ_ONCE(bh->b_page->mapping);
			if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * The buffer is on the BJ_Forget list and not
			 * jbddirty, which means it has been freed by this
			 * transaction; if no later transaction needs it we
			 * can try to free the page it lives on.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		drop_ref = __jbd2_journal_refile_buffer(jh);
		spin_unlock(&jh->b_state_lock);
		if (drop_ref)
			jbd2_journal_put_journal_head(jh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
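
	/*
	 * Take j_state_lock and j_list_lock again: j_list_lock protects the
	 * transition to T_FINISHED and the call to
	 * __jbd2_journal_drop_transaction() below against racing
	 * checkpointing code.
	 */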
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Recheck whether buffers got attached to the forget list while the
	 * locks were dropped; if so, process them before finishing.
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/*
	 * Add the transaction to the checkpoint list.
	 * __jbd2_journal_remove_checkpoint() can not destroy the transaction
	 * under us because it is not marked as T_FINISHED yet.
	 */
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
			commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);

	/* Done with this transaction! */

	jbd_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);
	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;

	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * Weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time.
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;

	write_unlock(&journal->j_state_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);
	if (journal->j_fc_cleanup_callback)
		journal->j_fc_cleanup_callback(journal, 1);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	journal->j_flags &= ~JBD2_FULL_COMMIT_ONGOING;
	journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Drop the transaction now if it has no checkpoint work left. */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		jbd2_journal_free_transaction(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);
	wake_up(&journal->j_fc_wait);
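
	/*
	 * Fold this commit's statistics into the journal-wide totals.
	 */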
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.ts_requested += stats.ts_requested;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
}