#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"
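
/**
 * gfs2_struct2blk - compute number of log blocks needed for structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */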
unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}
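
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 */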
static void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}
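
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose AIL1 list to write
 * @withdraw: gets set to true on an unrecoverable I/O error
 *
 * Moves buffers that have completed I/O to the AIL2 list and starts
 * writeback on the remaining dirty buffers, one glock's mapping at a
 * time.  Drops and reacquires the AIL lock around the writeback.
 *
 * Returns: 1 if writeback was started and the caller should rescan the
 * AIL1 list, 0 otherwise
 */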
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr,
			       bool *withdraw)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (!buffer_uptodate(bh) &&
			    !test_and_set_bit(SDF_AIL1_IO_ERROR,
					      &sdp->sd_flags)) {
				gfs2_io_error_bh(sdp, bh);
				*withdraw = true;
			}
			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
			continue;
		}

		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)
			break;
		return 1;
	}

	return 0;
}
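
/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits
 * in the writeback control structure
 */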
void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	bool withdraw = false;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
			goto restart;
	}
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	if (withdraw)
		gfs2_lm_withdraw(sdp, NULL);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}
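
/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */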
static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}
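
/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction
 * @withdraw: gets set to true on an unrecoverable I/O error
 */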
static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
				bool *withdraw)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		if (buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh) &&
		    !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) {
			gfs2_io_error_bh(sdp, bh);
			*withdraw = true;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
}
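
/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 *
 * Returns: non-zero if the ail1 lists are now empty
 */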
static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;
	bool withdraw = false;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr, &withdraw);
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	if (withdraw)
		gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");

	return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}
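
/**
 * gfs2_ail2_empty_one - Remove all entries from a transaction's ail2 list
 * @sdp: the filesystem
 * @tr: the transaction
 */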
static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}
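
/*
 * ail2_empty - free ail2 transactions that the new log tail has passed
 *
 * A transaction can be freed once its first log block falls outside the
 * region between the old tail and @new_tail; the test below accounts for
 * wrap-around of the circular journal.
 */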
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, tr);
		list_del(&tr->tr_list);
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}
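
/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 */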
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}
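
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough blocks to satisfy our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */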
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);
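
	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */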
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}
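
/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */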
static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}
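
/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * We need to reserve room for all our currently used metadata buffers
 * (e.g. normal file I/O rewriting file time stamps) and all our
 * journaled data buffers for journaled files (e.g. files for which
 * chattr +j was done).  If we don't reserve enough space, log_refund
 * and gfs2_log_flush will count it as free space (sd_log_blks_free)
 * and corruption will follow.
 *
 * We can have metadata buffers and jdata buffers in the same journal.
 * Each type gets its own log descriptor, for which we need to reserve a
 * block; each type may also need more than one descriptor when there
 * are more buffers than fit in a single journal block.
 *
 * Returns: the number of blocks reserved
 */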
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	if (sdp->sd_log_commited_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(u64));
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}

static void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
			list_del(&ip->i_ordered);
			continue;
		}
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd);
	bd->bd_bh = NULL;
	sdp->sd_log_num_revoke++;
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}
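
/*
 * gfs2_write_revokes - add revokes for written-back buffers to the log
 *
 * Scans the ail lists for buffers that have been written back in place
 * and queues revokes for them, so that recovery will not replay the
 * now-stale journal copies.  The number of revokes added is limited to
 * what fits in the log descriptor and continuation blocks accounted for
 * in the current reservation.
 */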
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd, *tmp;
	int have_revokes = 0;
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_ail1_empty(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
			if (list_empty(&bd->bd_list)) {
				have_revokes = 1;
				goto done;
			}
		}
	}
done:
	spin_unlock(&sdp->sd_ail_lock);
	if (have_revokes == 0)
		return;
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
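		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */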
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
			if (max_revokes == 0)
				goto out_of_blocks;
			if (!list_empty(&bd->bd_list))
				continue;
			gfs2_add_revoke(sdp, bd);
			max_revokes--;
		}
	}
out_of_blocks:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}
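
/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */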
void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 lblock, u32 flags,
			   int op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	lh = page_address(page);
	clear_page(lh);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(lblock);
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	ktime_get_coarse_real_ts64(&tv);
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	if (!list_empty(&jd->extent_list))
		dblock = gfs2_log_bmap(sdp);
	else {
		int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
		if (gfs2_assert_withdraw(sdp, ret == 0))
			return;
	}
	lh->lh_addr = cpu_to_be64(dblock);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);
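
	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left as 0 when recovering a failed
	   node's journal. */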
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
	log_flush_wait(sdp);
}
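
/**
 * log_write_header - Write a journal header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 */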
static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	unsigned int tail;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
	tail = current_tail(sdp);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
			      sdp->sd_log_flush_head, flags, op_flags);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}
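
/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 */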
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

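	/* Log might have been flushed while we waited for the flush lock */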
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1, flags);

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		INIT_LIST_HEAD(&tr->tr_ail1_list);
		INIT_LIST_HEAD(&tr->tr_ail2_list);
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely(state == SFS_FROZEN))
			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
	}

	if (unlikely(state == SFS_FROZEN))
		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	gfs2_ordered_write(sdp);
	lops_before_commit(sdp, tr);
	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
		atomic_dec(&sdp->sd_log_blks_free);
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, flags);
	}
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		if (!sdp->sd_log_idle) {
			for (;;) {
				gfs2_ail1_start(sdp);
				gfs2_ail1_wait(sdp);
				if (gfs2_ail1_empty(sdp))
					break;
			}
			atomic_dec(&sdp->sd_log_blks_free);
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, flags);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

	trace_gfs2_log_flush(sdp, 0, flags);
	up_write(&sdp->sd_log_flush_lock);

	kfree(tr);
}
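
/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */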
static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	old->tr_num_buf_new += new->tr_num_buf_new;
	old->tr_num_databuf_new += new->tr_num_databuf_new;
	old->tr_num_buf_rm += new->tr_num_buf_rm;
	old->tr_num_databuf_rm += new->tr_num_databuf_rm;
	old->tr_num_revoke += new->tr_num_revoke;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp->sd_log_tr, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	sdp->sd_log_commited_revoke += tr->tr_num_revoke;
	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}
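
/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 */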
void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	    atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}
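
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 */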
void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) +
		atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);

	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
		return 1;

	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh2);
}
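
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Flushes the log and empties the AIL whenever the journal or AIL usage
 * thresholds are crossed, or when woken by the reservation code.
 */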
int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {
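
		/* Check for errors writing to the journal */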
		if (sdp->sd_log_error) {
			gfs2_lm_withdraw(sdp,
					 "GFS2: fsid=%s: error %d: "
					 "withdrawing the file system to "
					 "prevent further damage.\n",
					 sdp->sd_fsname, sdp->sd_log_error);
		}

		did_flush = false;
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_JFLUSH_REQD);
			did_flush = true;
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_LOGD_AIL_FLUSH_REQD);
			did_flush = true;
		}

		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}