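/*
 * GFS2 log (journal) management: log space reservation, the ail (active
 * items) lists, log flushing, and the logd daemon. (Descriptive header;
 * the original file banner was lost in extraction.)
 */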
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"
#include "trans.h"

static void gfs2_log_shutdown(struct gfs2_sbd *sdp);

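/**
 * gfs2_struct2blk - compute number of blocks needed to store structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain
 * number of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */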
unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
{
	unsigned int blks;
	unsigned int first, second;

	/* The initial struct gfs2_log_descriptor block */
	blks = 1;
	first = sdp->sd_ldptrs;

	if (nstruct > first) {
		/* Subsequent struct gfs2_log_continuation blocks */
		second = sdp->sd_inptrs;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}

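/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: the entry to remove
 *
 * Removes @bd from its transaction's ail state list and its glock's ail
 * list, drops the glock's ail count, and releases the buffer head.
 */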
void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}

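/**
 * gfs2_ail1_start_one - Start I/O on a transaction's ail1 list
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: the transaction to examine
 * @plug: the block plug currently held by the caller
 *
 * Scan the ail1 list of the transaction, starting writeback on dirty
 * buffers and moving already-written buffers to the ail2 list. The ail
 * lock is dropped and re-acquired around writeback, so -EBUSY is returned
 * to tell the caller to restart the scan.
 */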
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr, struct blk_plug *plug)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int ret = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		if (!buffer_busy(bh)) {
			if (buffer_uptodate(bh)) {
				list_move(&bd->bd_ail_st_list,
					  &tr->tr_ail2_list);
				continue;
			}
			if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
				gfs2_io_error_bh(sdp, bh);
				gfs2_withdraw_delayed(sdp);
			}
		}

		if (gfs2_withdrawn(sdp)) {
			gfs2_remove_from_ail(bd);
			continue;
		}
		if (!buffer_dirty(bh))
			continue;
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		spin_unlock(&sdp->sd_ail_lock);
		ret = generic_writepages(mapping, wbc);
		if (need_resched()) {
			blk_finish_plug(plug);
			cond_resched();
			blk_start_plug(plug);
		}
		spin_lock(&sdp->sd_ail_lock);
		if (ret == -ENODATA)
			ret = 0;
		if (ret || wbc->nr_to_write <= 0)
			break;
		/* The ail lock was dropped; tell the caller to restart. */
		return -EBUSY;
	}

	return ret;
}

static void dump_ail_list(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
					    bd_ail_st_list) {
			bh = bd->bd_bh;
			fs_err(sdp, "bd %p: blk:0x%llx bh=%p ", bd,
			       (unsigned long long)bd->bd_blkno, bh);
			if (!bh) {
				fs_err(sdp, "\n");
				continue;
			}
			fs_err(sdp, "0x%llx up2:%d dirt:%d lkd:%d req:%d "
			       "map:%d new:%d ar:%d aw:%d delay:%d "
			       "io err:%d unwritten:%d dfr:%d pin:%d esc:%d\n",
			       (unsigned long long)bh->b_blocknr,
			       buffer_uptodate(bh), buffer_dirty(bh),
			       buffer_locked(bh), buffer_req(bh),
			       buffer_mapped(bh), buffer_new(bh),
			       buffer_async_read(bh), buffer_async_write(bh),
			       buffer_delay(bh), buffer_write_io_error(bh),
			       buffer_unwritten(bh),
			       buffer_defer_completion(bh),
			       buffer_pinned(bh), buffer_escaped(bh));
		}
	}
}

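/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, in the order they appear in the journal.
 */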
void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;
	int ret;
	unsigned long flush_start = jiffies;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	ret = 0;
	if (time_after(jiffies, flush_start + (HZ * 600))) {
		fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
		       __func__, current->journal_info ? 1 : 0);
		dump_ail_list(sdp);
		goto out;
	}
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		ret = gfs2_ail1_start_one(sdp, wbc, tr, &plug);
		if (ret) {
			if (ret == -EBUSY)
				goto restart;
			break;
		}
	}
out:
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	if (ret) {
		gfs2_lm(sdp, "gfs2_ail1_start_one (generic_writepages) "
			"returned: %d\n", ret);
		gfs2_withdraw(sdp);
	}
	trace_gfs2_ail_flush(sdp, wbc, 0);
}

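/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */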
static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return gfs2_ail1_flush(sdp, &wbc);
}

static void gfs2_log_update_flush_tail(struct gfs2_sbd *sdp)
{
	unsigned int new_flush_tail = sdp->sd_log_head;
	struct gfs2_trans *tr;

	if (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_last_entry(&sdp->sd_ail1_list,
				     struct gfs2_trans, tr_list);
		new_flush_tail = tr->tr_first;
	}
	sdp->sd_log_flush_tail = new_flush_tail;
}

static void gfs2_log_update_head(struct gfs2_sbd *sdp)
{
	unsigned int new_head = sdp->sd_log_flush_head;

	if (sdp->sd_log_flush_tail == sdp->sd_log_head)
		sdp->sd_log_flush_tail = new_head;
	sdp->sd_log_head = new_head;
}

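/**
 * gfs2_ail_empty_tr - empty one of the ail lists of a transaction
 * @sdp: the filesystem
 * @tr: the transaction
 * @head: the ail list to empty
 */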
static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			      struct list_head *head)
{
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata,
				      bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}

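/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction to examine
 * @max_revokes: the number of additional revokes we are allowed to add
 *
 * Moves synced buffers to the ail2 list (issuing revokes where there is
 * still room for them) and returns the number of buffers that are still
 * actively being written back.
 */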
static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			       int *max_revokes)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int active_count = 0;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);

		/*
		 * If writeback is still in progress, leave the buffer on
		 * the ail1 list. Once a log error has been flagged, stop
		 * waiting for busy buffers so the withdraw can progress.
		 */
		if (!sdp->sd_log_error && buffer_busy(bh)) {
			active_count++;
			continue;
		}
		if (!buffer_uptodate(bh) &&
		    !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
			gfs2_io_error_bh(sdp, bh);
			gfs2_withdraw_delayed(sdp);
		}

		/*
		 * If we still have room for revokes, add one for this
		 * buffer instead of moving it to the ail2 list.
		 */
		if (*max_revokes && list_empty(&bd->bd_list)) {
			gfs2_add_revoke(sdp, bd);
			(*max_revokes)--;
			continue;
		}
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}
	return active_count;
}

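/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 * @max_revokes: If non-zero, add revokes where appropriate
 *
 * Tries to empty the ail1 lists, starting with the oldest first.
 * Returns: non-zero if the ail1 list is now empty
 */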
static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		if (!gfs2_ail1_empty_one(sdp, tr, &max_revokes) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	gfs2_log_update_flush_tail(sdp);
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	if (test_bit(SDF_WITHDRAWING, &sdp->sd_flags)) {
		gfs2_lm(sdp, "fatal: I/O error(s)\n");
		gfs2_withdraw(sdp);
	}

	return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
	list_del(&tr->tr_list);
	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
	gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
	gfs2_trans_free(sdp, tr);
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct list_head *ail2_list = &sdp->sd_ail2_list;
	unsigned int old_tail = sdp->sd_log_tail;
	struct gfs2_trans *tr, *safe;

	spin_lock(&sdp->sd_ail_lock);
	if (old_tail <= new_tail) {
		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
			if (old_tail <= tr->tr_first && tr->tr_first < new_tail)
				__ail2_empty(sdp, tr);
		}
	} else {
		list_for_each_entry_safe(tr, safe, ail2_list, tr_list) {
			if (old_tail <= tr->tr_first || tr->tr_first < new_tail)
				__ail2_empty(sdp, tr);
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}

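/**
 * gfs2_log_is_empty - Check if the log is empty
 * @sdp: The GFS2 superblock
 */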
bool gfs2_log_is_empty(struct gfs2_sbd *sdp)
{
	return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks;
}

static bool __gfs2_log_try_reserve_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
{
	unsigned int available;

	available = atomic_read(&sdp->sd_log_revokes_available);
	while (available >= revokes) {
		if (atomic_try_cmpxchg(&sdp->sd_log_revokes_available,
				       &available, available - revokes))
			return true;
	}
	return false;
}

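/**
 * gfs2_log_release_revokes - Release a given number of revokes
 * @sdp: The GFS2 superblock
 * @revokes: The number of revokes to release
 */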
void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
{
	if (revokes)
		atomic_add(revokes, &sdp->sd_log_revokes_available);
}

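/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 */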
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	if (atomic_read(&sdp->sd_log_blks_needed))
		wake_up(&sdp->sd_log_waitq);
}

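/**
 * __gfs2_log_try_reserve - Try to make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 * @taboo_blks: The number of blocks to leave free
 *
 * Try to do the same as __gfs2_log_reserve(), but fail if no more log
 * space is immediately available.
 */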
static bool __gfs2_log_try_reserve(struct gfs2_sbd *sdp, unsigned int blks,
				   unsigned int taboo_blks)
{
	unsigned int wanted = blks + taboo_blks;
	unsigned int free_blocks;

	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	while (free_blocks >= wanted) {
		if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks,
				       free_blocks - blks)) {
			trace_gfs2_log_blocks(sdp, -blks);
			return true;
		}
	}
	return false;
}

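/**
 * __gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 * @taboo_blks: The number of blocks to leave free
 *
 * @taboo_blks is set to 0 for logd, and to GFS2_LOG_FLUSH_MIN_BLOCKS for
 * all other processes. This ensures that when the log is almost full,
 * logd can still call gfs2_log_flush() one more time without blocking,
 * which will advance the tail and make more log space available.
 *
 * The log is not flushed here; instead, logd is woken up to do that work
 * for us, and we sleep until enough free blocks become available.
 */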
static void __gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks,
			       unsigned int taboo_blks)
{
	unsigned int wanted = blks + taboo_blks;
	unsigned int free_blocks;

	atomic_add(blks, &sdp->sd_log_blks_needed);
	for (;;) {
		if (current != sdp->sd_logd_process)
			wake_up(&sdp->sd_logd_waitq);
		io_wait_event(sdp->sd_log_waitq,
			      (free_blocks = atomic_read(&sdp->sd_log_blks_free),
			       free_blocks >= wanted));
		do {
			if (atomic_try_cmpxchg(&sdp->sd_log_blks_free,
					       &free_blocks,
					       free_blocks - blks))
				goto reserved;
		} while (free_blocks >= wanted);
	}

reserved:
	trace_gfs2_log_blocks(sdp, -blks);
	if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))
		wake_up(&sdp->sd_log_waitq);
}

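/**
 * gfs2_log_try_reserve - Try to make a log reservation
 * @sdp: The GFS2 superblock
 * @tr: The transaction
 * @extra_revokes: The number of additional revokes reserved (output)
 *
 * This is a wrapper around __gfs2_log_try_reserve() which also deals with
 * the revoke reservation. When revoke blocks are reserved, the reservation
 * is rounded up to whole blocks, and the surplus is reported back in
 * @extra_revokes.
 */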
bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
			  unsigned int *extra_revokes)
{
	unsigned int blks = tr->tr_reserved;
	unsigned int revokes = tr->tr_revokes;
	unsigned int revoke_blks = 0;

	*extra_revokes = 0;
	if (revokes && !__gfs2_log_try_reserve_revokes(sdp, revokes)) {
		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
		blks += revoke_blks;
	}
	if (!blks)
		return true;
	if (__gfs2_log_try_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS))
		return true;
	if (!revoke_blks)
		gfs2_log_release_revokes(sdp, revokes);
	return false;
}

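/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @tr: The transaction
 * @extra_revokes: The number of additional revokes reserved (output)
 *
 * This is a wrapper around __gfs2_log_reserve() which also deals with the
 * revoke reservation, in the same way as gfs2_log_try_reserve().
 */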
void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
		      unsigned int *extra_revokes)
{
	unsigned int blks = tr->tr_reserved;
	unsigned int revokes = tr->tr_revokes;
	unsigned int revoke_blks = 0;

	*extra_revokes = 0;
	if (revokes) {
		revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
		*extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
		blks += revoke_blks;
	}
	__gfs2_log_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS);
}

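/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two blocks in
 * the journal, wrapping around the end of the journal if necessary.
 *
 * Returns: the distance in blocks
 */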
static inline unsigned int log_distance(struct gfs2_sbd *sdp,
					unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}

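/**
 * calc_reserved - Calculate the number of blocks to keep reserved
 * @sdp: The GFS2 superblock
 *
 * Compute the number of journal blocks the currently attached transaction
 * will need: the net number of new metadata and journaled-data buffers,
 * plus one log descriptor block per buf_limit() / databuf_limit() buffers,
 * plus GFS2_LOG_FLUSH_MIN_BLOCKS.
 *
 * Returns: the number of blocks to reserve
 */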
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = GFS2_LOG_FLUSH_MIN_BLOCKS;
	unsigned int blocks;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		blocks = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		reserved += blocks + DIV_ROUND_UP(blocks, buf_limit(sdp));
		blocks = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved += blocks + DIV_ROUND_UP(blocks, databuf_limit(sdp));
	}
	return reserved;
}

static void log_pull_tail(struct gfs2_sbd *sdp)
{
	unsigned int new_tail = sdp->sd_log_flush_tail;
	unsigned int dist;

	if (new_tail == sdp->sd_log_tail)
		return;
	dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
	ail2_empty(sdp, new_tail);
	gfs2_log_release(sdp, dist);
	sdp->sd_log_tail = new_tail;
}

void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while (atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}

static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_inode *ipa, *ipb;

	ipa = list_entry(a, struct gfs2_inode, i_ordered);
	ipb = list_entry(b, struct gfs2_inode, i_ordered);

	if (ipa->i_no_addr < ipb->i_no_addr)
		return -1;
	if (ipa->i_no_addr > ipb->i_no_addr)
		return 1;
	return 0;
}

static void __ordered_del_inode(struct gfs2_inode *ip)
{
	if (!list_empty(&ip->i_ordered))
		list_del_init(&ip->i_ordered);
}

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		if (ip->i_inode.i_mapping->nrpages == 0) {
			__ordered_del_inode(ip);
			continue;
		}
		list_move(&ip->i_ordered, &written);
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_ordered)) {
		ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
		__ordered_del_inode(ip);
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	__ordered_del_inode(ip);
	spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	sdp->sd_log_num_revoke++;
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	bh->b_private = NULL;
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_revokes);
}

void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);
	}
}

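/**
 * gfs2_flush_revokes - Add as many revokes to the system transaction as we can
 * @sdp: The GFS2 superblock
 *
 * Scan the ail1 lists and turn as many already-written buffers as possible
 * into revokes, limited by the number of revokes that still fit into the
 * reserved revoke space.
 */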
void gfs2_flush_revokes(struct gfs2_sbd *sdp)
{
	/* number of revokes we still have room for */
	unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);

	gfs2_log_lock(sdp);
	gfs2_ail1_empty(sdp, max_revokes);
	gfs2_log_unlock(sdp);
}

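/**
 * gfs2_write_log_header - Write a journal log header buffer at lblock
 * @sdp: The GFS2 superblock
 * @jd: journal descriptor of the journal to which we are writing
 * @seq: sequence number
 * @tail: tail of the log
 * @lblock: value for lh_blkno (block number relative to start of journal)
 * @flags: log header flags GFS2_LOG_HEAD_*
 * @op_flags: flags to pass to the bio
 */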
void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
			   u64 seq, u32 tail, u32 lblock, u32 flags,
			   int op_flags)
{
	struct gfs2_log_header *lh;
	u32 hash, crc;
	struct page *page;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct timespec64 tv;
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	if (gfs2_withdrawn(sdp))
		return;

	page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	lh = page_address(page);
	clear_page(lh);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(seq);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(lblock);
	hash = ~crc32(~0, lh, LH_V1_SIZE);
	lh->lh_hash = cpu_to_be32(hash);

	ktime_get_coarse_real_ts64(&tv);
	lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
	lh->lh_sec = cpu_to_be64(tv.tv_sec);
	if (!list_empty(&jd->extent_list))
		dblock = gfs2_log_bmap(jd, lblock);
	else {
		int ret = gfs2_lblk_to_dblk(jd->jd_inode, lblock, &dblock);
		if (gfs2_assert_withdraw(sdp, ret == 0))
			return;
	}
	lh->lh_addr = cpu_to_be64(dblock);
	lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);

	/* We may only write local statfs, quota, etc., when writing to our
	   own journal. The values are left zeroed when recovering a journal
	   other than our own. */
	if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
		lh->lh_statfs_addr =
			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
		lh->lh_quota_addr =
			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);

		spin_lock(&sdp->sd_statfs_spin);
		lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
		lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
		lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
		spin_unlock(&sdp->sd_statfs_spin);
	}

	BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);

	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sb->s_blocksize - LH_V1_SIZE - 4);
	lh->lh_crc = cpu_to_be32(crc);

	gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock);
	gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags);
}

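/**
 * log_write_header - Write out the current journal log header
 * @sdp: The GFS2 superblock
 * @flags: The log header flags, including log header origin
 *
 * Issues the log header with barrier flags (unless SDF_NOBARRIERS is set),
 * then advances the log head and pulls in the tail.
 */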
static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}
	sdp->sd_log_idle = (sdp->sd_log_flush_tail == sdp->sd_log_flush_head);
	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++,
			      sdp->sd_log_flush_tail, sdp->sd_log_flush_head,
			      flags, op_flags);
	gfs2_log_incr_head(sdp);
	log_flush_wait(sdp);
	log_pull_tail(sdp);
	gfs2_log_update_head(sdp);
}

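/**
 * ail_drain - drain the ail lists after a withdraw
 * @sdp: Pointer to GFS2 superblock
 */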
static void ail_drain(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;

	spin_lock(&sdp->sd_ail_lock);
	/*
	 * The filesystem is being withdrawn, so none of these buffers can
	 * be written back anymore; free all remaining transactions on the
	 * ail lists along with their buffer descriptors.
	 */
	while (!list_empty(&sdp->sd_ail1_list)) {
		tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_trans_free(sdp, tr);
	}
	while (!list_empty(&sdp->sd_ail2_list)) {
		tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
				      tr_list);
		gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
		list_del(&tr->tr_list);
		gfs2_trans_free(sdp, tr);
	}
	spin_unlock(&sdp->sd_ail_lock);
}

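/**
 * empty_ail1_list - try to start IO and empty the ail1 list
 * @sdp: Pointer to GFS2 superblock
 */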
static void empty_ail1_list(struct gfs2_sbd *sdp)
{
	unsigned long start = jiffies;

	for (;;) {
		if (time_after(jiffies, start + (HZ * 600))) {
			fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
			       __func__, current->journal_info ? 1 : 0);
			dump_ail_list(sdp);
			return;
		}
		gfs2_ail1_start(sdp);
		gfs2_ail1_wait(sdp);
		if (gfs2_ail1_empty(sdp, 0))
			return;
	}
}

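/**
 * trans_drain - drain the buf and databuf queue for a failed transaction
 * @tr: the transaction to drain
 *
 * When this is called, we're taking an error exit for a log write that
 * failed, but since we bypassed the after-commit functions, we need to
 * remove the items from the buf and databuf queues ourselves.
 */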
static void trans_drain(struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd;
	struct list_head *head;

	if (!tr)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		if (!list_empty(&bd->bd_ail_st_list))
			gfs2_remove_from_ail(bd);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		if (!list_empty(&bd->bd_ail_st_list))
			gfs2_remove_from_ail(bd);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

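/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: The filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
 */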
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
	struct gfs2_trans *tr = NULL;
	unsigned int reserved_blocks = 0, used_blocks = 0;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
	unsigned int first_log_head;
	unsigned int reserved_revokes = 0;

	down_write(&sdp->sd_log_flush_lock);
	trace_gfs2_log_flush(sdp, 1, flags);

repeat:
	/*
	 * Do this check while holding the log_flush_lock to prevent new
	 * buffers from being added to the ail via gfs2_pin().
	 */
	if (gfs2_withdrawn(sdp) || !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		goto out;

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
		goto out;

	first_log_head = sdp->sd_log_head;
	sdp->sd_log_flush_head = first_log_head;

	tr = sdp->sd_log_tr;
	if (tr || sdp->sd_log_num_revoke) {
		if (reserved_blocks)
			gfs2_log_release(sdp, reserved_blocks);
		reserved_blocks = sdp->sd_log_blks_reserved;
		reserved_revokes = sdp->sd_log_num_revoke;
		if (tr) {
			sdp->sd_log_tr = NULL;
			tr->tr_first = first_log_head;
			if (unlikely(state == SFS_FROZEN)) {
				if (gfs2_assert_withdraw_delayed(sdp,
				    !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
					goto out_withdraw;
			}
		}
	} else if (!reserved_blocks) {
		unsigned int taboo_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;

		reserved_blocks = GFS2_LOG_FLUSH_MIN_BLOCKS;
		if (current == sdp->sd_logd_process)
			taboo_blocks = 0;

		if (!__gfs2_log_try_reserve(sdp, reserved_blocks, taboo_blocks)) {
			up_write(&sdp->sd_log_flush_lock);
			__gfs2_log_reserve(sdp, reserved_blocks, taboo_blocks);
			down_write(&sdp->sd_log_flush_lock);
			goto repeat;
		}
		BUG_ON(sdp->sd_log_num_revoke);
	}

	if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	if (unlikely(state == SFS_FROZEN))
		if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
			goto out_withdraw;

	gfs2_ordered_write(sdp);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	lops_before_commit(sdp, tr);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_write_header(sdp, flags);
	} else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
		log_write_header(sdp, flags);
	}
	if (gfs2_withdrawn(sdp))
		goto out_withdraw;
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_blks_reserved = 0;

	spin_lock(&sdp->sd_ail_lock);
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
		if (!sdp->sd_log_idle) {
			empty_ail1_list(sdp);
			if (gfs2_withdrawn(sdp))
				goto out_withdraw;
			log_write_header(sdp, flags);
		}
		if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			     GFS2_LOG_HEAD_FLUSH_FREEZE))
			gfs2_log_shutdown(sdp);
		if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

out_end:
	used_blocks = log_distance(sdp, sdp->sd_log_flush_head, first_log_head);
	reserved_revokes += atomic_read(&sdp->sd_log_revokes_available);
	atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
	gfs2_assert_withdraw(sdp, reserved_revokes % sdp->sd_inptrs == sdp->sd_ldptrs);
	if (reserved_revokes > sdp->sd_ldptrs)
		reserved_blocks += (reserved_revokes - sdp->sd_ldptrs) / sdp->sd_inptrs;
out:
	if (used_blocks != reserved_blocks) {
		gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
		gfs2_log_release(sdp, reserved_blocks - used_blocks);
	}
	up_write(&sdp->sd_log_flush_lock);
	gfs2_trans_free(sdp, tr);
	if (gfs2_withdrawing(sdp))
		gfs2_withdraw(sdp);
	trace_gfs2_log_flush(sdp, 0, flags);
	return;

out_withdraw:
	trans_drain(tr);
	/*
	 * If the tr_list is empty, we're withdrawing during a log flush
	 * that targets a transaction, but the transaction was never queued
	 * onto any of the ail lists. Add it to ail1 here just so that
	 * ail_drain() will find and free it.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (tr && list_empty(&tr->tr_list))
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);
	ail_drain(sdp); /* frees all transactions */
	tr = NULL;
	goto out_end;
}

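/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @sdp: the filesystem
 * @new: New transaction to be merged
 */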
static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
{
	struct gfs2_trans *old = sdp->sd_log_tr;

	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	old->tr_num_buf_new += new->tr_num_buf_new;
	old->tr_num_databuf_new += new->tr_num_databuf_new;
	old->tr_num_buf_rm += new->tr_num_buf_rm;
	old->tr_num_databuf_rm += new->tr_num_databuf_rm;
	old->tr_revokes += new->tr_revokes;
	old->tr_num_revoke += new->tr_num_revoke;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);

	spin_lock(&sdp->sd_ail_lock);
	list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
	list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		gfs2_merge_trans(sdp, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		gfs2_assert_withdraw(sdp, !test_bit(TR_ONSTACK, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	reserved = calc_reserved(sdp);
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	if (unused)
		gfs2_log_release(sdp, unused);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}

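/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 */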
void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);

	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
	     atomic_read(&sdp->sd_log_thresh2)))
		wake_up(&sdp->sd_logd_waitq);
}

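/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 */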
static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
	log_pull_tail(sdp);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
	return (atomic_read(&sdp->sd_log_pinned) +
		atomic_read(&sdp->sd_log_blks_needed) >=
		atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks -
				   atomic_read(&sdp->sd_log_blks_free);

	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
		return 1;

	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
	       atomic_read(&sdp->sd_log_thresh2);
}

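/**
 * gfs2_logd - The log flush daemon
 * @data: Pointer to the GFS2 superblock
 *
 * Flushes the log when the number of pinned or used log blocks crosses
 * the configured thresholds, and periodically writes back the ail1 list.
 */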
int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);

	while (!kthread_should_stop()) {

		if (gfs2_withdrawn(sdp)) {
			msleep_interruptible(HZ);
			continue;
		}
		/* Check for errors writing to the journal */
		if (sdp->sd_log_error) {
			gfs2_lm(sdp,
				"GFS2: fsid=%s: error %d: "
				"withdrawing the file system to "
				"prevent further damage.\n",
				sdp->sd_fsname, sdp->sd_log_error);
			gfs2_withdraw(sdp);
			continue;
		}

		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
						  GFS2_LFC_LOGD_JFLUSH_REQD);
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp, 0);
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
						  GFS2_LFC_LOGD_AIL_FLUSH_REQD);
		}

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while (t && !gfs2_ail_flush_reqd(sdp) &&
			 !gfs2_jrnl_flush_reqd(sdp) &&
			 !kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}