#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"
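
/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function.
 */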
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * back to its in-place disk block, move it to the ail2 list. */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	rgrp_lock_local(rgd);
	if (bi->bi_clone == NULL)
		goto out;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
	rgd->rd_extfail_pt = rgd->rd_free;

out:
	rgrp_unlock_local(rgd);
}
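
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */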
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}
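
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */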
static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}
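
/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 */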
static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}
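
/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: The operation and flags to set on the bio
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */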
void gfs2_log_submit_bio(struct bio **biop, int opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}
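
/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */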
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}
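
/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: The bio to get or allocate
 * @op: The block layer operation (REQ_OP)
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */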
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, int op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}
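
/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @jd: The journal descriptor
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */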
void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
		    struct page *page, unsigned size, unsigned offset,
		    u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}
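
/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next log block.
 */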
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size,
		       bh_offset(bh), dblock);
}
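
/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership of the page is transferred
 * to the log and the page may be freed at any time.
 */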
static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
}
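
/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */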
static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}
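
/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: true if the journal head has been found, false otherwise.
 */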
static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr = kmap_atomic(page);
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_atomic(kaddr);
	return ret;
}
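
/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the page with 'index' in the journal's mapping. Search the page for
 * the journal head if we are not yet done, then release the references on
 * the page so the page cache can reclaim it. We hold two references: one
 * taken by find_or_create_page() when the page was added to a bio, and one
 * taken by find_get_page() here while waiting for I/O to complete.
 */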
static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	wait_on_page_locked(page);

	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}

static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(GFP_NOIO, nr_iovecs);
	bio_copy_dev(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}
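
/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set, inode pages will not be truncated
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number (i.e. the log head).
 *
 * Returns: 0 on success, errno otherwise
 */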
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;	/* found journal head */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

static int blocknr_cmp(void *priv, const struct list_head *a,
		       const struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp);
	unsigned int nbuf;

	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

#define obsolete_rgrp_replay \
"Replaying 0x%llx from jid=%d/0x%llx but we already have a bh!\n"
#define obsolete_rgrp_replay2 \
"busy:%d, pinned:%d rg_gen:0x%llx, j_gen:0x%llx\n"

static void obsolete_rgrp(struct gfs2_jdesc *jd, struct buffer_head *bh_log,
			  u64 blkno)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrp *jrgd = (struct gfs2_rgrp *)bh_log->b_data;

	rgd = gfs2_blk2rgrpd(sdp, blkno, false);
	if (rgd && rgd->rd_addr == blkno &&
	    rgd->rd_bits && rgd->rd_bits->bi_bh) {
		fs_info(sdp, obsolete_rgrp_replay, (unsigned long long)blkno,
			jd->jd_jid, bh_log->b_blocknr);
		fs_info(sdp, obsolete_rgrp_replay2,
			buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
			buffer_pinned(rgd->rd_bits->bi_bh),
			rgd->rd_igeneration,
			be64_to_cpu(jrgd->rg_igeneration));
		gfs2_dump_glock(NULL, rgd->rd_gl, true);
	}
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG))
				obsolete_rgrp(jd, bh_log, blkno);

			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_flush_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

void gfs2_drain_revokes(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	gfs2_drain_revokes(sdp);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}
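
/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */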
static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;

	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape: restore the magic number if this block was escaped */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};