// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
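		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from flusher thread and
		 * kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */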
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

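	/*
	 * Set the page under writeback before submitting the marked
	 * buffers; end_buffer_async_write() ends page writeback once
	 * the last async_write buffer completes.
	 */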
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, write_flags, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.dirty_folio = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

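/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */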
struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;
	bufnum = blkno - (index << shift);

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
		if (!page_has_buffers(page))
			create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
	} else {
		page = find_get_page_flags(mapping, index,
					   FGP_LOCK|FGP_ACCESSED);
		if (!page)
			return NULL;
		if (!page_has_buffers(page)) {
			bh = NULL;
			goto out_unlock;
		}
	}

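	/* Locate the buffer head for our block within the page */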
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

out_unlock:
	unlock_page(page);
	put_page(page);

	return bh;
}

static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

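/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */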
struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_status);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}

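/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)
 */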
static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
			    int num)
{
	while (num > 0) {
		struct buffer_head *bh = *bhs;
		struct bio *bio;

		bio = bio_alloc(bh->b_bdev, num, op | op_flags, GFP_NOIO);
		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
		while (num > 0) {
			bh = *bhs;
			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
				BUG_ON(bio->bi_iter.bi_size == 0);
				break;
			}
			bhs++;
			num--;
		}
		bio->bi_end_io = gfs2_meta_read_endio;
		submit_bio(bio);
	}
}

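/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @rahead: Do read-ahead
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */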
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp)) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}

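/**
 * gfs2_meta_wait - Wait for a previously submitted metadata read to complete
 * @sdp: the filesystem
 * @bh: The buffer to wait for
 *
 * Returns: errno
 */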
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		return -EIO;
	}
	if (unlikely(gfs2_withdrawn(sdp)) && !gfs2_withdraw_in_prog(sdp))
		return -EIO;

	return 0;
}

void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_trans *tr = current->journal_info;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta == REMOVE_META)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		set_bit(TR_TOUCHED, &tr->tr_flags);
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		} else if (!list_empty(&bd->bd_ail_st_list) &&
			   !list_empty(&bd->bd_ail_gl_list)) {
			gfs2_remove_from_ail(bd);
		}
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

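/**
 * gfs2_ail1_wipe - remove deleted/freed buffers from the ail1 list
 * @sdp: the filesystem
 * @bstart: the first block of the freed extent
 * @blen: the number of blocks in the extent
 */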
static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
{
	struct gfs2_trans *tr, *s;
	struct gfs2_bufdata *bd, *bs;
	struct buffer_head *bh;
	u64 end = bstart + blen;

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
					 bd_ail_st_list) {
			bh = bd->bd_bh;
			if (bh->b_blocknr < bstart || bh->b_blocknr >= end)
				continue;

			gfs2_remove_from_journal(bh, REMOVE_JDATA);
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	unsigned long index = blkno >> shift;
	unsigned int bufnum = blkno - (index << shift);

	page = find_get_page_flags(mapping, index, FGP_LOCK|FGP_ACCESSED);
	if (!page)
		return NULL;
	if (!page_has_buffers(page)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}

	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		;
	get_bh(bh);
	unlock_page(page);
	put_page(page);
	return bh;
}

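/**
 * gfs2_journal_wipe - make inode's buffers so they aren't dirty/pinned anymore
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 */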
void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;
	int ty;

	gfs2_ail1_wipe(sdp, bstart, blen);
	while (blen) {
		ty = REMOVE_META;
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (!bh && gfs2_is_jdata(ip)) {
			bh = gfs2_getjdatabuf(ip, bstart);
			ty = REMOVE_JDATA;
		}
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, ty);
			spin_unlock(&sdp->sd_ail_lock);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

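/**
 * gfs2_meta_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @mtype: The block type (GFS2_METATYPE_*)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */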
int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
		     struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	int rahead = 0;

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	} else {
		*bhp = bh;
	}
	return ret;
}

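/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Returns: the first buffer in the extent
 */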
struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
		     sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(REQ_OP_READ,
				    REQ_RAHEAD | REQ_META | REQ_PRIO,
				    1, &bh);
		brelse(bh);
		dblock++;
		extlen--;

		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}