// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
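		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from flusher thread and
		 * kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */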
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);
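	/*
	 * The page and its buffers are protected by PageWriteback(), so we
	 * can drop the bh refcounts early.
	 */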
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, write_flags, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

const struct address_space_operations gfs2_meta_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};

const struct address_space_operations gfs2_rgrp_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};
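/**
 * gfs2_getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */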
struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;
	bufnum = blkno - (index << shift);

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_get_page_flags(mapping, index,
					   FGP_LOCK|FGP_ACCESSED);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
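	/* Locate header for our buffer within our page */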
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	put_page(page);

	return bh;
}

static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}
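/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */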
struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

static void gfs2_meta_read_endio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh = page_buffers(page);
		unsigned int len = bvec->bv_len;

		while (bh_offset(bh) < bvec->bv_offset)
			bh = bh->b_this_page;
		do {
			struct buffer_head *next = bh->b_this_page;
			len -= bh->b_size;
			bh->b_end_io(bh, !bio->bi_status);
			bh = next;
		} while (bh && len);
	}
	bio_put(bio);
}
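/*
 * Submit several consecutive buffer head I/O requests as a single bio I/O
 * request.  (See submit_bh_wbc.)
 */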
static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
			    int num)
{
	while (num > 0) {
		struct buffer_head *bh = *bhs;
		struct bio *bio;

		bio = bio_alloc(GFP_NOIO, num);
		bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
		bio_set_dev(bio, bh->b_bdev);
		while (num > 0) {
			bh = *bhs;
			if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
				BUG_ON(bio->bi_iter.bi_size == 0);
				break;
			}
			bhs++;
			num--;
		}
		bio->bi_end_io = gfs2_meta_read_endio;
		bio_set_op_attrs(bio, op, op_flags);
		submit_bio(bio);
	}
}
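/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @rahead: Do read-ahead
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */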
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   int rahead, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *bh, *bhs[2];
	int num = 0;

	if (unlikely(gfs2_withdrawn(sdp)) &&
	    (!sdp->sd_jdesc || gl != sdp->sd_jinode_gl)) {
		*bhp = NULL;
		return -EIO;
	}

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		flags &= ~DIO_WAIT;
	} else {
		bh->b_end_io = end_buffer_read_sync;
		get_bh(bh);
		bhs[num++] = bh;
	}

	if (rahead) {
		bh = gfs2_getbuf(gl, blkno + 1, CREATE);

		lock_buffer(bh);
		if (buffer_uptodate(bh)) {
			unlock_buffer(bh);
			brelse(bh);
		} else {
			bh->b_end_io = end_buffer_read_sync;
			bhs[num++] = bh;
		}
	}

	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
	if (!(flags & DIO_WAIT))
		return 0;

	bh = *bhp;
	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		brelse(bh);
		*bhp = NULL;
		return -EIO;
	}

	return 0;
}
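/**
 * gfs2_meta_wait - Reread a block from disk
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */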
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && test_bit(TR_TOUCHED, &tr->tr_flags))
			gfs2_io_error_bh_wd(sdp, bh);
		return -EIO;
	}
	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return 0;
}

void gfs2_remove_from_journal(struct buffer_head *bh, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_trans *tr = current->journal_info;
	int was_pinned = 0;

	if (test_clear_buffer_pinned(bh)) {
		trace_gfs2_pin(bd, 0);
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_list);
		if (meta == REMOVE_META)
			tr->tr_num_buf_rm++;
		else
			tr->tr_num_databuf_rm++;
		set_bit(TR_TOUCHED, &tr->tr_flags);
		was_pinned = 1;
		brelse(bh);
	}
	if (bd) {
		if (bd->bd_tr) {
			gfs2_trans_add_revoke(sdp, bd);
		} else if (was_pinned) {
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		} else if (!list_empty(&bd->bd_ail_st_list) &&
			   !list_empty(&bd->bd_ail_gl_list)) {
			gfs2_remove_from_ail(bd);
		}
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}
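/**
 * gfs2_ail1_wipe - remove deleted/freed buffers from the ail1 list
 * @sdp: superblock
 * @bstart: starting block address of buffers to remove
 * @blen: length of buffers to be removed
 *
 * This function is called from gfs2_journal_wipe, whose job is to remove
 * buffers, corresponding to deleted blocks, from the journal. If we find
 * any bufdata elements on the system ail1 list, they haven't been written
 * to the journal yet, so we remove them.
 */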
static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
{
	struct gfs2_trans *tr, *s;
	struct gfs2_bufdata *bd, *bs;
	struct buffer_head *bh;
	u64 end = bstart + blen;

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
					 bd_ail_st_list) {
			bh = bd->bd_bh;
			if (bh->b_blocknr < bstart || bh->b_blocknr >= end)
				continue;

			gfs2_remove_from_journal(bh, REMOVE_JDATA);
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	unsigned long index = blkno >> shift;
	unsigned int bufnum = blkno - (index << shift);

	page = find_get_page_flags(mapping, index, FGP_LOCK|FGP_ACCESSED);
	if (!page)
		return NULL;
	if (!page_has_buffers(page)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
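	/* Locate header for our buffer within our page */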
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);
	unlock_page(page);
	put_page(page);
	return bh;
}
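/**
 * gfs2_journal_wipe - make inode's buffers so they aren't dirty/AILed anymore
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 */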
void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;
	int ty;

	gfs2_ail1_wipe(sdp, bstart, blen);
	while (blen) {
		ty = REMOVE_META;
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (!bh && gfs2_is_jdata(ip)) {
			bh = gfs2_getjdatabuf(ip, bstart);
			ty = REMOVE_JDATA;
		}
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, ty);
			spin_unlock(&sdp->sd_ail_lock);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}
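/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */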
int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;
	u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
	int rahead = 0;

	if (num == ip->i_no_addr)
		rahead = ip->i_rahead;

	ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
		brelse(bh);
		ret = -EIO;
	} else {
		*bhp = bh;
	}
	return ret;
}
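/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */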
struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
		     sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(REQ_OP_READ,
				    REQ_RAHEAD | REQ_META | REQ_PRIO,
				    1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}