#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}
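
/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */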
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}
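
/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control info
 */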
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct iomap_writepage_ctx wpc = { };

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
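
/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 */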
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * If the page straddles i_size, it must be zeroed out on each and
	 * every writepage invocation because it may be mmapped.  "A file is
	 * mapped in multiples of the page size.  For a file that is not a
	 * multiple of the page size, the remaining memory is zeroed when
	 * mapped, and writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}
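
/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * If the page is checked (i.e. it was dirtied under a transaction), its
 * buffers are added to the running transaction before the page is written
 * out.
 */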
static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_jdata_page(page, wbc);
}
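
/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */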
static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
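
/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */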
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}
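
/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index
 *
 * Returns: non-zero if the loop should terminate, zero otherwise
 */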
static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}
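
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */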
static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done:
		 * wrap back to the start of the file.
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
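
/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */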
static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}
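
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */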
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
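
/*
 * __gfs2_readpage - Read a single page into the page cache.  Shared by
 * gfs2_readpage() and gfs2_internal_read() (via read_cache_page()).
 * Ordinary files go through iomap; stuffed inodes are copied out of the
 * dinode, and jdata pages with buffers fall back to mpage_readpage().
 */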
static int __gfs2_readpage(void *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}
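
/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 */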
static int gfs2_readpage(struct file *file, struct page *page)
{
	return __gfs2_readpage(file, page);
}
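
/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The amount of data copied or the error
 */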
int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}
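
/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Notes:
 * 1. This is only used for readahead, so skipped I/O is not a problem:
 *    anything not read here will be picked up by ->readpage later.
 * 2. Stuffed files are not handled here; readpage does the honours.
 * 3. jdata files go through mpage_readahead(), everything else through
 *    iomap_readahead().
 */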
static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}
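
/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */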
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}
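
/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */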
static int jdata_set_page_dirty(struct page *page)
{
	if (current->journal_info)
		SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}
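
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block, or 0 on hole or error
 */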
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}
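
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */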
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * mm accommodates an old ext3 case where clean pages might not have
	 * had the dirty bit cleared.  Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */
	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = iomap_releasepage,
	.invalidatepage = iomap_invalidatepage,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migratepage = iomap_migrate_page,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readahead = gfs2_readahead,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}