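/*
 * GFS2 address space operations (aops): the readpage, writepage, writepages,
 * bmap, invalidatepage and releasepage paths for both regular (iomap-based)
 * and journaled-data (jdata) inodes.
 */
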
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"
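
/**
 * gfs2_page_add_databufs - add a page's data buffers to the current transaction
 * @ip: the inode
 * @page: the page holding the buffers
 * @from: offset within the page of the first byte of interest
 * @len: number of bytes of interest
 *
 * Walks the buffer heads attached to @page and, for each buffer that
 * overlaps the byte range [@from, @from + @len), marks it uptodate and adds
 * it to the running transaction as journaled data.
 */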
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                            unsigned int from, unsigned int len)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int to = from + len;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from)
                        continue;
                if (start >= to)
                        break;
                set_buffer_uptodate(bh);
                gfs2_trans_add_data(ip->i_gl, bh);
        }
}
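
/**
 * gfs2_get_block_noalloc - fill in a buffer head without allocating blocks
 * @inode: the inode
 * @lblock: the logical block number
 * @bh_result: the buffer head to fill in
 * @create: ignored; no allocation is ever performed here
 *
 * Wrapper around gfs2_block_map() used on the writeback path, where every
 * block being written must already be allocated.  Returns -ENODATA if the
 * block turns out to be unmapped.
 */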
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -ENODATA;
        return 0;
}
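
/**
 * gfs2_writepage - write a dirty page out via iomap
 * @page: the page to write
 * @wbc: writeback control
 *
 * The inode glock must already be held in exclusive mode.  If a transaction
 * is currently running (current->journal_info is set), the page is simply
 * redirtied and written out later.
 */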
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct iomap_writepage_ctx wpc = { };

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (current->journal_info)
                goto redirty;
        return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);

redirty:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}
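
/**
 * gfs2_write_jdata_page - write a journaled data page using buffer heads
 * @page: the page to write
 * @wbc: writeback control
 *
 * Returns: errno
 */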
static int gfs2_write_jdata_page(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode * const inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = i_size >> PAGE_SHIFT;
        unsigned offset;

        /*
         * If the page straddles i_size, zero out the region beyond EOF.
         * The page may be mmapped, and data in the mapped-but-beyond-EOF
         * region must never be written back to the file.
         */
        offset = i_size & (PAGE_SIZE - 1);
        if (page->index == end_index && offset)
                zero_user_segment(page, offset, PAGE_SIZE);

        return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
                                       end_buffer_async_write);
}
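
/**
 * __gfs2_jdata_writepage - the core of jdata writepage
 * @page: the page to write
 * @wbc: writeback control
 *
 * If the page was dirtied while a transaction was running (PageChecked is
 * set), its buffers are first added to the current transaction before the
 * page is written out.
 */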
static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (PageChecked(page)) {
                ClearPageChecked(page);
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             BIT(BH_Dirty)|BIT(BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
        }
        return gfs2_write_jdata_page(page, wbc);
}
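
/**
 * gfs2_jdata_writepage - write out a journaled data page
 * @page: the page to write
 * @wbc: writeback control
 *
 * Returns: errno
 */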
static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int ret;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (PageChecked(page) || current->journal_info)
                goto out_ignore;
        ret = __gfs2_jdata_writepage(page, wbc);
        return ret;

out_ignore:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}
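
/**
 * gfs2_writepages - write a bunch of dirty pages back to disk
 * @mapping: the mapping to write
 * @wbc: writeback control
 */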
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct iomap_writepage_ctx wpc = { };
        int ret;

        /*
         * Even if nothing was written here, dirty pages may still be sitting
         * in the AIL.  Force an AIL flush in that case so that writeback can
         * make progress instead of spinning on pages it cannot find.
         */
        ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
        if (ret == 0)
                set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
        return ret;
}
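
/**
 * gfs2_write_jdata_pagevec - write a batch of journaled data pages
 * @mapping: the mapping to write
 * @wbc: writeback control
 * @pvec: the pagevec of pages found by the tagged lookup
 * @nr_pages: number of pages in @pvec
 * @done_index: page index to resume from, updated as pages are written
 *
 * The whole batch is written inside a single transaction.
 *
 * Returns: 1 if the caller should stop, a negative errno on error,
 *          otherwise 0
 */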
static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                                    struct writeback_control *wbc,
                                    struct pagevec *pvec,
                                    int nr_pages,
                                    pgoff_t *done_index)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
        int i;
        int ret;

        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_pages; i++) {
                struct page *page = pvec->pages[i];

                *done_index = page->index;

                lock_page(page);

                if (unlikely(page->mapping != mapping)) {
continue_unlock:
                        unlock_page(page);
                        continue;
                }

                if (!PageDirty(page)) {
                        /* Someone else already cleaned the page for us. */
                        goto continue_unlock;
                }

                if (PageWriteback(page)) {
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        else
                                goto continue_unlock;
                }

                BUG_ON(PageWriteback(page));
                if (!clear_page_dirty_for_io(page))
                        goto continue_unlock;

                trace_wbc_writepage(wbc, inode_to_bdi(inode));

                ret = __gfs2_jdata_writepage(page, wbc);
                if (unlikely(ret)) {
                        if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                unlock_page(page);
                                ret = 0;
                        } else {
                                /*
                                 * Set done_index past this page so that a
                                 * persistent error on one page does not
                                 * stall background writeback of the rest of
                                 * the file.
                                 */
                                *done_index = page->index + 1;
                                ret = 1;
                                break;
                        }
                }

                /*
                 * Only stop early when this is not an integrity sync; for
                 * WB_SYNC_ALL we must keep going until every page tagged
                 * for writeback has been written.
                 */
                if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
                        ret = 1;
                        break;
                }
        }
        gfs2_trans_end(sdp);
        return ret;
}
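
/**
 * gfs2_write_cache_jdata - like write_cache_pages(), but for jdata
 * @mapping: the mapping to write
 * @wbc: writeback control
 *
 * A local variant of write_cache_pages() is needed because a transaction
 * has to be started before any page locks are taken; this is done per
 * pagevec batch in gfs2_write_jdata_pagevec().
 */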
static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        int tag;

        pagevec_init(&pvec);
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index;
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1;
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;

retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                                    tag);
                if (nr_pages == 0)
                        break;

                ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;
                pagevec_release(&pvec);
                cond_resched();
        }

        if (!cycled && !done) {
                /*
                 * range_cyclic: we hit the last page and there is more work
                 * to be done, so wrap back to the start of the file.
                 */
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}
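
/**
 * gfs2_jdata_writepages - write back dirty journaled data pages
 * @mapping: the mapping to write
 * @wbc: writeback control
 *
 * For integrity syncs, the log is flushed and a second pass is made so that
 * journaled data also reaches its final on-disk location.
 */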
static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                               GFS2_LFC_JDATA_WPAGES);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}
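
/**
 * stuffed_readpage - fill in a page from a stuffed (inline) inode
 * @ip: the inode
 * @page: the page to fill
 *
 * Copies the inline data that lives in the dinode block into the first page
 * of the inode's address space and zeroes the remainder.
 */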
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        u64 dsize = i_size_read(&ip->i_inode);
        void *kaddr;
        int error;

        /*
         * A stuffed inode only ever holds data in its first page.  Due to
         * the ordering of unstuffing and ->fault(), we can still be asked
         * for a later page here, in which case we simply return a zeroed
         * page.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page);
        if (dsize > gfs2_max_stuffed_size(ip))
                dsize = gfs2_max_stuffed_size(ip);
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
        memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
        kunmap_atomic(kaddr);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}
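
/**
 * __gfs2_readpage - readpage that works for stuffed and jdata inodes too
 * @file: the file being read (unused)
 * @page: the page to read
 *
 * Shared by gfs2_readpage() and gfs2_internal_read().
 */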
static int __gfs2_readpage(void *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error;

        if (!gfs2_is_jdata(ip) ||
            (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
                error = iomap_readpage(page, &gfs2_iomap_ops);
        } else if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else {
                error = mpage_readpage(page, gfs2_block_map);
        }

        if (unlikely(gfs2_withdrawn(sdp)))
                return -EIO;

        return error;
}
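
/**
 * gfs2_readpage - read a page of a file
 * @file: the file to read
 * @page: the page of the file
 */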
static int gfs2_readpage(struct file *file, struct page *page)
{
        return __gfs2_readpage(file, page);
}
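
/**
 * gfs2_internal_read - read an internal file
 * @ip: the gfs2 inode
 * @buf: the buffer to fill
 * @pos: the file position to start reading from (updated on return)
 * @size: the number of bytes to read
 *
 * Returns: the number of bytes read, or a negative errno
 */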
int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos / PAGE_SIZE;
        unsigned offset = *pos & (PAGE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
        void *p;

        do {
                amt = size - copied;
                if (offset + size > PAGE_SIZE)
                        amt = PAGE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p);
                put_page(page);
                copied += amt;
                index++;
                offset = 0;
        } while (copied < size);
        (*pos) += size;
        return size;
}
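
/**
 * gfs2_readpages - read a bunch of pages at once
 * @file: the file being read
 * @mapping: the address space
 * @pages: the list of pages to read
 * @nr_pages: the number of pages
 *
 * This is only used for readahead, so failures are not fatal: anything that
 * cannot be read here will be picked up later by ->readpage.  Stuffed files
 * are skipped entirely and left to stuffed_readpage().
 */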
static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_stuffed(ip))
                return 0;
        else if (gfs2_is_jdata(ip))
                return mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
        else
                return iomap_readpages(mapping, pages, nr_pages, &gfs2_iomap_ops);
}
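
/**
 * adjust_fs_space - adjust the total capacity of the file system
 * @inode: the inode being written (used to find the super block)
 *
 * Called after the resource index has grown (e.g. gfs2_grow) so that the
 * statfs information reflects the new total number of blocks.
 */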
void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        u64 fs_total, new_free;

        if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
                return;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                goto out;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
                goto out2;
        update_statfs(sdp, m_bh, l_bh);
        brelse(l_bh);
out2:
        brelse(m_bh);
out:
        sdp->sd_rindex_uptodate = 0;
        gfs2_trans_end(sdp);
}
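
/**
 * jdata_set_page_dirty - mark a journaled data page dirty
 * @page: the page to dirty
 *
 * If the page is dirtied from within a transaction, PageChecked is set so
 * that writepage later knows to add the page's buffers to a transaction.
 *
 * Returns: 1 if the page was newly dirtied, 0 otherwise
 */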
static int jdata_set_page_dirty(struct page *page)
{
        if (current->journal_info)
                SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}
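
/**
 * gfs2_bmap - map a logical block to its disk block number
 * @mapping: the address space
 * @lblock: the logical block number
 *
 * Returns: the disk block number, or 0 on failure or for stuffed files
 */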
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_list);
                else {
                        spin_lock(&sdp->sd_ail_lock);
                        gfs2_remove_from_journal(bh, REMOVE_JDATA);
                        spin_unlock(&sdp->sd_ail_lock);
                }
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        unsigned int stop = offset + length;
        int partial_page = (offset || length < PAGE_SIZE);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!PageLocked(page));
        if (!partial_page)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;

        bh = head = page_buffers(page);
        do {
                if (pos + bh->b_size > stop)
                        return;

                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (!partial_page)
                try_to_release_page(page, 0);
}
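
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Detaches any gfs2_bufdata from the page's buffers and then calls
 * try_to_free_buffers() to release the buffer heads.
 *
 * Returns: 1 if the page was put or else 0
 */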
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        if (!page_has_buffers(page))
                return 0;

        /*
         * The VM can still hand us pages with dirty buffers here, so rather
         * than warning, the first pass below simply refuses to release any
         * page containing busy, journaled or dirty buffers.
         */

        gfs2_log_lock(sdp);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_tr)
                        goto cannot_release;
                if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
                        goto cannot_release;
                bh = bh->b_this_page;
        } while (bh != head);

        head = bh = page_buffers(page);
        do {
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        bd->bd_bh = NULL;
                        bh->b_private = NULL;
                        /*
                         * The bd may still be queued as a revoke, in which
                         * case it must not be dequeued or freed here.
                         */
                        if (!bd->bd_blkno && !list_empty(&bd->bd_list))
                                list_del_init(&bd->bd_list);
                        if (list_empty(&bd->bd_list))
                                kmem_cache_free(gfs2_bufdata_cachep, bd);
                }

                bh = bh->b_this_page;
        } while (bh != head);
        gfs2_log_unlock(sdp);

        return try_to_free_buffers(page);

cannot_release:
        gfs2_log_unlock(sdp);
        return 0;
}

static const struct address_space_operations gfs2_aops = {
        .writepage = gfs2_writepage,
        .writepages = gfs2_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .set_page_dirty = iomap_set_page_dirty,
        .releasepage = iomap_releasepage,
        .invalidatepage = iomap_invalidatepage,
        .bmap = gfs2_bmap,
        .direct_IO = noop_direct_IO,
        .migratepage = iomap_migrate_page,
        .is_partially_uptodate = iomap_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .set_page_dirty = jdata_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
        if (gfs2_is_jdata(GFS2_I(inode)))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                inode->i_mapping->a_ops = &gfs2_aops;
}