// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	unsigned int		io_type;
	unsigned int		data_seq;
	unsigned int		cow_seq;
	struct xfs_ioend	*ioend;
};

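/*
 * Return the block device that backs this inode: the realtime device for
 * realtime inodes, otherwise the main data device.
 */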
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

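/*
 * Return the DAX device backing this inode, again picking the realtime or
 * data device as appropriate.
 */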
struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

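/*
 * Finish writeback for a single page: record any I/O error against the
 * mapping, drop our write reference on the iomap_page and end page writeback
 * once the last block on the page has completed.
 */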
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);

	if (error) {
		SetPageError(bvec->bv_page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) > 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);
		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

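/*
 * Reserve a transaction up front for the on-disk inode size update that may
 * be needed at I/O completion and attach it to the ioend.  The freeze
 * protection and NOFS state taken by the transaction allocation are released
 * here because the commit happens later from the completion workqueue.
 */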
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
				XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

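/*
 * Convenience wrapper that allocates its own transaction before calling
 * __xfs_setfilesize() to extend the on-disk inode size.
 */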
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

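/*
 * Complete the file size update for an ioend using the transaction that was
 * reserved at submission time by xfs_setfilesize_trans_alloc().
 */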
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission
	 * thread, thus we need to mark ourselves as being in a transaction
	 * manually.  Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		/* writeback should never update isize */
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

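/*
 * Bio completion handler: punt unwritten/COW conversion and on-disk size
 * updates to a workqueue, otherwise finish the ioend directly here.
 */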
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct xfs_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	xfs_fileoff_t			offset_fsb)
{
	if (offset_fsb < wpc->imap.br_startoff ||
	    offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
		return false;

	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->io_type == XFS_IO_COW)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data
	 * fork because concurrent changes could have invalidated the extent.
	 * Also check the COW fork, because changes since we last looked could
	 * have added a COW extent that invalidates the mapping.
	 */
	if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

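/*
 * Find or allocate the extent mapping backing writeback at @offset,
 * preferring a COW fork extent over the data fork, and cache the result in
 * the writepage context for reuse by subsequent blocks.
 */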
STATIC int
xfs_map_blocks(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset), end_fsb;
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	struct xfs_bmbt_irec	imap;
	int			whichfork = XFS_DATA_FORK;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedent, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK
	 * because we've indirectly protected against concurrent updates:
	 * writeback has the page locked, which prevents concurrent
	 * invalidations by reflink and directio and prevents concurrent
	 * buffered writes to the same page.  Changes to if_seq always happen
	 * under i_lock, which protects against concurrent updates and
	 * provides a memory barrier on the way out that ensures that we
	 * always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset_fsb))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for
	 * this offset.  This will convert delayed allocations (including COW
	 * ones) into real extents.  If we return without a valid map, it
	 * means we landed in a hole and we skip the block.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset > mp->m_super->s_maxbytes - count)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);

	/*
	 * Check if this offset is covered by a COW extent, and if yes use it
	 * directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		/*
		 * Truncate can race with writeback since writeback doesn't
		 * take the iolock and truncate decreases the file size before
		 * it starts truncating the pages between new_size and
		 * old_size.  Therefore, we can end up in the situation where
		 * writeback gets a CoW fork mapping but the truncate makes
		 * the mapping invalid and we end up in here trying to get a
		 * new mapping.  Bail out here so that we simply never get a
		 * valid mapping and so we drop the write altogether.  The
		 * page truncation will kill the contents anyway.
		 */
		if (offset > i_size_read(inode)) {
			wpc->io_type = XFS_IO_HOLE;
			return 0;
		}
		whichfork = XFS_COW_FORK;
		wpc->io_type = XFS_IO_COW;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset_fsb)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for
	 * this offset.  This will convert delayed allocations (including COW
	 * ones) into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (imap.br_startoff > offset_fsb) {
		/* landed in a hole or beyond EOF */
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		wpc->io_type = XFS_IO_HOLE;
	} else {
		/*
		 * Truncate to the next COW extent if there is one.  This is
		 * the only opportunity to do this because we can skip COW
		 * fork lookups for the subsequent blocks in the mapping;
		 * however, the requirement to treat the COW range separately
		 * remains.
		 */
		if (cow_fsb != NULLFILEOFF &&
		    cow_fsb < imap.br_startoff + imap.br_blockcount)
			imap.br_blockcount = cow_fsb - imap.br_startoff;

		if (isnullstartblock(imap.br_startblock)) {
			/* got a delalloc extent */
			wpc->io_type = XFS_IO_DELALLOC;
			goto allocate_blocks;
		}

		if (imap.br_state == XFS_EXT_UNWRITTEN)
			wpc->io_type = XFS_IO_UNWRITTEN;
		else
			wpc->io_type = XFS_IO_OVERWRITE;
	}

	wpc->imap = imap;
	trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
	return 0;
allocate_blocks:
	error = xfs_iomap_write_allocate(ip, whichfork, offset, &imap,
			whichfork == XFS_COW_FORK ?
					&wpc->cow_seq : &wpc->data_seq);
	if (error)
		return error;
	ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
	       imap.br_startoff + imap.br_blockcount <= cow_fsb);
	wpc->imap = imap;
	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
	return 0;
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the
 * ioend once. In the case of multiple bio submission, each bio will take an
 * IO reference to the ioend to ensure that the ioend completion is only done
 * once all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part
 * of the submission process has failed after we have marked pages for
 * writeback and unlocked them. In this situation, we need to fail the bio
 * and ioend rather than submit it to IO. This typically only happens on a
 * filesystem shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		/*
		 * Yuk. This can do memory allocation, but is not a
		 * transactional operation so everything is done in GFP_KERNEL
		 * context. That can deadlock, because we hold pages in
		 * writeback state and GFP_KERNEL allocations can block on
		 * them.  Hence we must operate in nofs conditions here.
		 */
		unsigned nofs_flag;

		nofs_flag = memalloc_nofs_save();
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
		memalloc_nofs_restore(nofs_flag);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

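/*
 * Allocate a new ioend together with its embedded bio from the
 * xfs_ioend_bioset and initialise it for writeback starting at @offset on
 * disk sector @sector.
 */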
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order so
 * that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	bio_set_dev(new, bdev);
	new->bi_iter.bi_sector = sector;
	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	xfs_off_t		offset,
	struct page		*page,
	struct iomap_page	*iop,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	unsigned		len = i_blocksize(inode);
	unsigned		poff = offset & (PAGE_SIZE - 1);
	sector_t		sector;

	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);

	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    sector != bio_end_sector(wpc->ioend->io_bio) ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset,
				bdev, sector);
	}

	if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff)) {
		if (iop)
			atomic_inc(&iop->write_count);
		if (bio_full(wpc->ioend->io_bio))
			xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
		__bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
}

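/*
 * Trace and pass invalidation of a (partial) page straight through to the
 * generic iomap code.
 */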
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
	iomap_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave stale delalloc blocks behind
 * that will never be written back or reclaimed, which upsets the
 * filesystem's space accounting.
 *
 * Punch out the delalloc blocks covering the page before invalidating it;
 * a punch failure is only reported, because the page contents are thrown
 * away regardless.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide. The current ioend we
 * are adding blocks to is cached in the writepage context, and if the new
 * block does not append to the cached ioend, we create a new ioend and cache
 * that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected.  While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct iomap_page	*iop = to_iomap_page(page);
	unsigned		len = i_blocksize(inode);
	struct xfs_ioend	*ioend, *next;
	uint64_t		file_offset;
	int			error = 0, count = 0, i;

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) == 0);

	/*
	 * Walk through the page to find areas to write back. If we run off
	 * the end of the current map or find the current map invalid, grab a
	 * new one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = xfs_map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (wpc->io_type == XFS_IO_HOLE)
			continue;
		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				 &submit_list);
		count++;
	}

	ASSERT(wpc->ioend || list_empty(&submit_list));
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * On error, we have to fail the ioend here because we may have set
	 * pages under writeback, and we have to make sure we run IO
	 * completion to mark the error state of the IO appropriately, so we
	 * can't cancel the ioend directly here.  That means we have to mark
	 * this page as under writeback if we included any blocks from it in
	 * the ioend chain so that completion treats it correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the
	 * page now.  The caller will still need to trigger submission of
	 * outstanding ioends on the writepage context so they are treated
	 * correctly on error.
	 */
	if (unlikely(error)) {
		if (!count) {
			xfs_aops_discard_page(page);
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page wasn't fully cleaned, we need the higher layers
		 * to come back to it.  Mark it under writeback without
		 * clearing the TOWRITE tag (keepwrite) so that the page stays
		 * dirty and a later writeback pass will retry it.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	unlock_page(page);

	/*
	 * Preserve the original error if there was one, otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = xfs_submit_ioend(wbc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we
	 * race with a partial page truncate on a sub-page block sized
	 * filesystem.  In that case we need to end writeback on the page
	 * ourselves as no blocks were added to an ioend.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively
	 * low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * If the page index is less than end_index, the whole page lies
	 * inside i_size and the writeback range for it is simply the full
	 * page.  Otherwise the page straddles or lies beyond EOF and is
	 * handled in the else branch below.
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * The page either straddles i_size or is wholly beyond it:
		 * work out how far into the page i_size reaches.
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that end_index is an unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system and we
		 * checked "page->index >= end_index + 1", then "end_index + 1"
		 * would overflow and evaluate to 0, so the page would be
		 * redirtied and written out repeatedly and the writing
		 * program would hang.  Instead, check whether the page is
		 * totally beyond i_size or whether its offset is just equal
		 * to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a
		 * file that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

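/*
 * Called by the VM to write out a single dirty page.  Set up a writepage
 * context, map and submit the page, then submit any ioend still cached on
 * the context.
 */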
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_HOLE,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

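/*
 * Write back a range of the mapping: walk the dirty pages with
 * write_cache_pages() using a shared writepage context so that contiguous
 * blocks can be merged into larger ioends.
 */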
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_HOLE,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

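/*
 * For DAX inodes there is no pagecache writeback; just flush dirty mapping
 * entries to the backing device via the generic DAX helper.
 */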
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
	return iomap_releasepage(page, gfp_mask);
}

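/*
 * Map a file block to a disk block for callers of ->bmap such as FIBMAP.
 */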
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return iomap_readpage(page, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}

static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};