/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include <linux/aio.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

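/*
 * Walk the buffers attached to a page and report whether any of them are
 * in the delalloc or unwritten state.
 */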
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

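/*
 * Return the block device backing this inode: realtime inodes live on the
 * realtime device, everything else on the data device.
 */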
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state via the associated buffer_heads, release our reference to the
 * ioend, and finally free it up.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);

	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;
	xfs_fsize_t		isize;

	/*
	 * The transaction may have been allocated in the I/O submission
	 * thread, thus we need to mark ourselves as being in a transaction
	 * manually.  Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
			   0, 1, _THIS_IP_);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans ||
			 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
		/*
		 * For direct I/O we do not know if we need to allocate blocks
		 * or not, so we can't preallocate an append transaction, as
		 * that results in nested reservations and log space deadlocks.
		 * Hence allocate the transaction here.  While this is
		 * sub-optimal and can block IO completion for some time, we're
		 * stuck with doing it this way until we can pass the ioend to
		 * the direct IO allocation callbacks and avoid nesting that
		 * way.
		 */
		error = xfs_setfilesize_trans_alloc(ioend);
		if (error)
			goto done;
		error = xfs_setfilesize(ioend);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = -error;
	xfs_destroy_ioend(ioend);
}

/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isdirect = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

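/*
 * Look up the extent mapping that backs @offset.  If the inode lock cannot
 * be taken and @nonblocking is set, return EAGAIN; for delalloc regions this
 * is also where real block allocation happens, via
 * xfs_iomap_write_allocate().
 */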
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

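/*
 * Return true if the given file offset falls inside the cached extent
 * mapping.
 */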
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

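/*
 * Take an extra ioend reference for the bio we are about to submit; it is
 * dropped again in xfs_end_bio() when the bio completes.
 */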
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

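/*
 * Allocate a bio as large as the device allows, starting at the disk block
 * the buffer_head maps to.
 */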
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

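/*
 * Mark a buffer for async write and move it into a clean state before the
 * bio containing it is submitted.
 */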
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

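/*
 * Transition a locked page into the writeback state and unlock it.  If no
 * buffers were submitted for IO, end writeback on the page immediately.
 */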
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);

	if (!buffers)
		end_page_writeback(page);
}

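/*
 * Add a buffer to a bio, returning the number of bytes actually added.
 */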
static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we submit them then we can end up with a page that only has
 * buffers marked async write and I/O complete and writeback doesn't get
 * started.  This causes problems if we then try to do I/O on the same buffers.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the ioend chain
 * rather than submit it to IO.  This typically only happens on a filesystem
 * shutdown.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	int			fail)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		if (fail) {
			ioend->io_error = -fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we can,
 * otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

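/*
 * Translate the file offset into the on-disk block number for this buffer,
 * using the cached extent mapping.
 */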
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_check_page_type(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable += (type == XFS_IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable += (type == XFS_IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable += (type == XFS_IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	/*
	 * If the current map does not span the entire page we are about to try
	 * to write, then give up.  The only way we can write a page that spans
	 * multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function.  Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part of
	 * the page we don't write here doesn't get written as part of the data
	 * integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback.  Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page.
	 */
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

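/*
 * Invalidate part or all of a page backed by buffer_heads; a tracing
 * wrapper around block_invalidatepage().
 */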
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it.  Because they are delalloc, we can do this without needing
 * a transaction.  Indeed - if we get ENOSPC errors, we have to be able to do
 * this truncation without a transaction as there is no space left for block
 * reservation (typically why we see a ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim, but disallows reclaim from
	 * kswapd doing an unbound amount of IO without any caller control.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress.  We must redirty the
		 * page so that reclaim stops reclaiming it; otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 */
		if (page->index >= end_index + 1 || offset_into_page == 0)
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = XFS_IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != XFS_IO_UNWRITTEN) {
				type = XFS_IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != XFS_IO_DELALLOC) {
				type = XFS_IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != XFS_IO_OVERWRITE) {
				type = XFS_IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	/* if there is no IO to be submitted for this page, we are done */
	if (!ioend)
		return 0;

	ASSERT(iohead);

	/*
	 * Any errors from this point onwards need to be reported through the
	 * IO completion path as we have marked the initial page as under
	 * writeback and unlocked it.
	 */
	if (imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	/*
	 * Reserve log space if we might write beyond the on-disk inode size.
	 */
	err = 0;
	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		err = xfs_setfilesize_trans_alloc(ioend);

	xfs_submit_ioend(wbc, iohead, err);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

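/*
 * Called for a whole-mapping writeback; clears the ITRUNCATED flag before
 * handing off to the generic writepages implementation.
 */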
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

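/*
 * Map a file block to a buffer_head for read, buffered write or direct IO.
 * For writes into holes or delalloc regions this is also where real block
 * allocation is triggered.
 */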
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.  For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * start is gaining nothing, so take it exclusive straight away.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * Drop the ilock in preparation for starting the block
			 * allocation transaction.  It will be retaken
			 * exclusively inside xfs_iomap_write_direct for the
			 * actual allocation.
			 */
			xfs_iunlock(ip, lockmode);
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return -error;
			new = 1;
		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here.  If we
			 * are allocating a new delalloc block, make sure that
			 * we set the new flag so that we mark the buffer new
			 * so that we know that it is newly allocated if the
			 * write fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct) {
				bh_result->b_private = inode;
				set_buffer_defer_completion(bh_result);
			}
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device to
	 * the one currently pointed to by the buffer_head's b_bdev.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}

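/*
 * Buffered and direct IO variants of the block mapping callback, handed to
 * the generic code by the address space operations below.
 */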
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}

/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL, __xfs_get_blocks signals us that we
 * need to issue an unwritten extent conversion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size.  To prevent this just update it a little bit earlier here.
	 */
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	if (private && size > 0)
		ioend->io_type = XFS_IO_UNWRITTEN;

	xfs_finish_ioend_sync(ioend);
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend	*ioend = NULL;
	ssize_t			ret;

	if (rw & WRITE) {
		size_t size = iov_length(iov, nr_segs);

		/*
		 * We cannot preallocate a size update transaction here as we
		 * don't know whether allocation is necessary or not.  Hence we
		 * can only tell IO completion that one is necessary if we are
		 * not doing unwritten extent conversion.
		 */
		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size)
			ioend->io_isdirect = 1;

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_destroy_ioend;
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;

out_destroy_ioend:
	xfs_destroy_ioend(ioend);
	return ret;
}

/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk
 * yet as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	/*
	 * The request pos offset might be 32 or 64 bit, this is all fine
	 * on 64-bit platform.  However, for 64-bit pos request on 32-bit
	 * platform, the high 32-bit will be masked off if we evaluate the
	 * block_offset via (pos & PAGE_MASK) because the PAGE_MASK is
	 * 0xfffff000 as an unsigned long, hence the result is incorrect
	 * and could cause the following ASSERT to fail in most cases.
	 * In order to avoid this, we can evaluate the block_offset of the
	 * start of the page by using shifts rather than masks.
	 */
	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;

	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* bail up to the write */
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);
	}
}

/*
 * On write failure we need to be able to punch out any stale delalloc blocks
 * over the range of the failed write, so keep the page locked across the
 * __block_write_begin() call ourselves rather than using block_write_begin(),
 * which unlocks and releases the page on error.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index,
					   flags | AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		if (pos + len > i_size_read(inode))
			truncate_pagecache(inode, i_size_read(inode));

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}

/*
 * If generic_write_end() only wrote a partial amount, any delalloc blocks
 * reserved beyond EOF for this write can never be written, so truncate the
 * page cache back to EOF and punch those blocks out again.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			truncate_pagecache(inode, isize);
			xfs_vm_kill_delalloc_range(inode, isize, to);
		}
	}
	return ret;
}

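/*
 * Map a file block to a physical sector for the FIBMAP interface, flushing
 * any dirty data first so that the on-disk layout is stable.
 */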
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};