#include "xfs.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/aio.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

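/*
 * We're now finished for good with this ioend structure.  Update the page
 * state via the associated buffer_heads, release holds on the inode and bio,
 * and finally free up memory.  Do not use the ioend after this.
 */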
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		inode_dio_done(ioend->io_inode);
		if (ioend->io_isasync) {
			aio_complete(ioend->io_iocb, ioend->io_error ?
					ioend->io_error : ioend->io_result, 0);
		}
	}

	mempool_free(ioend, xfs_ioend_pool);
}

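/*
 * Fast and loose check if this write could update the on-disk inode size.
 */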
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

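	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */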
	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);
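	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */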
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

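/*
 * Update on-disk file size now that data has been written to disk.
 */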
STATIC int
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;
	xfs_fsize_t		isize;

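	/*
	 * The transaction may have been allocated in the I/O submission
	 * thread, thus we need to mark ourselves as being in a transaction
	 * manually.  Similarly for freeze protection.
	 */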
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
			   0, 1, _THIS_IP_);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}

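/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */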
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans ||
			 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

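/*
 * IO write completion.
 */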
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

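	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */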
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
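		/*
		 * For direct I/O we do not know if we need to allocate blocks
		 * or not, so we can't preallocate an append transaction, as
		 * that results in nested reservations and log space deadlocks.
		 * Hence allocate the transaction here.  While this is
		 * sub-optimal and can block IO completion for some time, we're
		 * stuck with doing it this way until we can pass the ioend to
		 * the direct IO allocation callbacks and avoid nesting that
		 * way.
		 */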
		error = xfs_setfilesize_trans_alloc(ioend);
		if (error)
			goto done;
		error = xfs_setfilesize(ioend);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = -error;
	xfs_destroy_ioend(ioend);
}

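/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */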
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}

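/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */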
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

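	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine and breaking
	 * the zero ref count.
	 */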
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_isdirect = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

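/*
 * BIO completion handler for buffered IO.
 */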
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

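	/* Toss bio and pass work off to an xfsdatad thread */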
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);

	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

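/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning pages, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we get them, then we can end up with a page that only has buffers
 * marked async write, and I/O completion on it can occur before we mark the
 * other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback()
 * because we have pages marked as writeback but have already completed all of
 * the I/O on them.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the ioend chain
 * rather than submit it to IO.  This typically only happens on a filesystem
 * shutdown.
 */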
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	int			fail)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

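	/* Pass 1 - start writeback */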
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

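	/* Pass 2 - submit I/O */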
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

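		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion
		 * immediately as there is only one reference to the ioend at
		 * this point in time.
		 */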
		if (fail) {
			ioend->io_error = -fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

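/*
 * Cancel submission of all buffer_heads so it can be handled
 * by the page re-dirtying code.
 */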
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

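/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */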
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

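/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */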
STATIC int
xfs_check_page_type(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable += (type == XFS_IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable += (type == XFS_IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable += (type == XFS_IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

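/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */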
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type))
		goto fail_unlock_page;

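	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count.  On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */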
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

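	/*
	 * If the current map does not span the entire page we are about to
	 * try to write, then give up.  The only way we can write a page that
	 * spans multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function.  Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part
	 * of the page we don't write here doesn't get written as part of the
	 * data integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback.  Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page.
	 */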
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

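/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by the imap and following the start page.
 */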
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

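/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region
 * - the delalloc extent is returned when none is supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page.
 * Because they are delalloc, we can do this without needing a transaction.
 * Indeed - if we get ENOSPC errors, we have to be able to do this truncation
 * without a transaction as there is no space left for block reservation
 * (typically why we see a ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */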
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
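			/* something screwed, just bail */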
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
	return;
}

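/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */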
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

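	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively
	 * low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */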
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

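	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */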
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

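	/* Is this page beyond the end of the file? */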
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

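		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress.  We must redirty
		 * the page so that reclaim stops reclaiming it.  Otherwise
		 * xfs_vm_releasepage() is called on it and gets a SIGBUS.
		 */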
		if (page->index >= end_index + 1 || offset_into_page == 0)
			goto redirty;

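		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a
		 * file that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */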
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = XFS_IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

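		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */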
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != XFS_IO_UNWRITTEN) {
				type = XFS_IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != XFS_IO_DELALLOC) {
				type = XFS_IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != XFS_IO_OVERWRITE) {
				type = XFS_IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
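			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */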
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
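			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */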
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

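	/* if there is no IO to be submitted for this page, we are done */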
	if (!ioend)
		return 0;

	ASSERT(iohead);

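	/*
	 * Any errors from this point onwards need to be reported through the
	 * IO completion path as we have marked the initial page as under
	 * writeback and unlocked it.
	 */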
	if (imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
					wbc, end_index);
	}

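	/*
	 * Reserve log space if we might write beyond the on-disk inode size.
	 */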
	err = 0;
	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		err = xfs_setfilesize_trans_alloc(ioend);

	xfs_submit_ioend(wbc, iohead, err);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

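/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */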
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

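	/*
	 * Buffered writes need the ilock held exclusively as we are going to
	 * instantiate a new mapping; direct I/O and plain reads can probe the
	 * extent map under a shared lock.
	 */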
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct || xfs_get_extsz_hint(ip)) {
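			/*
			 * Drop the ilock in preparation for starting the block
			 * allocation transaction.  It will be retaken
			 * exclusively inside xfs_iomap_write_direct for the
			 * actual allocation.
			 */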
			xfs_iunlock(ip, lockmode);
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return -error;
			new = 1;
		} else {
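			/*
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here.  If we
			 * are allocating a new delalloc block, make sure that
			 * we set the new flag so that we mark the buffer new
			 * so that we know that it is newly allocated if the
			 * write fails.
			 */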
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
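		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */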
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

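	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to by the buffer_head's b_bdev currently.
	 */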
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

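	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */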
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

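	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is so that we can avoid repeated get_blocks calls.
	 */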
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}

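/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL, __xfs_get_blocks signals us that we
 * need to issue an unwritten extent conversion for the range.  Synchronous
 * requests run the completion in the caller's context via
 * xfs_finish_ioend_sync(); asynchronous requests are punted to a workqueue
 * through xfs_finish_ioend().
 */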
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

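	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size.  To prevent this just update it a little bit earlier here.
	 */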
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

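	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */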
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_iocb = iocb;
	ioend->io_result = ret;
	if (private && size > 0)
		ioend->io_type = XFS_IO_UNWRITTEN;

	if (is_async) {
		ioend->io_isasync = 1;
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend	*ioend = NULL;
	ssize_t			ret;

	if (rw & WRITE) {
		size_t size = iov_length(iov, nr_segs);

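		/*
		 * We cannot preallocate a size update transaction here as we
		 * don't know whether allocation is necessary or not.  Hence
		 * we can only tell IO completion that one is necessary if we
		 * are not doing unwritten extent conversion.
		 */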
		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size)
			ioend->io_isdirect = 1;

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_destroy_ioend;
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;

out_destroy_ioend:
	xfs_destroy_ioend(ioend);
	return ret;
}

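/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk
 * yet as the page is still locked at this point.
 */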
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
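		/* something screwed, just bail */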
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset = pos & PAGE_MASK;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

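		/* skip buffers before the write */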
		if (block_end <= from)
			continue;

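		/* if the buffer is after the write, we're done */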
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);
	}

}

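/*
 * If the write begins beyond EOF and then fails, we have to punch out any
 * pagecache and delalloc blocks the failed write instantiated, as they would
 * otherwise be left stale with no data ever making it to disk.  Hence the
 * xfs_vm_write_failed() and truncate_pagecache() calls in the error path
 * below.
 */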
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index,
					   flags | AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		if (pos + len > i_size_read(inode))
			truncate_pagecache(inode, pos + len, i_size_read(inode));

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}

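/*
 * On a short copy, we only need to kill delalloc blocks beyond EOF in the
 * range of this specific write because they will never be written.
 */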
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			truncate_pagecache(inode, to, isize);
			xfs_vm_kill_delalloc_range(inode, isize, to);
		}
	}
	return ret;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};