/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC          37
#define to_ioend_wq(v)  (&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

void __init
xfs_ioend_init(void)
{
        int i;

        for (i = 0; i < NVSYNC; i++)
                init_waitqueue_head(&xfs_ioend_wq[i]);
}

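/*
 * Wait for all pending I/O on an inode to drain, i.e. for i_iocount to
 * drop to zero.  The wait queue is chosen by hashing the inode address
 * into the table above.
 */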
void
xfs_ioend_wait(
        xfs_inode_t             *ip)
{
        wait_queue_head_t       *wq = to_ioend_wq(ip);

        wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

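/*
 * Drop one I/O count reference on the inode and, if it was the last one,
 * wake anybody sleeping in xfs_ioend_wait().
 */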
STATIC void
xfs_ioend_wake(
        xfs_inode_t             *ip)
{
        if (atomic_dec_and_test(&ip->i_iocount))
                wake_up(to_ioend_wq(ip));
}

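/*
 * Walk the buffers attached to a page and report whether any of them is
 * delayed-allocate, unmapped (but uptodate), or unwritten.
 */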
STATIC void
xfs_count_page_state(
        struct page             *page,
        int                     *delalloc,
        int                     *unmapped,
        int                     *unwritten)
{
        struct buffer_head      *bh, *head;

        *delalloc = *unmapped = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_uptodate(bh) && !buffer_mapped(bh))
                        (*unmapped) = 1;
                else if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
        int             tag,
        struct inode    *inode,
        struct page     *page,
        unsigned long   pgoff)
{
        xfs_inode_t     *ip;
        loff_t          isize = i_size_read(inode);
        loff_t          offset = page_offset(page);
        int             delalloc = -1, unmapped = -1, unwritten = -1;

        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

        ip = XFS_I(inode);
        if (!ip->i_rwtrace)
                return;

        ktrace_enter(ip->i_rwtrace,
                (void *)((unsigned long)tag),
                (void *)ip,
                (void *)inode,
                (void *)page,
                (void *)pgoff,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
                (void *)((unsigned long)(isize & 0xffffffff)),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
                (void *)((unsigned long)current_pid()),
                (void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, pgoff)
#endif

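/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime files, the main data device otherwise.
 */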
STATIC struct block_device *
xfs_find_bdev_for_inode(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (XFS_IS_REALTIME_INODE(ip))
                return mp->m_rtdev_targp->bt_bdev;
        else
                return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
        xfs_ioend_t             *ioend)
{
        struct buffer_head      *bh, *next;
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);

        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;
                bh->b_end_io(bh, !ioend->io_error);
        }

        /*
         * Volume managers supporting multiple paths can send back ENODEV
         * when the final path disappears.  In this case continuing to fill
         * the page cache with dirty data which cannot be written out is
         * evil, so prevent that.
         */
        if (unlikely(ioend->io_error == -ENODEV)) {
                xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
                                      __FILE__, __LINE__);
        }

        xfs_ioend_wake(ip);
        mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
        xfs_ioend_t             *ioend)
{
        xfs_inode_t             *ip = XFS_I(ioend->io_inode);
        xfs_fsize_t             isize;
        xfs_fsize_t             bsize;

        bsize = ioend->io_offset + ioend->io_size;
        isize = MAX(ip->i_size, ip->i_new_size);
        isize = MIN(isize, bsize);
        return isize > ip->i_d.di_size ? isize : 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 * The current in-memory file size is i_size.  If a write is beyond
 * eof i_new_size will be the intended file size until i_size is
 * updated.  If this write does not extend all the way to the valid
 * file size then restrict this update to the end of the write.
 */
STATIC void
xfs_setfilesize(
        xfs_ioend_t             *ioend)
{
        xfs_inode_t             *ip = XFS_I(ioend->io_inode);
        xfs_fsize_t             isize;

        ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
        ASSERT(ioend->io_type != IOMAP_READ);

        if (unlikely(ioend->io_error))
                return;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        isize = xfs_ioend_new_eof(ioend);
        if (isize) {
                ip->i_d.di_size = isize;
                xfs_mark_inode_dirty_sync(ip);
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Buffered IO write completion for delayed allocate extents.
 */
STATIC void
xfs_end_bio_delalloc(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);

        xfs_setfilesize(ioend);
        xfs_destroy_ioend(ioend);
}

/*
 * Buffered IO write completion for regular, written extents.
 */
STATIC void
xfs_end_bio_written(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);

        xfs_setfilesize(ioend);
        xfs_destroy_ioend(ioend);
}

/*
 * IO write completion for unwritten extents.
 *
 * Issue transactions to convert a buffer range from unwritten
 * to written extents.
 */
STATIC void
xfs_end_bio_unwritten(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;

        if (likely(!ioend->io_error)) {
                if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                        int error;
                        error = xfs_iomap_write_unwritten(ip, offset, size);
                        if (error)
                                ioend->io_error = error;
                }
                xfs_setfilesize(ioend);
        }
        xfs_destroy_ioend(ioend);
}

/*
 * IO read completion for regular, written extents.
 */
STATIC void
xfs_end_bio_read(
        struct work_struct      *work)
{
        xfs_ioend_t             *ioend =
                container_of(work, xfs_ioend_t, io_work);

        xfs_destroy_ioend(ioend);
}

/*
 * Schedule IO completion handling on a xfs_ioend_t if this was
 * the last hold on a ioend structure, potentially waiting for
 * it to complete.
 */
STATIC void
xfs_finish_ioend(
        xfs_ioend_t             *ioend,
        int                     wait)
{
        if (atomic_dec_and_test(&ioend->io_remaining)) {
                struct workqueue_struct *wq = xfsdatad_workqueue;

                if (ioend->io_work.func == xfs_end_bio_unwritten)
                        wq = xfsconvertd_workqueue;

                queue_work(wq, &ioend->io_work);
                if (wait)
                        flush_workqueue(wq);
        }
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
        struct inode            *inode,
        unsigned int            type)
{
        xfs_ioend_t             *ioend;

        ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

        /*
         * Set the count to 1 initially, which will prevent an I/O
         * completion callback from happening before we have started
         * all the I/O from calling the completion routine and possible
         * I/O submission of error handling as the caller can't complete
         * the bio on I/O completion due to unlocked pages in the bio.
         */
        atomic_set(&ioend->io_remaining, 1);
        ioend->io_error = 0;
        ioend->io_list = NULL;
        ioend->io_type = type;
        ioend->io_inode = inode;
        ioend->io_buffer_head = NULL;
        ioend->io_buffer_tail = NULL;
        atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
        ioend->io_offset = 0;
        ioend->io_size = 0;

        if (type == IOMAP_UNWRITTEN)
                INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
        else if (type == IOMAP_DELAY)
                INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
        else if (type == IOMAP_READ)
                INIT_WORK(&ioend->io_work, xfs_end_bio_read);
        else
                INIT_WORK(&ioend->io_work, xfs_end_bio_written);

        return ioend;
}

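/*
 * Map <offset, count> to disk blocks through xfs_iomap().  Note the sign
 * flip: xfs_iomap() returns positive XFS error codes while the callers
 * here expect negative errnos.
 */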
STATIC int
xfs_map_blocks(
        struct inode            *inode,
        loff_t                  offset,
        ssize_t                 count,
        xfs_iomap_t             *mapp,
        int                     flags)
{
        int                     nmaps = 1;

        return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
}

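/*
 * Return true if the given file offset falls within the range covered by
 * the cached iomap.
 */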
STATIC_INLINE int
xfs_iomap_valid(
        xfs_iomap_t             *iomapp,
        loff_t                  offset)
{
        return offset >= iomapp->iomap_offset &&
                offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
        struct bio              *bio,
        int                     error)
{
        xfs_ioend_t             *ioend = bio->bi_private;

        ASSERT(atomic_read(&bio->bi_cnt) >= 1);
        ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

        /* Toss bio and pass work off to an xfsdatad thread */
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
        bio_put(bio);

        xfs_finish_ioend(ioend, 0);
}

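/*
 * Take an extra ioend reference for the duration of the I/O and submit
 * the bio as a write; the reference is dropped again from xfs_end_bio()
 * when the bio completes.
 */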
STATIC void
xfs_submit_ioend_bio(
        xfs_ioend_t             *ioend,
        struct bio              *bio)
{
        atomic_inc(&ioend->io_remaining);
        bio->bi_private = ioend;
        bio->bi_end_io = xfs_end_bio;

        /*
         * If the I/O is beyond the EOF we mark the inode dirty immediately
         * but don't update the inode size until I/O completion.
         */
        if (xfs_ioend_new_eof(ioend))
                xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));

        submit_bio(WRITE, bio);
        ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
        bio_put(bio);
}

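/*
 * Allocate a bio sized to take as many vectors as the device allows,
 * backing off to smaller sizes if the allocation fails.
 */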
STATIC struct bio *
xfs_alloc_ioend_bio(
        struct buffer_head      *bh)
{
        struct bio              *bio;
        int                     nvecs = bio_get_nr_vecs(bh->b_bdev);

        do {
                bio = bio_alloc(GFP_NOIO, nvecs);
                nvecs >>= 1;
        } while (!bio);

        ASSERT(bio->bi_private == NULL);
        bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
        bio_get(bio);
        return bio;
}

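/*
 * Flag a locked, mapped buffer for async write-out: mark it async-write
 * and uptodate and clear the dirty bit before the bio is built.
 */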
STATIC void
xfs_start_buffer_writeback(
        struct buffer_head      *bh)
{
        ASSERT(buffer_mapped(bh));
        ASSERT(buffer_locked(bh));
        ASSERT(!buffer_delay(bh));
        ASSERT(!buffer_unwritten(bh));

        mark_buffer_async_write(bh);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
}

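/*
 * Move the page into writeback state and unlock it.  If no buffers were
 * submitted, end writeback here as well since no I/O completion will
 * come along to do it for us.
 */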
STATIC void
xfs_start_page_writeback(
        struct page             *page,
        int                     clear_dirty,
        int                     buffers)
{
        ASSERT(PageLocked(page));
        ASSERT(!PageWriteback(page));
        if (clear_dirty)
                clear_page_dirty_for_io(page);
        set_page_writeback(page);
        unlock_page(page);

        if (!buffers)
                end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we marked
 * buffers as we submitted them we could end up with a page that only has
 * some buffers marked async write while I/O completion runs before the
 * remaining buffers on the page are marked as started.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
        xfs_ioend_t             *ioend)
{
        xfs_ioend_t             *head = ioend;
        xfs_ioend_t             *next;
        struct buffer_head      *bh;
        struct bio              *bio;
        sector_t                lastblock = 0;

        /* Pass 1 - start writeback */
        do {
                next = ioend->io_list;
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
                        xfs_start_buffer_writeback(bh);
                }
        } while ((ioend = next) != NULL);

        /* Pass 2 - submit I/O */
        ioend = head;
        do {
                next = ioend->io_list;
                bio = NULL;

                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

                        if (!bio) {
 retry:
                                bio = xfs_alloc_ioend_bio(bh);
                        } else if (bh->b_blocknr != lastblock + 1) {
                                xfs_submit_ioend_bio(ioend, bio);
                                goto retry;
                        }

                        if (bio_add_buffer(bio, bh) != bh->b_size) {
                                xfs_submit_ioend_bio(ioend, bio);
                                goto retry;
                        }

                        lastblock = bh->b_blocknr;
                }
                if (bio)
                        xfs_submit_ioend_bio(ioend, bio);
                xfs_finish_ioend(ioend, 0);
        } while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so it can be freed.
 * The I/O is not started yet and there is nothing to clean up.
 * We don't want to add this ioend to the error list in this case.
 */
STATIC void
xfs_cancel_ioend(
        xfs_ioend_t             *ioend)
{
        xfs_ioend_t             *next;
        struct buffer_head      *bh, *next_bh;

        do {
                next = ioend->io_list;
                bh = ioend->io_buffer_head;
                do {
                        next_bh = bh->b_private;
                        clear_buffer_async_write(bh);
                        unlock_buffer(bh);
                } while ((bh = next_bh) != NULL);

                xfs_ioend_wake(XFS_I(ioend->io_inode));
                mempool_free(ioend, xfs_ioend_pool);
        } while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
        struct inode            *inode,
        struct buffer_head      *bh,
        xfs_off_t               offset,
        unsigned int            type,
        xfs_ioend_t             **result,
        int                     need_ioend)
{
        xfs_ioend_t             *ioend = *result;

        if (!ioend || need_ioend || type != ioend->io_type) {
                xfs_ioend_t     *previous = *result;

                ioend = xfs_alloc_ioend(inode, type);
                ioend->io_offset = offset;
                ioend->io_buffer_head = bh;
                ioend->io_buffer_tail = bh;
                if (previous)
                        previous->io_list = ioend;
                *result = ioend;
        } else {
                ioend->io_buffer_tail->b_private = bh;
                ioend->io_buffer_tail = bh;
        }

        bh->b_private = NULL;
        ioend->io_size += bh->b_size;
}

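/*
 * Fill in the buffer's on-disk block number from the iomap: convert the
 * mapping's start from 512-byte basic blocks to filesystem blocks and
 * add the offset of this buffer within the mapping.
 */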
STATIC void
xfs_map_buffer(
        struct buffer_head      *bh,
        xfs_iomap_t             *mp,
        xfs_off_t               offset,
        uint                    block_bits)
{
        sector_t                bn;

        ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL);

        bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) +
              ((offset - mp->iomap_offset) >> block_bits);

        ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME));

        bh->b_blocknr = bn;
        set_buffer_mapped(bh);
}

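/*
 * Map the buffer at the given file offset and clear any delay/unwritten
 * state now that real blocks back it.  The iomap must not describe a
 * hole or a delayed allocation here.
 */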
STATIC void
xfs_map_at_offset(
        struct buffer_head      *bh,
        loff_t                  offset,
        int                     block_bits,
        xfs_iomap_t             *iomapp)
{
        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));

        lock_buffer(bh);
        xfs_map_buffer(bh, iomapp, offset, block_bits);
        bh->b_bdev = iomapp->iomap_target->bt_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
}

/*
 * Look for a page at index that is suitable for clustering.
 */
STATIC unsigned int
xfs_probe_page(
        struct page             *page,
        unsigned int            pg_offset,
        int                     mapped)
{
        int                     ret = 0;

        if (PageWriteback(page))
                return 0;

        if (page->mapping && PageDirty(page)) {
                if (page_has_buffers(page)) {
                        struct buffer_head      *bh, *head;

                        bh = head = page_buffers(page);
                        do {
                                if (!buffer_uptodate(bh))
                                        break;
                                if (mapped != buffer_mapped(bh))
                                        break;
                                ret += bh->b_size;
                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
                        ret = mapped ? 0 : PAGE_CACHE_SIZE;
        }

        return ret;
}

STATIC size_t
xfs_probe_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
        struct buffer_head      *head,
        int                     mapped)
{
        struct pagevec          pvec;
        pgoff_t                 tindex, tlast, tloff;
        size_t                  total = 0;
        int                     done = 0, i;

        /* First sum forwards in this page */
        do {
                if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh)))
                        return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);

        /* if we reached the end of the page, sum forwards in following pages */
        tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
        tindex = startpage->index + 1;

        /* Prune this back to avoid pathological behavior */
        tloff = min(tlast, startpage->index + 64);

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tloff) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        size_t pg_offset, pg_len = 0;

                        if (tindex == tlast) {
                                pg_offset =
                                    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
                                if (!pg_offset) {
                                        done = 1;
                                        break;
                                }
                        } else
                                pg_offset = PAGE_CACHE_SIZE;

                        if (page->index == tindex && trylock_page(page)) {
                                pg_len = xfs_probe_page(page, pg_offset, mapped);
                                unlock_page(page);
                        }

                        if (!pg_len) {
                                done = 1;
                                break;
                        }

                        total += pg_len;
                        tindex++;
                }

                pagevec_release(&pvec);
                cond_resched();
        }

        return total;
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
        struct page             *page,
        unsigned int            type)
{
        if (PageWriteback(page))
                return 0;

        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
                int                     acceptable = 0;

                bh = head = page_buffers(page);
                do {
                        if (buffer_unwritten(bh))
                                acceptable = (type == IOMAP_UNWRITTEN);
                        else if (buffer_delay(bh))
                                acceptable = (type == IOMAP_DELAY);
                        else if (buffer_dirty(bh) && buffer_mapped(bh))
                                acceptable = (type == IOMAP_NEW);
                        else
                                break;
                } while ((bh = bh->b_this_page) != head);

                if (acceptable)
                        return 1;
        }

        return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
        struct inode            *inode,
        struct page             *page,
        loff_t                  tindex,
        xfs_iomap_t             *mp,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh)
{
        struct buffer_head      *bh, *head;
        xfs_off_t               end_offset;
        unsigned long           p_offset;
        unsigned int            type;
        int                     bbits = inode->i_blkbits;
        int                     len, page_dirty;
        int                     count = 0, done = 0, uptodate = 1;
        xfs_off_t               offset = page_offset(page);

        if (page->index != tindex)
                goto fail;
        if (!trylock_page(page))
                goto fail;
        if (PageWriteback(page))
                goto fail_unlock_page;
        if (page->mapping != inode->i_mapping)
                goto fail_unlock_page;
        if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
                goto fail_unlock_page;

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
                        i_size_read(inode));

        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                                        PAGE_CACHE_SIZE);
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh))) {
                        done = 1;
                        continue;
                }

                if (buffer_unwritten(bh) || buffer_delay(bh)) {
                        if (buffer_unwritten(bh))
                                type = IOMAP_UNWRITTEN;
                        else
                                type = IOMAP_DELAY;

                        if (!xfs_iomap_valid(mp, offset)) {
                                done = 1;
                                continue;
                        }

                        ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
                        ASSERT(!(mp->iomap_flags & IOMAP_DELAY));

                        xfs_map_at_offset(bh, offset, bbits, mp);
                        if (startio) {
                                xfs_add_to_ioend(inode, bh, offset,
                                                 type, ioendp, done);
                        } else {
                                set_buffer_dirty(bh);
                                unlock_buffer(bh);
                                mark_buffer_dirty(bh);
                        }
                        page_dirty--;
                        count++;
                } else {
                        type = IOMAP_NEW;
                        if (buffer_mapped(bh) && all_bh && startio) {
                                lock_buffer(bh);
                                xfs_add_to_ioend(inode, bh, offset,
                                                 type, ioendp, done);
                                count++;
                                page_dirty--;
                        } else {
                                done = 1;
                        }
                }
        } while (offset += len, (bh = bh->b_this_page) != head);

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (startio) {
                if (count) {
                        struct backing_dev_info *bdi;

                        bdi = inode->i_mapping->backing_dev_info;
                        wbc->nr_to_write--;
                        if (bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
                        } else if (wbc->nr_to_write <= 0) {
                                done = 1;
                        }
                }
                xfs_start_page_writeback(page, !page_dirty, count);
        }

        return done;
 fail_unlock_page:
        unlock_page(page);
 fail:
        return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
        struct inode            *inode,
        pgoff_t                 tindex,
        xfs_iomap_t             *iomapp,
        xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh,
        pgoff_t                 tlast)
{
        struct pagevec          pvec;
        int                     done = 0, i;

        pagevec_init(&pvec, 0);
        while (!done && tindex <= tlast) {
                unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

                if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        done = xfs_convert_page(inode, pvec.pages[i], tindex++,
                                        iomapp, ioendp, wbc, startio, all_bh);
                        if (done)
                                break;
                }

                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state's cannot know if any of the blocks or which block for
 * that matter are dirty due to mmap writes, and therefore bh uptodate is
 * only valid if the page itself isn't completely uptodate.  Some layers
 * may clear the page dirty flag prior to calling write page, under the
 * assumption the entire page will be written out; by not writing out the
 * whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap write but only partially set up by block_prepare_write the
 * bh->b_states will not agree and only the ones set up by BPW/BCW will
 * have valid state, thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
        struct inode            *inode,
        struct page             *page,
        struct writeback_control *wbc,
        int                     startio,
        int                     unmapped)
{
        struct buffer_head      *bh, *head;
        xfs_iomap_t             iomap;
        xfs_ioend_t             *ioend = NULL, *iohead = NULL;
        loff_t                  offset;
        unsigned long           p_offset = 0;
        unsigned int            type;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index, tlast;
        ssize_t                 size, len;
        int                     flags, err, iomap_valid = 0, uptodate = 1;
        int                     page_dirty, count = 0;
        int                     trylock = 0;
        int                     all_bh = unmapped;

        if (startio) {
                if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
                        trylock |= BMAPI_TRYLOCK;
        }

        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_CACHE_SHIFT;
        last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
        if (page->index >= end_index) {
                if ((page->index >= end_index + 1) ||
                    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
                        if (startio)
                                unlock_page(page);
                        return 0;
                }
        }

        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
         * End offset is the highest offset that this page should represent.
         * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
         * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
         * hence give us the correct page_dirty count. On any other page,
         * it will be zero and in that case we need page_dirty to be the
         * count of buffers on the page.
         */
        end_offset = min_t(unsigned long long,
                        (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
        len = 1 << inode->i_blkbits;
        p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
                                        PAGE_CACHE_SIZE);
        p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;

        bh = head = page_buffers(page);
        offset = page_offset(page);
        flags = BMAPI_READ;
        type = IOMAP_NEW;

        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
                if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
                        /*
                         * the iomap is actually still valid, but the ioend
                         * isn't.  shouldn't happen too often.
                         */
                        iomap_valid = 0;
                        continue;
                }

                if (iomap_valid)
                        iomap_valid = xfs_iomap_valid(&iomap, offset);

                /*
                 * First case, map an unwritten extent and prepare for
                 * extent state conversion transaction on completion.
                 *
                 * Second case, allocate space for a delalloc buffer.
                 * We can return EAGAIN here in the release page case.
                 *
                 * Third case, an unmapped buffer was found, and we are
                 * in a path where we need to write the whole page out.
                 */
                if (buffer_unwritten(bh) || buffer_delay(bh) ||
                    ((buffer_uptodate(bh) || PageUptodate(page)) &&
                     !buffer_mapped(bh) && (unmapped || startio))) {
                        int new_ioend = 0;

                        /*
                         * Make sure we don't use a read-only iomap
                         */
                        if (flags == BMAPI_READ)
                                iomap_valid = 0;

                        if (buffer_unwritten(bh)) {
                                type = IOMAP_UNWRITTEN;
                                flags = BMAPI_WRITE | BMAPI_IGNSTATE;
                        } else if (buffer_delay(bh)) {
                                type = IOMAP_DELAY;
                                flags = BMAPI_ALLOCATE | trylock;
                        } else {
                                type = IOMAP_NEW;
                                flags = BMAPI_WRITE | BMAPI_MMAP;
                        }

                        if (!iomap_valid) {
                                /*
                                 * if we didn't have a valid mapping then we
                                 * need to ensure that we put the new mapping
                                 * in a new ioend structure. This needs to be
                                 * done to ensure that the ioends correctly
                                 * reflect the block mappings at io completion
                                 * for unwritten extent conversion.
                                 */
                                new_ioend = 1;
                                if (type == IOMAP_NEW) {
                                        size = xfs_probe_cluster(inode,
                                                        page, bh, head, 0);
                                } else {
                                        size = len;
                                }

                                err = xfs_map_blocks(inode, offset, size,
                                                &iomap, flags);
                                if (err)
                                        goto error;
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
                        if (iomap_valid) {
                                xfs_map_at_offset(bh, offset,
                                                inode->i_blkbits, &iomap);
                                if (startio) {
                                        xfs_add_to_ioend(inode, bh, offset,
                                                        type, &ioend,
                                                        new_ioend);
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
                                        mark_buffer_dirty(bh);
                                }
                                page_dirty--;
                                count++;
                        }
                } else if (buffer_uptodate(bh) && startio) {
                        /*
                         * we got here because the buffer is already mapped.
                         * That means it must already have extents allocated
                         * underneath it. Map the extent by reading it.
                         */
                        if (!iomap_valid || flags != BMAPI_READ) {
                                flags = BMAPI_READ;
                                size = xfs_probe_cluster(inode, page, bh,
                                                                head, 1);
                                err = xfs_map_blocks(inode, offset, size,
                                                &iomap, flags);
                                if (err)
                                        goto error;
                                iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }

                        /*
                         * We set the type to IOMAP_NEW in case we are doing a
                         * small write at EOF that is extending the file but
                         * without needing an allocation. We need to update the
                         * file size on I/O completion in this case so it is
                         * the same case as having just allocated a new extent
                         * that we are writing into for the first time.
                         */
                        type = IOMAP_NEW;
                        if (trylock_buffer(bh)) {
                                ASSERT(buffer_mapped(bh));
                                if (iomap_valid)
                                        all_bh = 1;
                                xfs_add_to_ioend(inode, bh, offset, type,
                                                &ioend, !iomap_valid);
                                page_dirty--;
                                count++;
                        } else {
                                iomap_valid = 0;
                        }
                } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
                           (unmapped || startio)) {
                        iomap_valid = 0;
                }

                if (!iohead)
                        iohead = ioend;

        } while (offset += len, ((bh = bh->b_this_page) != head));

        if (uptodate && bh == head)
                SetPageUptodate(page);

        if (startio)
                xfs_start_page_writeback(page, 1, count);

        if (ioend && iomap_valid) {
                offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
                                        PAGE_CACHE_SHIFT;
                tlast = min_t(pgoff_t, offset, last_index);
                xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
                                        wbc, startio, all_bh, tlast);
        }

        if (iohead)
                xfs_submit_ioend(iohead);

        return page_dirty;

error:
        if (iohead)
                xfs_cancel_ioend(iohead);

        /*
         * If it's delalloc and we have nowhere to put it,
         * throw it away, unless the lower layers told
         * us to try again.
         */
        if (err != -EAGAIN) {
                if (!unmapped)
                        block_invalidatepage(page, 0);
                ClearPageUptodate(page);
        }
        return err;
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first, if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
xfs_vm_writepage(
        struct page             *page,
        struct writeback_control *wbc)
{
        int                     error;
        int                     need_trans;
        int                     delalloc, unmapped, unwritten;
        struct inode            *inode = page->mapping->host;

        xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

        /*
         * We need a transaction if:
         *  1. There are delalloc buffers on the page
         *  2. The page is uptodate and we have unmapped buffers
         *  3. The page is uptodate and we have no buffers
         *  4. There are unwritten buffers on the page
         */
        if (!page_has_buffers(page)) {
                unmapped = 1;
                need_trans = 1;
        } else {
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
                if (!PageUptodate(page))
                        unmapped = 0;
                need_trans = delalloc + unmapped + unwritten;
        }

        /*
         * If we need a transaction and the process flags say
         * we are already in a transaction, or no IO is allowed
         * then mark the page dirty again and leave the page
         * as is.
         */
        if (current_test_flags(PF_FSTRANS) && need_trans)
                goto out_fail;

        /*
         * Delay hooking up buffer heads until we have
         * made our go/no-go decision.
         */
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);

        /*
         * VM calculation for nr_to_write seems off.  Bump it way
         * up, this gets simple streaming writes zippy again.
         * To be reviewed again after Jens' writeback changes.
         */
        wbc->nr_to_write *= 4;

        /*
         * Convert delayed allocate, unwritten or unmapped space
         * to real space and flush out to disk.
         */
        error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
        if (error == -EAGAIN)
                goto out_fail;
        if (unlikely(error < 0))
                goto out_unlock;

        return 0;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
out_unlock:
        unlock_page(page);
        return error;
}

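/*
 * Clear the truncated flag on the inode and write back the mapping's
 * dirty pages through generic_writepages(), which calls ->writepage for
 * each dirty page.
 */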
STATIC int
xfs_vm_writepages(
        struct address_space    *mapping,
        struct writeback_control *wbc)
{
        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
        return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
xfs_vm_releasepage(
        struct page             *page,
        gfp_t                   gfp_mask)
{
        struct inode            *inode = page->mapping->host;
        int                     dirty, delalloc, unmapped, unwritten;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);

        if (!page_has_buffers(page))
                return 0;

        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)
                goto free_buffers;

        if (!(gfp_mask & __GFP_FS))
                return 0;

        /* If we are already inside a transaction or the thread cannot
         * do I/O, we cannot release this page.
         */
        if (current_test_flags(PF_FSTRANS))
                return 0;

        /*
         * Convert delalloc space to real space, do not flush the
         * data out to disk, that will be done by the caller.
         * Never need to allocate space here - we will always
         * come back to writepage in that case.
         */
        dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
        if (dirty == 0 && !unwritten)
                goto free_buffers;
        return 0;

free_buffers:
        return try_to_free_buffers(page);
}

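/*
 * Common get_block callback: map <iblock, size> through xfs_iomap() and
 * fill in the buffer_head state.  Shared by buffered and direct I/O,
 * with the direct flag selecting the extra handling direct I/O needs.
 */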
STATIC int
__xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create,
        int                     direct,
        bmapi_flags_t           flags)
{
        xfs_iomap_t             iomap;
        xfs_off_t               offset;
        ssize_t                 size;
        int                     niomap = 1;
        int                     error;

        offset = (xfs_off_t)iblock << inode->i_blkbits;
        ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
        size = bh_result->b_size;

        if (!create && direct && offset >= i_size_read(inode))
                return 0;

        error = xfs_iomap(XFS_I(inode), offset, size,
                          create ? flags : BMAPI_READ, &iomap, &niomap);
        if (error)
                return -error;
        if (niomap == 0)
                return 0;

        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
                /*
                 * For unwritten extents do not report a disk address on
                 * the read case (treat as if we're reading into a hole).
                 */
                if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        xfs_map_buffer(bh_result, &iomap, offset,
                                       inode->i_blkbits);
                }
                if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
                        if (direct)
                                bh_result->b_private = inode;
                        set_buffer_unwritten(bh_result);
                }
        }

        /*
         * If this is a realtime file, data may be on a different device
         * than the one b_bdev currently points at.
         */
        bh_result->b_bdev = iomap.iomap_target->bt_bdev;

        /*
         * If we previously allocated a block out beyond eof and we are now
         * coming back to use it then we will need to flag it as new even if it
         * has a disk address.
         *
         * With sub-block writes into unwritten extents we also need to mark
         * the buffer as new so that the unwritten parts of the buffer get
         * correctly zeroed.
         */
        if (create &&
            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
             (offset >= i_size_read(inode)) ||
             (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN))))
                set_buffer_new(bh_result);

        if (iomap.iomap_flags & IOMAP_DELAY) {
                BUG_ON(direct);
                if (create) {
                        set_buffer_uptodate(bh_result);
                        set_buffer_mapped(bh_result);
                        set_buffer_delay(bh_result);
                }
        }

        if (direct || size > (1 << inode->i_blkbits)) {
                ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
                offset = min_t(xfs_off_t,
                               iomap.iomap_bsize - iomap.iomap_delta, size);
                bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
        }

        return 0;
}

int
xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __xfs_get_blocks(inode, iblock,
                                bh_result, create, 0, BMAPI_WRITE);
}

STATIC int
xfs_get_blocks_direct(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
{
        return __xfs_get_blocks(inode, iblock,
                                bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC void
xfs_end_io_direct(
        struct kiocb            *iocb,
        loff_t                  offset,
        ssize_t                 size,
        void                    *private)
{
        xfs_ioend_t             *ioend = iocb->private;

        /*
         * Non-NULL private data means we need to issue a transaction to
         * convert a range from unwritten to written extents.  This needs
         * to happen from process context but aio+dio I/O completion
         * happens from irq context so we need to defer it to a workqueue.
         * This is not necessary for synchronous direct I/O, but we do
         * it anyway to keep the code uniform and simpler.
         *
         * Well, if only it were that simple. Because synchronous direct I/O
         * requires extent conversion to occur *before* we return to userspace,
         * we have to wait for extent conversion to complete. Look at the
         * iocb that has been passed to us to determine if this is AIO or
         * not. If it is synchronous, tell xfs_finish_ioend() to kick the
         * workqueue and wait for it to complete.
         *
         * The core direct I/O code might be changed to always call the
         * completion handler in the future, in which case all this can
         * go away.
         */
        ioend->io_offset = offset;
        ioend->io_size = size;
        if (ioend->io_type == IOMAP_READ) {
                xfs_finish_ioend(ioend, 0);
        } else if (private && size > 0) {
                xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
        } else {
                /*
                 * A direct I/O write ioend starts its life in unwritten
                 * state in case they map an unwritten extent.  This write
                 * didn't map an unwritten extent so switch its completion
                 * handler.
                 */
                INIT_WORK(&ioend->io_work, xfs_end_bio_written);
                xfs_finish_ioend(ioend, 0);
        }

        /*
         * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
         * against double-freeing.
         */
        iocb->private = NULL;
}

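/*
 * Direct I/O entry point.  An ioend is allocated up front and handed to
 * the completion handler through iocb->private; if blockdev_direct_IO
 * returns without queueing the iocb, the unused ioend is torn down again
 * here to avoid leaking it.
 */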
STATIC ssize_t
xfs_vm_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
        const struct iovec      *iov,
        loff_t                  offset,
        unsigned long           nr_segs)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        struct block_device     *bdev;
        ssize_t                 ret;

        bdev = xfs_find_bdev_for_inode(XFS_I(inode));

        if (rw == WRITE) {
                iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
                ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
                        bdev, iov, offset, nr_segs,
                        xfs_get_blocks_direct,
                        xfs_end_io_direct);
        } else {
                iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
                ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                        bdev, iov, offset, nr_segs,
                        xfs_get_blocks_direct,
                        xfs_end_io_direct);
        }

        if (unlikely(ret != -EIOCBQUEUED && iocb->private))
                xfs_destroy_ioend(iocb->private);
        return ret;
}

STATIC int
xfs_vm_write_begin(
        struct file             *file,
        struct address_space    *mapping,
        loff_t                  pos,
        unsigned                len,
        unsigned                flags,
        struct page             **pagep,
        void                    **fsdata)
{
        *pagep = NULL;
        return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                 xfs_get_blocks);
}

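/*
 * bmap is used by applications such as lilo to find out where a file's
 * blocks live.  Flush delayed allocate data first so the mapping we
 * report is the final on-disk one.
 */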
STATIC sector_t
xfs_vm_bmap(
        struct address_space    *mapping,
        sector_t                block)
{
        struct inode            *inode = (struct inode *)mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);

        xfs_itrace_entry(XFS_I(inode));
        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
        struct file             *unused,
        struct page             *page)
{
        return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
        struct file             *unused,
        struct address_space    *mapping,
        struct list_head        *pages,
        unsigned                nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

STATIC void
xfs_vm_invalidatepage(
        struct page             *page,
        unsigned long           offset)
{
        xfs_page_trace(XFS_INVALIDPAGE_ENTER,
                        page->mapping->host, page, offset);
        block_invalidatepage(page, offset);
}

const struct address_space_operations xfs_address_space_operations = {
        .readpage               = xfs_vm_readpage,
        .readpages              = xfs_vm_readpages,
        .writepage              = xfs_vm_writepage,
        .writepages             = xfs_vm_writepages,
        .sync_page              = block_sync_page,
        .releasepage            = xfs_vm_releasepage,
        .invalidatepage         = xfs_vm_invalidatepage,
        .write_begin            = xfs_vm_write_begin,
        .write_end              = generic_write_end,
        .bmap                   = xfs_vm_bmap,
        .direct_IO              = xfs_vm_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
};