/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and most importantly avoid the need for filesystem specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to do with the actual
	 * request to be mapped.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied.  This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
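
/*
 * Illustration only (not part of the original file): an actor is handed a
 * range that lies entirely within the single extent returned by
 * ->iomap_begin, and reports how many bytes of it were processed.  A
 * hypothetical minimal actor that just measures mapped bytes might look
 * like this sketch:
 *
 *	static loff_t
 *	myfs_count_actor(struct inode *inode, loff_t pos, loff_t length,
 *			void *data, struct iomap *iomap)
 *	{
 *		loff_t *bytes = data;
 *
 *		if (iomap->type == IOMAP_MAPPED)
 *			*bytes += length;
 *		return length;
 *	}
 *
 * Returning less than length makes iomap_apply() stop early; a negative
 * return from the actor is propagated to the caller as an error.
 */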

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date: the atomic copy below cannot take a page
		 * fault, so the source page must already be resident.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
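
/*
 * Example caller, a sketch only: "myfs_iomap_ops" stands in for whatever
 * const struct iomap_ops a real filesystem supplies.  A ->write_iter
 * method built on this helper might look like:
 *
 *	ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */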

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
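
/*
 * Usage note (a sketch, not from this file): because iomap_dirty_actor
 * redirties page cache that is already uptodate, iomap_file_dirty() is
 * suited to forcing writeback through a new mapping, e.g. after a
 * reflink has made the old blocks shared ("myfs_iomap_ops" is
 * hypothetical):
 *
 *	err = iomap_file_dirty(inode, pos, len, &myfs_iomap_ops);
 */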

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = iomap->blkno +
		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
			offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
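
/*
 * Example (sketch): a filesystem extending a file in ->setattr can zero
 * the range between the old and new size so stale block contents are
 * never exposed; "myfs_iomap_ops" is hypothetical:
 *
 *	if (newsize > oldsize)
 *		error = iomap_zero_range(inode, oldsize, newsize - oldsize,
 *				&did_zeroing, &myfs_iomap_ops);
 */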

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
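
/*
 * Example (sketch): the complementary truncate-down case only needs the
 * partial tail block at the new EOF zeroed:
 *
 *	if (newsize < oldsize)
 *		error = iomap_truncate_page(inode, newsize, &did_zeroing,
 *				&myfs_iomap_ops);
 */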

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
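
/*
 * Example wiring, a sketch with hypothetical "myfs_" names: a filesystem
 * exposes this helper through its vm_operations_struct so that a write
 * fault on a shared mapping allocates blocks before the page is dirtied.
 * A real implementation would also bracket the call with
 * sb_start_pagefault()/sb_end_pagefault() and call file_update_time():
 *
 *	static int myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *	}
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */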

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
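
/*
 * Example (sketch): the ->fiemap inode operation reduces to a single
 * call; "myfs_iomap_ops" is hypothetical:
 *
 *	static int myfs_fiemap(struct inode *inode,
 *			struct fiemap_extent_info *fieinfo, u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fieinfo, start, len,
 *				&myfs_iomap_ops);
 *	}
 */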

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
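
/*
 * Example (sketch, hypothetical names): an ->llseek method can hand
 * SEEK_HOLE and SEEK_DATA to the two helpers above and fall back to
 * generic_file_llseek() for the ordinary whence values:
 *
 *	static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		switch (whence) {
 *		case SEEK_HOLE:
 *			offset = iomap_seek_hole(inode, offset,
 *					&myfs_iomap_ops);
 *			break;
 *		case SEEK_DATA:
 *			offset = iomap_seek_data(inode, offset,
 *					&myfs_iomap_ops);
 *			break;
 *		default:
 *			return generic_file_llseek(file, offset, whence);
 *		}
 *
 *		if (offset < 0)
 *			return offset;
 *		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *	}
 */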

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're
	 * writing.  Either one is a pretty crazy thing to do, so we don't
	 * support it 100%.  If this invalidation fails, tough, the write
	 * still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(err);
	}

	inode_dio_end(inode);
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;
	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
	ssize_t ret;

	ret = iomap_dio_complete(dio);
	if (is_write && ret > 0)
		ret = generic_write_sync(iocb, ret);
	iocb->ki_complete(iocb, ret, 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector =
		iomap->blkno + ((pos - iomap->offset) >> 9);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	int nr_pages, ret;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/* fall through */
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW)
			need_zeroout = true;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the size of this extent.
	 * We'll update the iter in the dio once we're done with the extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		if (dio->error)
			return 0;

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector =
			iomap->blkno + ((pos - iomap->offset) >> 9);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}

		if (dio->flags & IOMAP_DIO_WRITE) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
			task_io_account_write(bio->bi_iter.bi_size);
		} else {
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}

	iov_iter_advance(dio->submit.iter, length);
	return length;
}

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		dio->flags |= IOMAP_DIO_WRITE;
		flags |= IOMAP_WRITE;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	WARN_ON_ONCE(ret);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_mq_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
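
/*
 * Example caller (sketch; every "myfs_" name is hypothetical).  The
 * optional end_io callback runs once all bios have completed and is
 * where a filesystem would, for instance, convert unwritten extents;
 * returning 0 tells iomap_dio_complete() the I/O succeeded in full:
 *
 *	static int myfs_dio_end_io(struct kiocb *iocb, ssize_t size,
 *			unsigned flags)
 *	{
 *		if (size <= 0)
 *			return size;
 *		if (flags & IOMAP_DIO_UNWRITTEN)
 *			return myfs_convert_unwritten(iocb, size);
 *		return 0;
 *	}
 *
 *	static ssize_t myfs_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, myfs_dio_end_io);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */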