// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

/*
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;

static struct iomap_page *
iomap_page_create(struct inode *inode, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (iop || nr_blocks <= 1)
		return iop;

	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
			GFP_NOFS | __GFP_NOFAIL);
	spin_lock_init(&iop->uptodate_lock);
	if (folio_test_uptodate(folio))
		bitmap_fill(iop->uptodate, nr_blocks);
	folio_attach_private(folio, iop);
	return iop;
}

static void iomap_page_release(struct folio *folio)
{
	struct iomap_page *iop = folio_detach_private(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	kfree(iop);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_page *iop = to_iomap_page(folio);
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

static void iomap_iop_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned long flags;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	bitmap_set(iop->uptodate, first, last - first + 1);
	if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
		folio_mark_uptodate(folio);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}

static void iomap_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len)
{
	if (folio_test_error(folio))
		return;

	if (iop)
		iomap_iop_set_range_uptodate(folio, iop, off, len);
	else
		folio_mark_uptodate(folio);
}

static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(folio, iop, offset, len);
	}

	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	struct iomap_page *iop;
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t poff = offset_in_page(iomap->offset);
	size_t offset = offset_in_folio(folio, iomap->offset);
	void *addr;

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
		return -EIO;
	if (WARN_ON_ONCE(size > PAGE_SIZE -
			 offset_in_page(iomap->inline_data)))
		return -EIO;
	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		iop = iomap_page_create(iter->inode, folio);
	else
		iop = to_iomap_page(folio);

	addr = kmap_local_folio(folio, offset);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - poff - size);
	kunmap_local(addr);
	iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
	return 0;
}
240
241static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
242 loff_t pos)
243{
244 const struct iomap *srcmap = iomap_iter_srcmap(iter);
245
246 return srcmap->type != IOMAP_MAPPED ||
247 (srcmap->flags & IOMAP_F_NEW) ||
248 pos >= i_size_read(iter->inode);
249}
250
251static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
252 struct iomap_readpage_ctx *ctx, loff_t offset)
253{
254 const struct iomap *iomap = &iter->iomap;
255 loff_t pos = iter->pos + offset;
256 loff_t length = iomap_length(iter) - offset;
257 struct folio *folio = ctx->cur_folio;
258 struct iomap_page *iop;
259 loff_t orig_pos = pos;
260 size_t poff, plen;
261 sector_t sector;
262
263 if (iomap->type == IOMAP_INLINE)
264 return iomap_read_inline_data(iter, folio);
265
266
267 iop = iomap_page_create(iter->inode, folio);
268 iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
269 if (plen == 0)
270 goto done;
271
272 if (iomap_block_needs_zeroing(iter, pos)) {
273 folio_zero_range(folio, poff, plen);
274 iomap_set_range_uptodate(folio, iop, poff, plen);
275 goto done;
276 }
277
278 ctx->cur_folio_in_bio = true;
279 if (iop)
280 atomic_add(plen, &iop->read_bytes_pending);
281
282 sector = iomap_sector(iomap, pos);
283 if (!ctx->bio ||
284 bio_end_sector(ctx->bio) != sector ||
285 !bio_add_folio(ctx->bio, folio, plen, poff)) {
286 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
287 gfp_t orig_gfp = gfp;
288 unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
289
290 if (ctx->bio)
291 submit_bio(ctx->bio);
292
293 if (ctx->rac)
294 gfp |= __GFP_NORETRY | __GFP_NOWARN;
295 ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
296 REQ_OP_READ, gfp);
297
298
299
300
301
302 if (!ctx->bio) {
303 ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
304 orig_gfp);
305 }
306 if (ctx->rac)
307 ctx->bio->bi_opf |= REQ_RAHEAD;
308 ctx->bio->bi_iter.bi_sector = sector;
309 ctx->bio->bi_end_io = iomap_read_end_io;
310 bio_add_folio(ctx->bio, folio, plen, poff);
311 }
312
313done:
314
315
316
317
318
319
320 return pos - orig_pos + plen;
321}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_readpage_iter(&iter, &ctx, 0);

	if (ret < 0)
		folio_set_error(folio);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_page, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);
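
/*
 * Example (illustrative sketch, not part of this file): a filesystem
 * exposes this via its address_space_operations ->read_folio method,
 * passing its own iomap_ops.  "myfs_read_folio" and "myfs_read_iomap_ops"
 * are hypothetical names.
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return iomap_read_folio(folio, &myfs_read_iomap_ops);
 *	}
 */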

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.  It iterates over the folios described by @rac,
 * builds as few bios as possible to read them, and submits the I/O
 * asynchronously; folios that need no I/O are unlocked immediately.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * free memory.  It does not block on locked folios, nor does it throttle
 * writeback.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
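
/*
 * Example (hypothetical sketch): the readahead counterpart is wired up the
 * same way, usually sharing the read iomap_ops with ->read_folio above.
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &myfs_read_iomap_ops);
 *	}
 */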

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!iop)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!test_bit(i, iop->uptodate))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
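
/*
 * Example (hypothetical sketch): filesystems plug this helper straight into
 * their address_space_operations so the VFS can detect sub-folio uptodate
 * ranges and avoid needless read-modify-write cycles:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.read_folio		= myfs_read_folio,
 *		.readahead		= myfs_readahead,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *		... other methods ...
 *	};
 */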

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list();
	 * skip those here.
	 */
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return false;
	iomap_page_release(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		iomap_page_release(folio);
	} else if (folio_test_large(folio)) {
		/* Must release the iop so the page can be split */
		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
			     folio_test_dirty(folio));
		iomap_page_release(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
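
/*
 * Example (hypothetical sketch): both helpers are used directly as aops
 * methods; no filesystem-specific wrapper is normally needed.
 *
 *	.release_folio		= iomap_release_folio,
 *	.invalidate_folio	= iomap_invalidate_folio,
 */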

#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct folio *folio = page_folio(page);
	struct folio *newfolio = page_folio(newpage);
	int ret;

	ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_private(folio))
		folio_attach_private(newfolio, folio_detach_private(folio));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(newfolio, folio);
	else
		folio_migrate_flags(newfolio, folio);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
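
/*
 * Example (hypothetical sketch): with CONFIG_MIGRATION enabled, a filesystem
 * sets ".migratepage = iomap_migrate_page" in its aops so page migration and
 * compaction can move folios that still carry an attached iomap_page.
 */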

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing file.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_page *iop = iomap_page_create(iter->inode, folio);
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	if (folio_test_uptodate(folio))
		return 0;
	folio_clear_error(folio);

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, iop, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(iter->inode, pos, len);
		if (status)
			return status;
	}

	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
	if (!folio) {
		status = -ENOMEM;
		goto out_no_page;
	}
	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	folio_unlock(folio);
	folio_put(folio);
	iomap_write_failed(iter->inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, 0, NULL);
	return status;
}

static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return 0;
	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
	filemap_dirty_folio(inode->i_mapping, folio);
	return copied;
}

static size_t iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
	return copied;
}

/* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t old_size = iter->inode->i_size;
	size_t ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(iter, folio, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
				copied, &folio->page, NULL);
	} else {
		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(iter->inode, pos + ret);
		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
	}
	folio_unlock(folio);

	if (old_size < pos)
		pagecache_isize_extended(iter->inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(iter->inode, pos, ret, &folio->page);
	folio_put(folio);

	if (ret < len)
		iomap_write_failed(iter->inode, pos + ret, len - ret);
	return ret;
}

static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t written = 0;
	long status = 0;

	do {
		struct folio *folio;
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			break;

		page = folio_file_page(folio, pos >> PAGE_SHIFT);
		if (mapping_writably_mapped(iter->inode->i_mapping))
			flush_dcache_page(page);

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);

		status = iomap_write_end(iter, pos, bytes, copied, folio);

		if (unlikely(copied != status))
			iov_iter_revert(i, copied - status);

		cond_resched();
		if (unlikely(status == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (copied)
				bytes = copied;
			goto again;
		}
		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);
	if (iter.pos == iocb->ki_pos)
		return ret;
	return iter.pos - iocb->ki_pos;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
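
/*
 * Example (hypothetical sketch): a minimal ->write_iter built on this
 * helper.  Locking is simplified, and note that the caller is responsible
 * for advancing ki_pos; "myfs_buffered_write_iomap_ops" is an assumed name.
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_buffered_write_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0) {
 *			iocb->ki_pos += ret;
 *			ret = generic_write_sync(iocb, ret);
 *		}
 *		return ret;
 *	}
 */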

static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct folio *folio;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;

		status = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(status == 0))
			return -EIO;

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
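
/*
 * Example (hypothetical sketch): reflink-capable filesystems call this from
 * their fallocate(FALLOC_FL_UNSHARE_RANGE) path to break extent sharing over
 * a page cache range, e.g.:
 *
 *	error = iomap_file_unshare(inode, offset, len,
 *			&myfs_buffered_write_iomap_ops);
 */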

static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
		if (WARN_ON_ONCE(bytes == 0))
			return -EIO;

		pos += bytes;
		length -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
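
/*
 * Example (hypothetical sketch): zeroing the newly exposed range when a
 * truncate extends the file, as a setattr path might do:
 *
 *	if (newsize > oldsize)
 *		error = iomap_zero_range(inode, oldsize, newsize - oldsize,
 *				&did_zeroing, &myfs_buffered_write_iomap_ops);
 */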

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
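
/*
 * Example (hypothetical sketch): conversely, when truncating down, only the
 * sub-block tail of the new last block needs zeroing:
 *
 *	if (newsize < oldsize)
 *		error = iomap_truncate_page(inode, newsize, &did_zeroing,
 *				&myfs_buffered_write_iomap_ops);
 */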

static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
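
/*
 * Example (hypothetical sketch): wiring the fault path up through
 * vm_operations_struct; the filesystem supplies the iomap_ops that map the
 * faulting range for writing.  Locking details vary per filesystem.
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &myfs_buffered_write_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */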

static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len, int error)
{
	struct iomap_page *iop = to_iomap_page(folio);

	if (error) {
		folio_set_error(folio);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		folio_end_writeback(folio);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);
	u32 folio_count = 0;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct folio_iter fi;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk all folios in bio, ending page IO on them */
		bio_for_each_folio_all(fi, bio) {
			iomap_finish_folio_write(inode, fi.folio, fi.length,
					error);
			folio_count++;
		}
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
	return folio_count;
}

/*
 * Ioend completion routine for merged bios. This can only be called from task
 * contexts as merged ioends can be of unbound length. Hence we have to break up
 * the writeback completions into manageable chunks to avoid long scheduler
 * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
 * good batch processing.
 */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;
	u32 completions;

	might_sleep();

	list_replace_init(&ioend->io_list, &tmp);
	completions = iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		if (completions > IOEND_BATCH_SIZE * 8) {
			cond_resched();
			completions = 0;
		}
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		completions += iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);

/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	/*
	 * Do not merge physically discontiguous ioends. The filesystem
	 * completion functions will have to iterate the physical
	 * discontiguities even if we merge the ioends at a logical level, so
	 * we don't gain anything by merging physical discontiguities here.
	 *
	 * We cannot use bio->bi_iter.bi_sector here as it is modified during
	 * submission so does not point to the start sector of the bio at
	 * completion.
	 */
	if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
		return false;
	return true;
}

void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);

static int
iomap_ioend_compare(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);
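
/*
 * Example (hypothetical sketch): a filesystem completion worker typically
 * combines the exported helpers above - sort the collected ioends, merge
 * logically and physically contiguous ones, then finish each merged batch:
 *
 *	iomap_sort_ioends(&completion_list);
 *	while ((ioend = list_first_entry_or_null(&completion_list,
 *			struct iomap_ioend, io_list))) {
 *		list_del_init(&ioend->io_list);
 *		iomap_ioend_try_merge(ioend, &completion_list);
 *		iomap_finish_ioends(ioend, error);
 *	}
 */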

static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = bio->bi_private;

	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the bio instead of
 * submitting it.  This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
		int error)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(ioend, error);
	if (error) {
		/*
		 * If we're failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		ioend->io_bio->bi_status = errno_to_blk_status(error);
		bio_endio(ioend->io_bio);
		return error;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
		loff_t offset, sector_t sector, struct writeback_control *wbc)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
			       GFP_NOFS, &iomap_ioend_bioset);
	bio->bi_iter.bi_sector = sector;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_folios = 0;
	ioend->io_offset = offset;
	ioend->io_bio = bio;
	ioend->io_sector = sector;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
	struct bio *new;

	new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
	bio_clone_blkg_association(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);

	bio_chain(prev, new);
	bio_get(prev);		/* for iomap_finish_ioend */
	submit_bio(prev);
	return new;
}

static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	/*
	 * Limit ioend bio chain lengths to minimise IO completion latency. This
	 * also prevents long tight loops ending page writeback on all the
	 * folios in the ioend.
	 */
	if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
		return false;
	return true;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, pos);
	unsigned len = i_blocksize(inode);
	size_t poff = offset_in_folio(folio, pos);

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
	}

	if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
		wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
		bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
	}

	if (iop)
		atomic_add(len, &iop->write_bytes_pending);
	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, &folio->page, len);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * the forward progress guarantees we need to provide. The current ioend we're
 * adding blocks to is cached in the writepage context, and if the new block
 * doesn't append to the cached ioend, it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct folio *folio, u64 end_pos)
{
	struct iomap_page *iop = iomap_page_create(inode, folio);
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	unsigned nblocks = i_blocks_per_folio(inode, folio);
	u64 pos = folio_pos(folio);
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);

	/*
	 * Walk through the folio to find areas to write back. If we
	 * run off the end of the current map or find the current map
	 * invalid, grab a new one.
	 */
	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, pos);
		if (error)
			break;
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
				&submit_list);
		count++;
	}
	if (count)
		wpc->ioend->io_folios++;

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
	 */
	if (unlikely(error)) {
		/*
		 * Let the filesystem know what portion of the current page
		 * failed to map. If the page hasn't been added to ioend, it
		 * won't be affected by I/O completion and we must unlock it
		 * now.
		 */
		if (wpc->ops->discard_folio)
			wpc->ops->discard_folio(folio, pos);
		if (!count) {
			folio_unlock(folio);
			goto done;
		}
	}

	folio_start_writeback(folio);
	folio_unlock(folio);

	/*
	 * Preserve the original error if there was one; catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		folio_end_writeback(folio);
done:
	mapping_set_error(folio->mapping, error);
	return error;
}

/*
 * Write out a dirty folio.
 *
 * For delalloc space on the folio, we need to allocate space and flush it.
 * For unwritten space on the folio, we need to start the conversion to
 * regular allocated space.
 */
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
{
	struct folio *folio = page_folio(page);
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = folio->mapping->host;
	u64 end_pos, isize;

	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));

	/*
	 * Refuse to write the folio out if we're called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim endpoints. If the nested reclaim
	 * reaches a point that the stack usage is too deep, we will have
	 * problems.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Is this folio beyond the end of the file?
	 *
	 * The folio index is less than the end_index, adjust the end_pos
	 * to the highest offset that this folio should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	isize = i_size_read(inode);
	end_pos = folio_pos(folio) + folio_size(folio);
	if (end_pos > isize) {
		/*
		 * Check whether the folio to write out is beyond or straddles
		 * i_size or not.
		 * -----------------------------------------------------
		 * |		file mapping		       | <EOF>  |
		 * -----------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N  | Beyond |
		 * ^--------------------------------^----------|---------
		 * |				    |     Straddles     |
		 * ---------------------------------^----------|--------|
		 */
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * Skip the folio if it's fully outside i_size, e.g. due to a
		 * truncate operation that's in progress.  We must redirty it
		 * so that reclaim stops reclaiming it, otherwise
		 * iomap_release_folio() is called on it and gets confused.
		 *
		 * Note that end_index is a pgoff_t (unsigned long), so on a
		 * 32-bit machine with offsets past 16TB the offset arithmetic
		 * could overflow; comparing folio indices avoids that.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			goto redirty;

		/*
		 * The folio straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a
		 * file that is not a multiple of the page size, the
		 * remaining memory is zeroed when mapped, and writes to that
		 * region are not written out to the file."
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		end_pos = isize;
	}

	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);

redirty:
	folio_redirty_for_writepage(wbc, folio);
	folio_unlock(folio);
	return 0;
}

int
iomap_writepage(struct page *page, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = iomap_do_writepage(page, wbc, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepage);

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
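
/*
 * Example (hypothetical sketch): ->writepages is the usual entry point.  The
 * caller provides a stack-allocated writepage context and an
 * iomap_writeback_ops with map_blocks()/prepare_ioend()/discard_folio()
 * callbacks; "myfs_writeback_ops" is an assumed name.
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct iomap_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc,
 *				&myfs_writeback_ops);
 *	}
 */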

static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_inline_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);