// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
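
/*
 * Usage sketch (illustrative, not part of this file): a filesystem that
 * drives its direct I/O through iomap_dio_rw() can wire this helper up as
 * its ->iopoll method, as XFS does.  The example_* names below are
 * placeholders:
 *
 *	const struct file_operations example_file_operations = {
 *		.read_iter	= example_file_read_iter,
 *		.write_iter	= example_file_write_iter,
 *		.iopoll		= iomap_dio_iopoll,
 *	};
 *
 * IOCB_HIPRI requests can then spin on the blk_qc_t cookie that submission
 * stashes in kiocb->ki_cookie and kiocb->private.
 */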

static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
		struct bio *bio, loff_t pos)
{
	atomic_inc(&dio->ref);

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, dio->iocb);

	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
	if (dio->dops && dio->dops->submit_io)
		dio->submit.cookie = dio->dops->submit_io(
				file_inode(dio->iocb->ki_filp),
				iomap, bio, pos);
	else
		dio->submit.cookie = submit_bio(bio);
}

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would
	 * cache zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}
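
/*
 * Illustrative sketch (hypothetical, not from this file): the ->end_io
 * handler invoked above receives the accumulated IOMAP_DIO_* flags, so a
 * filesystem can key unwritten extent conversion off IOMAP_DIO_UNWRITTEN,
 * roughly along the lines of xfs_dio_write_end_io():
 *
 *	static int example_dio_write_end_io(struct kiocb *iocb, ssize_t size,
 *			int error, unsigned flags)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (error)
 *			return error;
 *		if (!size)
 *			return 0;
 *		if (flags & IOMAP_DIO_UNWRITTEN)
 *			return example_convert_unwritten(inode, iocb->ki_pos,
 *					size);
 *		return 0;
 *	}
 *
 *	static const struct iomap_dio_ops example_dio_write_ops = {
 *		.end_io		= example_dio_write_end_io,
 *	};
 *
 * example_convert_unwritten() stands in for the filesystem's own conversion
 * helper; a real handler must also deal with i_size extension, COW
 * remapping, and whatever else its on-disk format requires.
 */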

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as racing bio completions may all try to set the error and only the
 * first one to fail should win.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static void
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(dio, iomap, bio, pos);
}

static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA. This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
	if (nr_pages <= 0) {
		ret = nr_pages;
		goto out;
	}

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
		iomap_dio_submit_bio(dio, iomap, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via
	 * mmap reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		bool wait_for_completion)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	if (!count)
		return 0;

	if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion))
		return -EIO;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter_is_iovec(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, pos, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, pos, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
			/*
			 * We only report that we've read data up to i_size.
			 * Revert iter to a state corresponding to that as
			 * some callers (such as the splice code) rely on it.
			 */
			iov_iter_revert(iter, pos - dio->i_size);
			break;
		}
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie, true))
				blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
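
/*
 * Usage sketch (illustrative, hypothetical names modelled on the XFS
 * callers): a filesystem's ->read_iter path, with the locks it needs held,
 * would issue direct I/O as
 *
 *	ret = iomap_dio_rw(iocb, to, &example_read_iomap_ops, NULL,
 *			is_sync_kiocb(iocb));
 *
 * while a write path passes its write iomap_ops plus an iomap_dio_ops
 * carrying the ->end_io handler:
 *
 *	ret = iomap_dio_rw(iocb, from, &example_write_iomap_ops,
 *			&example_dio_write_ops, is_sync_kiocb(iocb));
 *
 * Passing wait_for_completion == true makes the caller wait even for an
 * asynchronous iocb; otherwise a queued async request returns -EIOCBQUEUED
 * and completes through ->ki_complete.
 */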