// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};
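
/*
 * Reference counting sketch (derived from the code below): dio->ref starts
 * at 1 for the submitting task and is bumped once per bio in
 * iomap_dio_submit_bio().  Each bio completion drops one reference in
 * iomap_dio_bio_end_io(), and the submitter drops its initial reference at
 * the end of iomap_dio_rw().  Whoever drops the last reference completes
 * the dio: inline for synchronous reads, via the superblock dio workqueue
 * for async writes, or by waking the waiting submitter.
 */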

int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
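
/*
 * Usage sketch (illustrative, not part of this file): a filesystem enables
 * polled direct I/O by pointing its ->iopoll file operation at the helper
 * above, e.g.:
 *
 *	const struct file_operations example_file_operations = {
 *		...
 *		.iopoll		= iomap_dio_iopoll,
 *		...
 *	};
 *
 * iocb->private and iocb->ki_cookie are filled in by iomap_dio_rw() below
 * before it returns, which is what makes the blk_poll() call here work.
 */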

static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
		struct bio *bio)
{
	atomic_inc(&dio->ref);

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, dio->iocb);

	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
	dio->submit.cookie = submit_bio(bio);
}

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Record the first error seen for this dio.  Using cmpxchg() means that
 * only the initial failure is kept when multiple bios complete with
 * errors concurrently.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static void
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(dio, iomap, bio);
}

static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA. This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
		iomap_dio_submit_bio(dio, iomap, bio);
	} while (nr_pages);

	/*
	 * We need to zeroout the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied ? copied : ret;
}

static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue
 * a cache flush post write. This is slightly tricky because a single request
 * here can be mapped into multiple disjoint IOs and only a subset of the IOs
 * issued may be pure data writes. In that case, we still need to do a full
 * data sync completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	bool wait_for_completion = is_sync_kiocb(iocb);
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter_is_iovec(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
			/*
			 * We only report that we've read data up to i_size.
			 * Revert iter to a state corresponding to that as
			 * some callers (such as the splice code) rely on it.
			 */
			iov_iter_revert(iter, pos - dio->i_size);
			break;
		}
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference and a synchronous iocb, we
	 *	wait below and are woken by the I/O completion handler once
	 *	it drops the last reference and clears ->submit.waiter.
	 *
	 * ->wait_for_completion must be set before the decrement so the
	 * completion handler knows which of these models is in effect.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie, true))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
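
/*
 * Usage sketch (illustrative, not part of this file): callers hold
 * inode->i_rwsem and pass in their iomap_ops, roughly as XFS does for a
 * direct read:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 *	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 *
 * Writes typically pass an end_io callback (e.g. xfs_dio_write_end_io) so
 * the filesystem can convert unwritten extents or update the on-disk size
 * once the I/O completes.
 */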