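/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT support for filesystems using the traditional get_block
 * interface: user pages are mapped and packed into BIOs which are
 * submitted directly to the block layer, bypassing the page cache.
 */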
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>

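/*
 * How many user pages to map in one call to iov_iter_get_pages().  This
 * determines the size of a structure in the slab cache.
 */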
#define DIO_PAGES	64

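/*
 * Flags for dio_complete()
 */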
#define DIO_COMPLETE_ASYNC		0x01	/* This is async IO */
#define DIO_COMPLETE_INVALIDATE	0x02	/* Can invalidate pages */

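/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block
 * quantity by sdio->blkfactor.
 */

/* dio_state only used in the submission path */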
struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	sector_t block_in_file;		/* Current offset into the underlying
					   file, in dio_block units */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submission function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	struct iov_iter *iter;

	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	size_t from, to;
};

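/* dio_state communicated between submission path and end_io */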
struct dio {
	int flags;			/* doesn't change */
	int op;
	int op_flags;
	blk_qc_t bio_cookie;
	struct gendisk *bio_disk;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from iov_iter_get_pages() */
	int is_async;			/* is IO async ? */
	bool defer_completion;		/* defer AIO completion to workqueue? */
	bool should_dirty;		/* if pages should be dirtied */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;			/* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	union {
		struct page *pages[DIO_PAGES];	/* page buffer */
		struct work_struct complete_work;/* deferred AIO completion */
	};
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

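/*
 * How many pages are in the queue?
 */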
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

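/*
 * Go grab and pin some userspace pages.  Typically we'll get 64 at a time.
 */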
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	ssize_t ret;

	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
				 &sdio->from);

	if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		get_page(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		sdio->from = 0;
		sdio->to = PAGE_SIZE;
		return 0;
	}

	if (ret >= 0) {
		iov_iter_advance(sdio->iter, ret);
		ret += sdio->from;
		sdio->head = 0;
		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
		return 0;
	}
	return ret;
}

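/*
 * Get another userspace page.  Stash it in the dio.  The page is
 * externally counted; the reference is dropped once the dio is done with
 * it (see dio_cleanup() and dio_bio_complete()).
 */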
static inline struct page *dio_get_page(struct dio *dio,
					struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head];
}

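/*
 * Warn about a page cache invalidation failure during a direct I/O write.
 * Rate-limited to at most one report per day per file.
 */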
void dio_warn_stale_pagecache(struct file *filp)
{
	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
	char pathname[128];
	struct inode *inode = file_inode(filp);
	char *path;

	errseq_set(&inode->i_mapping->wb_err, -EIO);
	if (__ratelimit(&_rs)) {
		path = file_path(filp, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
			current->comm);
	}
}

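/*
 * dio_complete() - called when all DIO BIO I/O has been completed
 *
 * This drops i_dio_count, lets interested parties know that a DIO operation
 * has completed, and calculates the resulting return code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */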
static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
{
	loff_t offset = dio->iocb->ki_pos;
	ssize_t transferred = 0;
	int err;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if ((dio->op == REQ_OP_READ) &&
		    ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
		/* ignore EFAULT if some IO has been done */
		if (unlikely(ret == -EFAULT) && transferred)
			ret = 0;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io) {
		/* end_io may alter the return value */
		err = dio->end_io(dio->iocb, offset, ret, dio->private);
		if (err)
			ret = err;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're
	 * writing.  Either one is a pretty crazy thing to do, so we don't
	 * support it 100%.  If this invalidation fails, tough, the write
	 * still worked...
	 *
	 * This page cache invalidation also has to come after dio->end_io(),
	 * as some filesystems convert unwritten extents to real allocations
	 * in end_io(); invalidating beforehand could leave stale data around.
	 */
	if (flags & DIO_COMPLETE_INVALIDATE &&
	    ret > 0 && dio->op == REQ_OP_WRITE &&
	    dio->inode->i_mapping->nrpages) {
		err = invalidate_inode_pages2_range(dio->inode->i_mapping,
					offset >> PAGE_SHIFT,
					(offset + ret - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(dio->iocb->ki_filp);
	}

	inode_dio_end(dio->inode);

	if (flags & DIO_COMPLETE_ASYNC) {
		/*
		 * generic_write_sync expects ki_pos to have already been
		 * updated, but the submission path only does this for
		 * synchronous I/O.
		 */
		dio->iocb->ki_pos += transferred;

		if (ret > 0 && dio->op == REQ_OP_WRITE)
			ret = generic_write_sync(dio->iocb, ret);
		dio->iocb->ki_complete(dio->iocb, ret, 0);
	}

	kmem_cache_free(dio_cache, dio);
	return ret;
}

static void dio_aio_complete_work(struct work_struct *work)
{
	struct dio *dio = container_of(work, struct dio, complete_work);

	dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
}

static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);

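/*
 * Asynchronous IO callback.
 */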
static void dio_bio_end_aio(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long remaining;
	unsigned long flags;
	bool defer_completion = false;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		/*
		 * Defer completion when defer_completion is set or
		 * when the inode has pages mapped and this is AIO write.
		 * We need to invalidate those pages because there is a
		 * chance they contain stale data in the case buffered IO
		 * went in between AIO submission and completion into the
		 * same region.
		 */
		if (dio->result)
			defer_completion = dio->defer_completion ||
					   (dio->op == REQ_OP_WRITE &&
					    dio->inode->i_mapping->nrpages);
		if (defer_completion) {
			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
			queue_work(dio->inode->i_sb->s_dio_done_wq,
				   &dio->complete_work);
		} else {
			dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
		}
	}
}

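/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */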
static void dio_bio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

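/**
 * dio_end_io - handle the end io action for the given bio
 * @bio: The direct io bio that is being completed
 *
 * This is meant to be called by any filesystem that uses its own dio_submit_t
 * so that the DIO-specific endio actions are dealt with after the filesystem
 * has done its completion work.
 */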
void dio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;

	if (dio->is_async)
		dio_bio_end_aio(bio);
	else
		dio_bio_end_io(bio);
}
EXPORT_SYMBOL_GPL(dio_end_io);

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when allowed to sleep and
	 * we request a valid number of vectors.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = first_sector;
	bio_set_op_attrs(bio, dio->op, dio->op_flags);
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	bio->bi_write_hint = dio->iocb->ki_hint;

	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

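/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */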
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
		bio_set_pages_dirty(bio);

	dio->bio_disk = bio->bi_disk;

	if (sdio->submit_io) {
		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
		dio->bio_cookie = BLK_QC_T_NONE;
	} else
		dio->bio_cookie = submit_bio(bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

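/*
 * Release any resources in case of a failure
 */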
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (sdio->head < sdio->tail)
		put_page(dio->pages[sdio->head++]);
}

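/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs are completed.  This must only be called once all
 * bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller holds a reference on the dio.
 */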
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true))
			io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

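/*
 * Process one completed BIO.  No locks are held.
 */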
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
{
	blk_status_t err = bio->bi_status;
	bool should_dirty = dio->op == REQ_OP_READ && dio->should_dirty;

	if (err) {
		if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
			dio->io_error = -EAGAIN;
		else
			dio->io_error = -EIO;
	}

	if (dio->is_async && should_dirty) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		bio_release_pages(bio, should_dirty);
		bio_put(bio);
	}
	return err;
}

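/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete().  IO
 * errors are propagated through dio->io_error and should be propagated via
 * dio_complete().
 */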
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

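/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */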
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}

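/*
 * Create workqueue for deferred direct IO completions.  We allocate the
 * workqueue when it's first needed.  This avoids creating workqueues for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in its name.
 */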
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}

static int dio_set_defer_completion(struct dio *dio)
{
	struct super_block *sb = dio->inode->i_sb;

	if (dio->defer_completion)
		return 0;
	dio->defer_completion = true;
	if (!sb->s_dio_done_wq)
		return sb_init_dio_done_wq(sb);
	return 0;
}

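/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, i_blocksize(inode).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */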
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
			   struct buffer_head *map_bh)
{
	int ret;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	int create;
	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
	loff_t i_size;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		fs_endblk = (sdio->final_block_in_request - 1) >>
					sdio->blkfactor;
		fs_count = fs_endblk - fs_startblk + 1;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << i_blkbits;

		/*
		 * For writes that could fill holes inside i_size on a
		 * DIO_SKIP_HOLES filesystem we forbid block creations: only
		 * overwrites are permitted.  We will return early to the
		 * caller once we see an unmapped buffer head returned, and
		 * the caller will fall back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->op == REQ_OP_WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			i_size = i_size_read(dio->inode);
			if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);

		/* Store for completion */
		dio->private = map_bh->b_private;

		if (ret == 0 && buffer_defer_completion(map_bh))
			ret = dio_set_defer_completion(dio);
	}
	return ret;
}

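/*
 * There is no bio.  Make one now.
 */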
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		sector_t start_sector, struct buffer_head *map_bh)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}

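/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */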
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		get_page(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}

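/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */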
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
		struct buffer_head *map_bh)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_iter.bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

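/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */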
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		    unsigned offset, unsigned len, sector_t blocknr,
		    struct buffer_head *map_bh)
{
	int ret = 0;

	if (dio->op == REQ_OP_WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			return ret;
	}

	get_page(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	/*
	 * If sdio->boundary then we want to schedule the IO now to
	 * avoid metadata seeks.
	 */
	if (sdio->boundary) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		if (sdio->bio)
			dio_bio_submit(dio, sdio);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
	}
	return ret;
}

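/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill-in the unused portion of the
 * block with zeros.  This happens only if user-buffer, fileoffset or
 * io length is not filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */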
static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
		int end, struct buffer_head *map_bh)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	sdio->start_zero_done = 1;
	if (!sdio->blkfactor || !buffer_new(map_bh))
		return;

	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end)
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
				sdio->next_block_for_io, map_bh))
		return;

	sdio->next_block_for_io += this_chunk_blocks;
}

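/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission.
 *
 * Direct IO against a blockdev is different from a file.  Because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */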
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
			struct buffer_head *map_bh)
{
	const unsigned blkbits = sdio->blkbits;
	const unsigned i_blkbits = blkbits + sdio->blkfactor;
	int ret = 0;

	while (sdio->block_in_file < sdio->final_block_in_request) {
		struct page *page;
		size_t from, to;

		page = dio_get_page(dio, sdio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		from = sdio->head ? 0 : sdio->from;
		to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
		sdio->head++;

		while (from < to) {
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (sdio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio, sdio, map_bh);
				if (ret) {
					put_page(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				sdio->blocks_available =
						map_bh->b_size >> blkbits;
				sdio->next_block_for_io =
					map_bh->b_blocknr << sdio->blkfactor;
				if (buffer_new(map_bh)) {
					clean_bdev_aliases(
						map_bh->b_bdev,
						map_bh->b_blocknr,
						map_bh->b_size >> i_blkbits);
				}

				if (!sdio->blkfactor)
					goto do_holes;

				blkmask = (1 << sdio->blkfactor) - 1;
				dio_remainder = (sdio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					sdio->next_block_for_io += dio_remainder;
				sdio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/*
				 * Unmapped blocks can't be written to
				 * directly: return -ENOTBLK and let the
				 * caller fall back to buffered I/O.
				 */
				if (dio->op == REQ_OP_WRITE) {
					put_page(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (sdio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					put_page(page);
					goto out;
				}
				zero_user(page, from, 1 << blkbits);
				sdio->block_in_file++;
				from += 1 << blkbits;
				dio->result += 1 << blkbits;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
				dio_zero_block(dio, sdio, 0, map_bh);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = sdio->blocks_available;
			u = (to - from) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = sdio->final_block_in_request - sdio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			if (this_chunk_blocks == sdio->blocks_available)
				sdio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, sdio, page,
						  from,
						  this_chunk_bytes,
						  sdio->next_block_for_io,
						  map_bh);
			if (ret) {
				put_page(page);
				goto out;
			}
			sdio->next_block_for_io += this_chunk_blocks;

			sdio->block_in_file += this_chunk_blocks;
			from += this_chunk_bytes;
			dio->result += this_chunk_bytes;
			sdio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
			if (sdio->block_in_file == sdio->final_block_in_request)
				break;
		}

		/* Drop the page reference taken in dio_refill_pages() */
		put_page(page);
	}
out:
	return ret;
}

static inline int drop_refcount(struct dio *dio)
{
	int ret2;
	unsigned long flags;

	/*
	 * Sync will always be dropping the final ref and completing the
	 * operation.  AIO can if all the bios raced to complete before we
	 * got here; in that case dio_complete() translates the EIOCBQUEUED
	 * into the proper return code that the caller will hand to
	 * ->ki_complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so
	 * that completion paths can drop their ref and use the remaining
	 * count to decide to wake the submission path atomically.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return ret2;
}

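/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held, for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *
 * To help with locking against truncate we increment the i_dio_count
 * counter before starting direct I/O, and decrement it once we are done.
 * Truncate can wait for it to reach zero to provide exclusion.  In the AIO
 * case the extending of the file is handled as part of allocating the
 * block(s), and decrementing i_dio_count is done in the completion handler.
 *
 * NOTE: if you pass "sdio" to anything by pointer make sure that function
 * doesn't look at non-local variables other than dio.
 */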
static inline ssize_t
do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
		      struct block_device *bdev, struct iov_iter *iter,
		      get_block_t get_block, dio_iodone_t end_io,
		      dio_submit_t submit_io, int flags)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;
	const size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	const loff_t end = offset + count;
	struct dio *dio;
	struct dio_submit sdio = { 0, };
	struct buffer_head map_bh = { 0, };
	struct blk_plug plug;
	unsigned long align = offset | iov_iter_alignment(iter);

	/*
	 * Avoid references to bdev if not absolutely needed to give
	 * the early prefetch in the caller enough time.
	 */
	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			goto out;
	}

	/* watch out for a 0 len io from a tricky fs */
	if (iov_iter_rw(iter) == READ && !count)
		return 0;

	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
	retval = -ENOMEM;
	if (!dio)
		goto out;
	/*
	 * Believe it or not, zeroing out the page array caused a .5%
	 * performance regression in a database benchmark.  So, we take
	 * care to only zero out what's needed.
	 */
	memset(dio, 0, offsetof(struct dio, pages));

	dio->flags = flags;
	if (dio->flags & DIO_LOCKING) {
		if (iov_iter_rw(iter) == READ) {
			struct address_space *mapping =
					iocb->ki_filp->f_mapping;

			/* will be released at I/O completion */
			inode_lock(inode);

			retval = filemap_write_and_wait_range(mapping, offset,
							      end - 1);
			if (retval) {
				inode_unlock(inode);
				kmem_cache_free(dio_cache, dio);
				goto out;
			}
		}
	}

	/* Once we sampled i_size check for reads beyond EOF */
	dio->i_size = i_size_read(inode);
	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
		if (dio->flags & DIO_LOCKING)
			inode_unlock(inode);
		kmem_cache_free(dio_cache, dio);
		retval = 0;
		goto out;
	}

	/*
	 * For file extending writes updating i_size before data writeouts
	 * complete can expose uninitialized blocks in dumb filesystems.
	 * In that case we need to wait for I/O completion even if asked
	 * for an asynchronous write.
	 */
	if (is_sync_kiocb(iocb))
		dio->is_async = false;
	else if (iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
		dio->is_async = false;
	else
		dio->is_async = true;

	dio->inode = inode;
	if (iov_iter_rw(iter) == WRITE) {
		dio->op = REQ_OP_WRITE;
		dio->op_flags = REQ_SYNC | REQ_IDLE;
		if (iocb->ki_flags & IOCB_NOWAIT)
			dio->op_flags |= REQ_NOWAIT;
	} else {
		dio->op = REQ_OP_READ;
	}
	if (iocb->ki_flags & IOCB_HIPRI)
		dio->op_flags |= REQ_HIPRI;

	/*
	 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
	 * so that we can call ->fsync.
	 */
	if (dio->is_async && iov_iter_rw(iter) == WRITE) {
		retval = 0;
		if (iocb->ki_flags & IOCB_DSYNC)
			retval = dio_set_defer_completion(dio);
		else if (!dio->inode->i_sb->s_dio_done_wq) {
			/*
			 * In case of AIO write racing with buffered read we
			 * need to defer completion.  We can't decide this now,
			 * however the workqueue needs to be initialized here.
			 */
			retval = sb_init_dio_done_wq(dio->inode->i_sb);
		}
		if (retval) {
			/*
			 * We grab i_mutex only for reads so we don't have
			 * to release it here
			 */
			kmem_cache_free(dio_cache, dio);
			goto out;
		}
	}

	/*
	 * Will be decremented at I/O completion time.
	 */
	inode_dio_begin(inode);

	retval = 0;
	sdio.blkbits = blkbits;
	sdio.blkfactor = i_blkbits - blkbits;
	sdio.block_in_file = offset >> blkbits;

	sdio.get_block = get_block;
	dio->end_io = end_io;
	sdio.submit_io = submit_io;
	sdio.final_block_in_bio = -1;
	sdio.next_block_for_io = -1;

	dio->iocb = iocb;

	spin_lock_init(&dio->bio_lock);
	dio->refcount = 1;

	dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
	sdio.iter = iter;
	sdio.final_block_in_request = end >> blkbits;

	/*
	 * In case of non-aligned buffers, we may need 2 more
	 * pages since we need to zero out first and last block.
	 */
	if (unlikely(sdio.blkfactor))
		sdio.pages_in_io = 2;

	sdio.pages_in_io += iov_iter_npages(iter, INT_MAX);

	blk_start_plug(&plug);

	retval = do_direct_IO(dio, &sdio, &map_bh);
	if (retval)
		dio_cleanup(dio, &sdio);

	if (retval == -ENOTBLK) {
		/*
		 * The remaining part of the request will be
		 * handled by buffered I/O when we return
		 */
		retval = 0;
	}
	/*
	 * There may be some unwritten disk at the end of a part-written
	 * fs-block-sized block.  Go zero that now.
	 */
	dio_zero_block(dio, &sdio, 1, &map_bh);

	if (sdio.cur_page) {
		ssize_t ret2;

		ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
		if (retval == 0)
			retval = ret2;
		put_page(sdio.cur_page);
		sdio.cur_page = NULL;
	}
	if (sdio.bio)
		dio_bio_submit(dio, &sdio);

	blk_finish_plug(&plug);

	/*
	 * It is possible that, we return short IO due to end of file.
	 * In that case, we need to release all the pages we got hold on.
	 */
	dio_cleanup(dio, &sdio);

	/*
	 * All block lookups have been performed.  For READ requests
	 * we can let i_mutex go now that its achieved its purpose
	 * of protecting us from looking up uninitialized blocks.
	 */
	if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
		inode_unlock(dio->inode);

	/*
	 * The only time we want to leave bios in flight is when a successful
	 * partial aio read or full aio write have been setup.  In that case
	 * bio completion will call aio_complete.  The only time it's safe to
	 * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
	 * This had *better* be the only place that raises -EIOCBQUEUED.
	 */
	BUG_ON(retval == -EIOCBQUEUED);
	if (dio->is_async && retval == 0 && dio->result &&
	    (iov_iter_rw(iter) == READ || dio->result == count))
		retval = -EIOCBQUEUED;
	else
		dio_await_completion(dio);

	if (drop_refcount(dio) == 0) {
		retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
	} else
		BUG_ON(retval != -EIOCBQUEUED);

out:
	return retval;
}

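/*
 * As a usage sketch (a hypothetical filesystem "foofs"; the names
 * foofs_direct_IO and foofs_get_block are illustrative, not from this
 * file), a filesystem typically reaches this code from its ->direct_IO
 * method via the blockdev_direct_IO() wrapper in <linux/fs.h>:
 *
 *	static ssize_t foofs_direct_IO(struct kiocb *iocb,
 *				       struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return blockdev_direct_IO(iocb, inode, iter,
 *					  foofs_get_block);
 *	}
 *
 * That wrapper passes the inode's own bdev, NULL end_io/submit_io hooks
 * and the DIO_LOCKING | DIO_SKIP_HOLES flags.
 */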
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
			     struct block_device *bdev, struct iov_iter *iter,
			     get_block_t get_block,
			     dio_iodone_t end_io, dio_submit_t submit_io,
			     int flags)
{
	/*
	 * The block device state is needed in the end to finally
	 * submit everything.  Since it's likely to be cache cold
	 * prefetch it here as first thing to hide some of the
	 * latency.
	 *
	 * Attempt to prefetch the pieces we likely need later.
	 */
	prefetch(&bdev->bd_disk->part_tbl);
	prefetch(bdev->bd_queue);
	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);

	return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
				     end_io, submit_io, flags);
}

EXPORT_SYMBOL(__blockdev_direct_IO);

static __init int dio_init(void)
{
	dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
	return 0;
}
module_init(dio_init)