/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Lock ordering:
 *
 *  ->i_mmap_mutex		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_mutex		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_mutex
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_file_buffered_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_mutex
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and lots of others)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 */
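
/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */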
void __delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}
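
/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the
 * page cache and locked.  It will never put the page into the free list,
 * the caller has a reference on the page.
 */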
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int sleep_on_page(void *word)
{
	io_schedule();
	return 0;
}

static int sleep_on_page_killable(void *word)
{
	sleep_on_page(word);
	return fatal_signal_pending(current) ? -EINTR : 0;
}

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
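
/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */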
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
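
/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */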
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
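
/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */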
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret2, ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
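
/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */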
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
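
/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */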
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;
		__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);

		/* mem_cgroup codes must not be called under tree_lock */
		mem_cgroup_replace_page_cache(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
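
/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */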
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		return error;

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		return error;
	}

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = radix_tree_insert(&mapping->page_tree, offset, page);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;
	mapping->nrpages++;
	__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);
	return error;
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret;

	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add_file(page);
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = get_mems_allowed();
			n = cpuset_mem_spread_node();
			page = alloc_pages_exact_node(n, gfp, 0);
		} while (!put_mems_allowed(cpuset_mems_cookie) && !page);

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif
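
/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */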
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     sleep_on_page_killable, TASK_KILLABLE);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);
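
/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wake_up_page()).
 */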
void unlock_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
					sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}
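
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Is there a pagecache struct page at the given (mapping, offset) tuple?
 * If yes, increment its refcount and return it; if no, return NULL.
 */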
struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so return it without
			 * attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_page);
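
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns zero if the page was not present. find_lock_page() may sleep.
 */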
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_page(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_page);
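
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or zero on
 * memory exhaustion.
 */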
struct page *find_or_create_page(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;
		/*
		 * We want a regular kernel memory (not highmem or lowmem on
		 * 32-bit) allocation for the radix tree nodes in the tree, but
		 * we need to honour the context-specific requirements the
		 * caller has asked for. GFP_RECLAIM_MASK collects those
		 * requirements.
		 */
		err = add_to_page_cache_lru(page, mapping, index,
			(gfp_mask & GFP_RECLAIM_MASK));
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}
	return page;
}
EXPORT_SYMBOL(find_or_create_page);
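
/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */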
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so skip over it -
			 * we only reach this from invalidate_mapping_pages().
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* The hole: no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so stop looking for
			 * contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.   We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * This function is never used on a shmem/tmpfs
			 * mapping, so a swap entry won't be found here.
			 */
			BUG();
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
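
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */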
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (page) {
		if (trylock_page(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => ...
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}
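
/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @desc:	read_descriptor
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */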
static void do_generic_file_read(struct file *filp, loff_t *ppos,
		read_descriptor_t *desc)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
								desc, offset))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The file_read_actor routine returns how many bytes were
		 * actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = file_read_actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			desc->error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

/*
 * Performs necessary checks before doing a write
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @count:	number of bytes to write
 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 *
 * Adjust number of segments and amount of bytes to write (nr_segs should be
 * properly initialized first). Returns appropriate error code that caller
 * should return or zero in case that write should be allowed.
 */
int generic_segment_checks(const struct iovec *iov,
			unsigned long *nr_segs, size_t *count, int access_flags)
{
	unsigned long seg;
	size_t cnt = 0;
	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}
EXPORT_SYMBOL(generic_segment_checks);
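
/**
 * generic_file_aio_read - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @pos:	current file position
 *
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */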
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg = 0;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	count = 0;
	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, pos,
					pos + iov_length(iov, nr_segs) - 1);
		if (!retval) {
			retval = mapping->a_ops->direct_IO(READ, iocb,
							   iov, pos, nr_segs);
		}
		if (retval > 0) {
			*ppos = pos + retval;
			count -= retval;
		}

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.
		 */
		if (retval < 0 || !count || *ppos >= size) {
			file_accessed(filp);
			goto out;
		}
	}

	count = retval;
	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;
		loff_t offset = 0;

		/*
		 * If we did a short DIO read we need to skip the section of the
		 * iov that we've already read data into.
		 */
		if (count) {
			if (count > iov[seg].iov_len) {
				count -= iov[seg].iov_len;
				continue;
			}
			offset = count;
			count = 0;
		}

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base + offset;
		desc.count = iov[seg].iov_len - offset;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_generic_file_read(filp, ppos, &desc);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	unsigned long ra_pages;
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra_pages = max_sane_readahead(ra->ra_pages);
	ra->start = max_t(long, 0, offset - ra_pages / 2);
	ra->size = ra_pages;
	ra->async_size = ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}
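
/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */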
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	pgoff_t size;
	int ret = 0;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (offset >= size)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (unlikely(offset >= size)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= filemap_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)

{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data, gfp);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	}
out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page_async - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Same as read_cache_page, but don't wait for page to become unlocked
 * after submitting it to the filler.
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page but don't wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page_async);

static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
}
EXPORT_SYMBOL(read_cache_page_gfp);

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page then wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
}
EXPORT_SYMBOL(read_cache_page);

static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number
 * of bytes which were copied.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	BUG_ON(!in_atomic());
	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

/*
 * This has the same sideeffects and return value as
 * iov_iter_copy_from_user_atomic().
 * The difference is that it attempts to resolve faults.
 * Page must not be locked.
 */
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap(page);
	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);

void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overruning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
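
/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */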
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);

	if (unlikely(*pos < 0))
		return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
			*pos = i_size_read(inode);

		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
#ifdef CONFIG_BLOCK
		loff_t isize;
		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		if (*pos + *count > isize)
			*count = isize - *pos;
#else
		return -EPERM;
#endif
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	mark_page_accessed(page);
	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
		size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;
	size_t write_len;
	pgoff_t end;

	if (count != ocount)
		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

	write_len = iov_length(iov, *nr_segs);
	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached page from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		/*
		 * If a page can not be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT, end);
	}

	if (written > 0) {
		pos += written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		*ppos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	int status;
	gfp_t gfp_mask;
	struct page *page;
	gfp_t gfp_notmask = 0;

	gfp_mask = mapping_gfp_mask(mapping);
	if (mapping_cap_account_dirty(mapping))
		gfp_mask |= __GFP_WRITE;
	if (flags & AOP_FLAG_NOFS)
		gfp_notmask = __GFP_FS;
repeat:
	page = find_lock_page(mapping, index);
	if (page)
		goto found;

	page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
	if (!page)
		return NULL;
	status = add_to_page_cache_lru(page, mapping, index,
						GFP_KERNEL & ~gfp_notmask);
	if (unlikely(status)) {
		page_cache_release(page);
		if (status == -EEXIST)
			goto repeat;
		return NULL;
	}
found:
	wait_for_stable_page(page);
	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

static ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	/*
	 * Copies from kernel address space cannot fail (KERNEL_DS)
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		mark_page_accessed(page);
		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}
	} while (iov_iter_count(i));

	return written ? written : status;
}

ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	ssize_t status;
	struct iov_iter i;

	iov_iter_init(&i, iov, nr_segs, count, written);
	status = generic_perform_write(file, &i, pos);

	if (likely(status >= 0)) {
		written += status;
		*ppos = pos + status;
	}

	return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);
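
/**
 * __generic_file_aio_write - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @iov:	vector with data to write
 * @nr_segs:	number of segments in the vector
 * @ppos:	position where to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */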
ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				 unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	struct inode *inode = mapping->host;
	loff_t pos;
	ssize_t written;
	ssize_t err;

	ocount = 0;
	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;
	pos = *ppos;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		loff_t endbyte;
		ssize_t written_buffered;

		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
							ppos, count, ocount);
		if (written < 0 || written == count)
			goto out;
		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;
		written_buffered = generic_file_buffered_write(iocb, iov,
						nr_segs, pos, ppos, count,
						written);
		/*
		 * If generic_file_buffered_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + written_buffered - written - 1;
		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
		if (err == 0) {
			written = written_buffered;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_file_buffered_write(iocb, iov, nr_segs,
				pos, ppos, count, written);
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_aio_write);

/**
 * generic_file_aio_write - write data to a file
 * @iocb:	IO state structure
 * @iov:	vector with data to write
 * @nr_segs:	number of segments in the vector
 * @pos:	position in file where to write
 *
 * This is a wrapper around __generic_file_aio_write() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);
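
/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private).  If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
 */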
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);