/*
 *	linux/mm/filemap.c
 *
 * Generic page cache handling: the read, write and mmap paths that most
 * filesystems share.
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Lock ordering in this file (innermost last):
 *
 *   inode->i_mutex
 *     page lock (PG_locked)
 *       mapping->tree_lock
 *
 * ->tree_lock is an IRQ-safe spinlock: the radix tree's writeback tags
 * are also updated from interrupt context (end_page_writeback()).
 */
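/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */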
void __delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * If we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone.
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 *
	 * Fix it up by doing a final dirty accounting check after
	 * having removed the page entirely.
	 */
	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	}
}
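/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the
 * page cache and locked.  It will never put the page into the free list,
 * the caller has a reference on the page.
 */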
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;
	spin_lock_irq(&mapping->tree_lock);
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int sleep_on_page(void *word)
{
	io_schedule();
	return 0;
}

static int sleep_on_page_killable(void *word)
{
	sleep_on_page(word);
	return fatal_signal_pending(current) ? -EINTR : 0;
}

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
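/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */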
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
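/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */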
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
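/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */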
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret2, ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);
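/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */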
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
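/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */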
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
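/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */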
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(old));
	VM_BUG_ON(!PageLocked(new));
	VM_BUG_ON(new->mapping);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irq(&mapping->tree_lock);
		__delete_from_page_cache(old);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;
		__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);

		/* mem_cgroup codes must not be called under tree_lock */
		mem_cgroup_replace_page_cache(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
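/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */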
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapBacked(page));

	error = mem_cgroup_cache_charge(page, current->mm,
					gfp_mask & GFP_RECLAIM_MASK);
	if (error)
		return error;

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		return error;
	}

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = radix_tree_insert(&mapping->page_tree, offset, page);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;
	mapping->nrpages++;
	__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);
	return error;
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret;

	ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add_file(page);
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = get_mems_allowed();
			n = cpuset_mem_spread_node();
			page = alloc_pages_exact_node(n, gfp, 0);
		} while (!put_mems_allowed(cpuset_mems_cookie) && !page);

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif
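/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */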
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     sleep_on_page_killable, TASK_KILLABLE);
}
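/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */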
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);
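/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */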
void unlock_page(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
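/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */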
void end_page_writeback(struct page *page)
{
	if (TestClearPageReclaim(page))
		rotate_reclaimable_page(page);

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);
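/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */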
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
					sleep_on_page_killable, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}
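/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Is there a pagecache struct page at the given (mapping, offset)?
 * If yes, increment its refcount and return it; if no, return NULL.
 */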
struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so return it without
			 * attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_page);
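/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns zero if the page was not present. find_lock_page() may sleep.
 */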
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_page(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON(page->index != offset);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_page);
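/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and added to the pagecache and the VM's LRU
 * list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or zero on
 * memory exhaustion.
 */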
struct page *find_or_create_page(struct address_space *mapping,
		pgoff_t index, gfp_t gfp_mask)
{
	struct page *page;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;
		/*
		 * add_to_page_cache_lru() also allocates radix tree nodes,
		 * which need regular kernel memory, so restrict the mask to
		 * the reclaim-related flags here.
		 */
		err = add_to_page_cache_lru(page, mapping, index,
			(gfp_mask & GFP_RECLAIM_MASK));
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}
	return page;
}
EXPORT_SYMBOL(find_or_create_page);
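/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */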
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so skip over it -
			 * we only reach this from invalidate_mapping_pages().
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}
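/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */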
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* The hole, there no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so stop looking for
			 * contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);
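/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.   We update @index to index the next page for the traversal.
 */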
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * This function is never used on a shmem/tmpfs
			 * mapping, so a swap entry won't be found here.
			 */
			BUG();
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
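/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */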
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (page) {
		if (trylock_page(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);
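/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(B-2) => read(B-1) => bang => bang
 * => read(R) => miss => readahead(R...B) => media error [repeat]
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */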
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}
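/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @desc:	read_descriptor
 * @actor:	read method
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */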
static void do_generic_file_read(struct file *filp, loff_t *ppos,
		read_descriptor_t *desc, read_actor_t actor)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
								desc, offset))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			desc->error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}
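/*
 * Performs necessary checks before doing a write
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @count:	number of bytes to write
 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 *
 * Adjust number of segments and amount of bytes to write (nr_segs should be
 * properly initialized first). Returns appropriate error code that caller
 * should return or zero in case that write should be allowed.
 */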
int generic_segment_checks(const struct iovec *iov,
			unsigned long *nr_segs, size_t *count, int access_flags)
{
	unsigned long seg;
	size_t cnt = 0;
	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}
EXPORT_SYMBOL(generic_segment_checks);
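/**
 * generic_file_aio_read - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @pos:	current file position
 *
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */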
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg = 0;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	count = 0;
	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			retval = filemap_write_and_wait_range(mapping, pos,
					pos + iov_length(iov, nr_segs) - 1);
			if (!retval) {
				retval = mapping->a_ops->direct_IO(READ, iocb,
							iov, pos, nr_segs);
			}
			if (retval > 0) {
				*ppos = pos + retval;
				count -= retval;
			}

			/*
			 * Btrfs can have a short DIO read if we encounter
			 * compressed extents, so if there was an error, or if
			 * we've already read everything we wanted to, or if
			 * there was a short read because we hit EOF, go ahead
			 * and return.  Otherwise fallthrough to buffered io for
			 * the rest of the read.
			 */
			if (retval < 0 || !count || *ppos >= size) {
				file_accessed(filp);
				goto out;
			}
		}
	}

	count = retval;
	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;
		loff_t offset = 0;

		/*
		 * If we did a short DIO read we need to skip the section of the
		 * iov that we've already read data into.
		 */
		if (count) {
			if (count > iov[seg].iov_len) {
				count -= iov[seg].iov_len;
				continue;
			}
			offset = count;
			count = 0;
		}

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base + offset;
		desc.count = iov[seg].iov_len - offset;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_generic_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);

#ifdef CONFIG_MMU
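/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */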
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)
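/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */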
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	unsigned long ra_pages;
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra_pages = max_sane_readahead(ra->ra_pages);
	ra->start = max_t(long, 0, offset - ra_pages / 2);
	ra->size = ra_pages;
	ra->async_size = ra_pages / 4;
	ra_submit(ra, mapping, file);
}
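/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further.
 */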
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}
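/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * vma->vm_mm->mmap_sem must be held on entry.  If FAULT_FLAG_ALLOW_RETRY
 * is set, mmap_sem may be released depending on flags and our return value
 * (see __lock_page_or_retry).
 */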
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	pgoff_t size;
	int ret = 0;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (offset >= size)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON(page->index != offset);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (unlikely(offset >= size)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= filemap_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)

{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data, gfp);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	}
out:
	mark_page_accessed(page);
	return page;
}
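/**
 * read_cache_page_async - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Same as read_cache_page, but don't wait for page to become unlocked
 * after submitting it to the filler.
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page but don't wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */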
struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page_async);

static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}
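/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */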
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
}
EXPORT_SYMBOL(read_cache_page_gfp);
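/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page then wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */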
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
}
EXPORT_SYMBOL(read_cache_page);

static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}
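/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number
 * of bytes which were copied.
 */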
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	BUG_ON(!in_atomic());
	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
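/*
 * This has the same sideeffects and return value as
 * iov_iter_copy_from_user_atomic().
 * The difference is that it attempts to resolve faults.
 * Page must not be locked.
 */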
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap(page);
	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);

void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overruning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

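/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on this fact).
 */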
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
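/*
 * Return the count of just the current iov_iter segment.
 */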
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
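/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */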
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);

	if (unlikely(*pos < 0))
		return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
			*pos = i_size_read(inode);

		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
#ifdef CONFIG_BLOCK
		loff_t isize;
		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		if (*pos + *count > isize)
			*count = isize - *pos;
#else
		return -EPERM;
#endif
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	mark_page_accessed(page);
	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
		size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;
	size_t write_len;
	pgoff_t end;

	if (count != ocount)
		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

	write_len = iov_length(iov, *nr_segs);
	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached page from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		/*
		 * If a page can not be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT, end);
	}

	if (written > 0) {
		pos += written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		*ppos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
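/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */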
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	int status;
	gfp_t gfp_mask;
	struct page *page;
	gfp_t gfp_notmask = 0;

	gfp_mask = mapping_gfp_mask(mapping);
	if (mapping_cap_account_dirty(mapping))
		gfp_mask |= __GFP_WRITE;
	if (flags & AOP_FLAG_NOFS)
		gfp_notmask = __GFP_FS;
repeat:
	page = find_lock_page(mapping, index);
	if (page)
		goto found;

	page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
	if (!page)
		return NULL;
	status = add_to_page_cache_lru(page, mapping, index,
						GFP_KERNEL & ~gfp_notmask);
	if (unlikely(status)) {
		page_cache_release(page);
		if (status == -EEXIST)
			goto repeat;
		return NULL;
	}
found:
	wait_for_stable_page(page);
	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

static ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		mark_page_accessed(page);
		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}
	} while (iov_iter_count(i));

	return written ? written : status;
}

ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	ssize_t status;
	struct iov_iter i;

	iov_iter_init(&i, iov, nr_segs, count, written);
	status = generic_perform_write(file, &i, pos);

	if (likely(status >= 0)) {
		written += status;
		*ppos = pos + status;
	}

	return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);
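/**
 * __generic_file_aio_write - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @iov:	vector with data to write
 * @nr_segs:	number of segments in the vector
 * @ppos:	position where to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or
 * similar object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */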
ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				 unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	struct inode *inode = mapping->host;
	loff_t pos;
	ssize_t written;
	ssize_t err;

	ocount = 0;
	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err)
		return err;

	count = ocount;
	pos = *ppos;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		loff_t endbyte;
		ssize_t written_buffered;

		written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
							ppos, count, ocount);
		if (written < 0 || written == count)
			goto out;
		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;
		written_buffered = generic_file_buffered_write(iocb, iov,
						nr_segs, pos, ppos, count,
						written);
		/*
		 * If generic_file_buffered_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (written_buffered < 0) {
			err = written_buffered;
			goto out;
		}

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + written_buffered - written - 1;
		err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
		if (err == 0) {
			written = written_buffered;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_file_buffered_write(iocb, iov, nr_segs,
				pos, ppos, count, written);
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_aio_write);
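/**
 * generic_file_aio_write - write data to a file
 * @iocb:	IO state structure
 * @iov:	vector with data to write
 * @nr_segs:	number of segments in the vector
 * @pos:	position in file where to write
 *
 * This is a wrapper around __generic_file_aio_write() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC write
 * and acquires i_mutex as needed.
 */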
ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, pos, ret);
		if (err < 0 && ret > 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);
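/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private).  If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
 */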
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);