/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked.  The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock and
 * mem_cgroup_begin_page_stat().
 */
void __delete_from_page_cache(struct page *page, void *shadow,
			      struct mem_cgroup *memcg)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);
	BUG_ON(page_mapped(page));

	/*
	 * At this point page must be either written or cleaned by truncate.
	 * Dirty page here signals a bug and loss of unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely but
	 * leaves PageDirty set: it has no effect for truncated page and
	 * anyway will be cleared before returning page into buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, memcg,
				     inode_to_wb(mapping->host));
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the
 * page cache and locked.  It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct mem_cgroup *memcg;
	unsigned long flags;

	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;

	memcg = mem_cgroup_begin_page_stat(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL, memcg);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	mem_cgroup_end_page_stat(memcg);

	if (freepage)
		freepage(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(delete_from_page_cache);

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
				       int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
	pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret2, ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
							   lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
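
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * simple ->fsync() is typically built on the helper above; "myfs_fsync"
 * and "myfs_sync_metadata" are hypothetical names:
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		struct inode *inode = file_inode(file);
 *		int err;
 *
 *		err = filemap_write_and_wait_range(file->f_mapping,
 *						   start, end);
 *		if (err)
 *			return err;
 *		mutex_lock(&inode->i_mutex);
 *		err = myfs_sync_metadata(inode);   (hypothetical helper)
 *		mutex_unlock(&inode->i_mutex);
 *		return err;
 *	}
 */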

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		struct mem_cgroup *memcg;
		unsigned long flags;

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		page_cache_get(new);
		new->mapping = mapping;
		new->index = offset;

		memcg = mem_cgroup_begin_page_stat(old);
		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL, memcg);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_end_page_stat(memcg);
		mem_cgroup_migrate(old, new, true);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		page_cache_release(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;
		if (shadowp)
			*shadowp = p;
		mapping->nrshadows--;
		if (node)
			workingset_node_shadows_dec(node);
	}
	radix_tree_replace_slot(slot, page);
	mapping->nrpages++;
	if (node) {
		workingset_node_pages_inc(node);
		/*
		 * Don't track node that contains actual pages.
		 *
		 * Avoid acquiring the list_lru lock if already
		 * untracked.  The list_empty() test is safe as
		 * node->private_list is protected by
		 * mapping->tree_lock.
		 */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
	return 0;
}

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg);
		return error;
	}

	page_cache_get(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg);
	page_cache_release(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__set_page_locked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__clear_page_locked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 */
		if (shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = alloc_pages_exact_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
EXPORT_SYMBOL(page_waitqueue);

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io, TASK_KILLABLE);
}

int wait_on_page_bit_killable_timeout(struct page *page,
				      int bit_nr, unsigned long timeout)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	wait.key.timeout = jiffies + timeout;
	if (!test_bit(bit_nr, &page->flags))
		return 0;
	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io_timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
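
/*
 * Example (illustrative only): the usual page lock protocol pairs a
 * lookup that returns the page locked with unlock_page() once the caller
 * no longer needs page->mapping-stable state:
 *
 *	page = find_lock_page(mapping, index);
 *	if (page) {
 *		... inspect or modify the locked page ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */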

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * ->writepage. This is not a problem for normal zones where
	 * writeback is expected to be a rare event so rotating
	 * non-reclaimable pages is not a problem.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, int rw, int err)
{
	if (rw == READ) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else { /* rw == WRITE */
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
			   TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page), &wait,
				  bit_wait_io, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: FGP flags modifying how the page is returned
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: the page is returned locked
 * FGP_CREAT: if the page is not present then a new page is allocated
 *   using @gfp_mask and added to the page cache and the VM's LRU list.
 *   The page is returned locked and with an increased refcount.
 *   Otherwise, %NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				page_cache_release(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp_mask &= ~__GFP_FS;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			__SetPageReferenced(page);

		err = add_to_page_cache_lru(page, mapping, offset,
				gfp_mask & GFP_RECLAIM_MASK);
		if (unlikely(err)) {
			page_cache_release(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);
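
/*
 * Example (illustrative only): the find_or_create_page() wrapper in
 * <linux/pagemap.h> is essentially this single call:
 *
 *	page = pagecache_get_page(mapping, index,
 *			FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
 *			mapping_gfp_mask(mapping));
 */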

/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries page cache entries in the mapping.  The entries are
 * placed at @entries.  find_get_entries() takes a reference against
 * any actual pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				WARN_ON(iter.index);
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* The hole, there no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			page_cache_release(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.   We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				/*
				 * Transient condition which can only trigger
				 * when entry at index 0 moves out of or back
				 * to root: none yet gotten, safe to restart.
				 */
				goto restart;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a window for page reclaim
			 * to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => ...
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @iter:	data destination
 * @written:	already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */

		ret = copy_page_to_iter(page, offset, nr, iter);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		written += ret;
		if (!iov_iter_count(iter))
			goto out;
		if (ret < nr) {
			error = -EFAULT;
			goto out;
		}
		continue;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				error = 0;
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping, index,
				GFP_KERNEL & mapping_gfp_mask(mapping));
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST) {
				error = 0;
				goto find_page;
			}
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
	return written ? written : error;
}

/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iter:	destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;
	loff_t pos = *ppos;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;
		size_t count = iov_iter_count(iter);
		loff_t size;

		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, pos,
					pos + count - 1);
		if (!retval) {
			struct iov_iter data = *iter;
			retval = mapping->a_ops->direct_IO(iocb, &data, pos);
		}

		if (retval > 0) {
			*ppos = pos + retval;
			iov_iter_advance(iter, retval);
		}

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.  Buffered reads will not work for
		 * DAX files, so don't bother trying.
		 */
		if (retval < 0 || !iov_iter_count(iter) || *ppos >= size ||
		    IS_DAX(inode)) {
			file_accessed(file);
			goto out;
		}
	}

	retval = do_generic_file_read(file, ppos, iter, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);
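
/*
 * Example (illustrative only): filesystems that use the page cache
 * directly usually just plug the generic routines into their
 * file_operations; "myfs" is a hypothetical name:
 *
 *	const struct file_operations myfs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 */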

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset,
				GFP_KERNEL & mapping_gfp_mask(mapping));
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	unsigned long ra_pages;
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra_pages = max_sane_readahead(ra->ra_pages);
	ra->start = max_t(long, 0, offset - ra_pages / 2);
	ra->size = ra_pages;
	ra->async_size = ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
 * The mmap_sem has usually been released in this case.
 * See __lock_page_or_retry() for the exception.
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	loff_t size;
	int ret = 0;

	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (offset >= size >> PAGE_CACHE_SHIFT)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
	if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct radix_tree_iter iter;
	void **slot;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	loff_t size;
	struct page *page;
	unsigned long address = (unsigned long) vmf->virtual_address;
	unsigned long addr;
	pte_t *pte;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
		if (iter.index > vmf->max_pgoff)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto next;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				break;
			else
				goto next;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}

		if (!PageUptodate(page) ||
				PageReadahead(page) ||
				PageHWPoison(page))
			goto skip;
		if (!trylock_page(page))
			goto skip;

		if (page->mapping != mapping || !PageUptodate(page))
			goto unlock;

		size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE);
		if (page->index >= size >> PAGE_CACHE_SHIFT)
			goto unlock;

		pte = vmf->pte + page->index - vmf->pgoff;
		if (!pte_none(*pte))
			goto unlock;

		if (file->f_ra.mmap_miss > 0)
			file->f_ra.mmap_miss--;
		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
		do_set_pte(vma, addr, page, pte, false, false);
		unlock_page(page);
		goto next;
unlock:
		unlock_page(page);
skip:
		page_cache_release(page);
next:
		if (iter.index == vmf->max_pgoff)
			break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(filemap_map_pages);

int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		} else {
			page = wait_on_page_read(page);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)

{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data, gfp);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	} else {
		page = wait_on_page_read(page);
		if (IS_ERR(page))
			return page;
	}
out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become up to date.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);

/**
 * read_cache_page_gfp - read into page cache, using specified allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
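
/*
 * Example (illustrative only): a caller that must not recurse into
 * filesystem reclaim can mask __GFP_FS out of the mapping's gfp mask:
 *
 *	page = read_cache_page_gfp(mapping, index,
 *			mapping_gfp_mask(mapping) & ~__GFP_FS);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the now-uptodate page ...
 *	page_cache_release(page);
 */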

/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);
	loff_t pos;

	if (!iov_iter_count(from))
		return 0;

	/* FIXME: this is for backwards compatibility with 2.4 */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	pos = iocb->ki_pos;

	if (limit != RLIM_INFINITY) {
		if (iocb->ki_pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		iov_iter_truncate(from, limit - (unsigned long)pos);
	}

	/*
	 * LFS rule
	 */
	if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (pos >= MAX_NON_LFS)
			return -EFBIG;
		iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 */
	if (unlikely(pos >= inode->i_sb->s_maxbytes))
		return -EFBIG;

	iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;
	size_t write_len;
	pgoff_t end;
	struct iov_iter data;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached page from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_CACHE_SHIFT, end);
		/*
		 * If a page can not be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	data = *from;
	written = mapping->a_ops->direct_IO(iocb, &data, pos);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_CACHE_SHIFT, end);
	}

	if (written > 0) {
		pos += written;
		iov_iter_advance(from, written);
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	struct page *page;
	int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;

	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;

	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);

/**
 * __generic_file_write_iter - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from, iocb->ki_pos);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes.  Some filesystems do this for writes to
		 * holes, for example.  For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
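
/*
 * Example (illustrative only): a filesystem that needs its own work
 * around the generic path can open-code the same checks/write/sync
 * pattern using __generic_file_write_iter(); "myfs_write_iter" is a
 * hypothetical name:
 *
 *	static ssize_t myfs_write_iter(struct kiocb *iocb,
 *				       struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		mutex_lock(&inode->i_mutex);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = __generic_file_write_iter(iocb, from);
 *		mutex_unlock(&inode->i_mutex);
 *		if (ret > 0) {
 *			ssize_t err = generic_write_sync(iocb->ki_filp,
 *					iocb->ki_pos - ret, ret);
 *			if (err < 0)
 *				ret = err;
 *		}
 *		return ret;
 *	}
 */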

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private).  If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
 *
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);