/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and lots of others)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */
static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrexceptional++;
		/*
		 * Make sure the nrexceptional update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked.  The
	 * list_empty() test is safe as node->private_list is protected by
	 * mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);
	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	VM_BUG_ON_PAGE(page_mapped(page), page);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
		int mapcount;

		pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
			 current->comm, page_to_pfn(page));
		dump_page(page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		mapcount = page_mapcount(page);
		if (mapping_exiting(mapping) &&
		    page_count(page) >= mapcount + 2) {
			/*
			 * All vmas have already been torn down, so it's
			 * a good bet that actually the page is unmapped,
			 * and we'd prefer not to leak it: if we're wrong,
			 * some other bad page check should catch it later.
			 */
			page_mapcount_reset(page);
			atomic_sub(mapcount, &page->_count);
		}
	}

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__dec_zone_page_state(page, NR_FILE_PAGES);
	if (PageSwapBacked(page))
		__dec_zone_page_state(page, NR_SHMEM);

	/*
	 * At this point page must be either written or cleaned by truncate.
	 * Most of the time it's a dirty page.  But sometimes the inode gets
	 * freed without first removing it from the page cache, and in that
	 * case the filesystem has to take care of the dirty accounting
	 * itself; warn so such cases are noticed.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the
 * page cache and locked.  It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (freepage)
		freepage(page);
	put_page(page);
}
EXPORT_SYMBOL(delete_from_page_cache);
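
/*
 * Example usage (editor's sketch, not part of the original file): the
 * usual pattern is to look the page up, lock it, and re-check that it
 * was not truncated concurrently before deleting it - compare
 * truncate_inode_page():
 *
 *	lock_page(page);
 *	if (page->mapping == mapping)
 *		delete_from_page_cache(page);
 *	unlock_page(page);
 *	put_page(page);
 */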

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
				       int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

static int __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	return ret;
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	int ret, ret2;

	ret = __filemap_fdatawait_range(mapping, start_byte, end_byte);
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
void filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return;

	__filemap_fdatawait_range(mapping, 0, i_size - 1);
}

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Check error status of the address space
 * and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if ((!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional)) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if ((!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
							   lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
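
/*
 * Example usage (editor's sketch, not part of the original file): the
 * write-out half of an fsync implementation typically flushes and waits
 * on the affected byte range before syncing its own metadata:
 *
 *	err = filemap_write_and_wait_range(inode->i_mapping, start, end);
 *	if (err)
 *		return err;
 */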

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		unsigned long flags;

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		get_page(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL);
		error = radix_tree_insert(&mapping->page_tree, offset, new);
		BUG_ON(error);
		mapping->nrpages++;

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_zone_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_zone_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_migrate(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		put_page(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;

		if (WARN_ON(dax_mapping(mapping)))
			return -EINVAL;

		if (shadowp)
			*shadowp = p;
		mapping->nrexceptional--;
		if (node)
			workingset_node_shadows_dec(node);
	}
	radix_tree_replace_slot(slot, page);
	mapping->nrpages++;
	if (node) {
		workingset_node_pages_inc(node);
		/*
		 * Don't track node that contains actual pages.
		 *
		 * Avoid acquiring the list_lru lock if already
		 * untracked.  The list_empty() test is safe as
		 * node->private_list is protected by
		 * mapping->tree_lock.
		 */
		if (!list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
	}
	return 0;
}

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg, false);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg, false);
		return error;
	}

	get_page(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_zone_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg, false);
	put_page(page);
	return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 */
		if (shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
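
/*
 * Example usage (editor's sketch, mirroring page_cache_read() below):
 * allocate a fresh page, insert it into the cache and the LRU, then let
 * the filesystem fill it:
 *
 *	page = __page_cache_alloc(gfp_mask | __GFP_COLD);
 *	if (!page)
 *		return -ENOMEM;
 *	ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
 *	if (ret == 0)
 *		ret = mapping->a_ops->readpage(file, page);
 *	put_page(page);
 */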

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}
EXPORT_SYMBOL(page_waitqueue);

void wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
					TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (!test_bit(bit_nr, &page->flags))
		return 0;

	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io, TASK_KILLABLE);
}

int wait_on_page_bit_killable_timeout(struct page *page,
				      int bit_nr, unsigned long timeout)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	wait.key.timeout = jiffies + timeout;
	if (!test_bit(bit_nr, &page->flags))
		return 0;
	return __wait_on_bit(page_waitqueue(page), &wait,
			     bit_wait_io_timeout, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(wait_on_page_bit_killable_timeout);

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The mb is necessary to enforce ordering between the clear_bit and the read
 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
 */
void unlock_page(struct page *page)
{
	page = compound_head(page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	clear_bit_unlock(PG_locked, &page->flags);
	smp_mb__after_atomic();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * ->writepage. This is not a problem for PageReclaim as the
	 * only possible race is with end_page_writeback anyway.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, int rw, int err)
{
	if (rw == READ) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else {
		if (err) {
			SetPageError(page);
			if (page->mapping)
				mapping_set_error(page->mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 */
void __lock_page(struct page *page)
{
	struct page *page_head = compound_head(page);
	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page_head), &wait, bit_wait_io,
					TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *page)
{
	struct page *page_head = compound_head(page);
	DEFINE_WAIT_BIT(wait, &page_head->flags, PG_locked);

	return __wait_on_bit_lock(page_waitqueue(page_head), &wait,
					bit_wait_io, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}

/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 5, then subsequently a hole is created at
 * index 10, page_cache_next_hole covering both indexes may return 10
 * if called under rcu_read_lock.
 */
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the prev hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 *
 * page_cache_prev_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time. For example, if a
 * hole is created at index 10, then subsequently a hole is created at
 * index 5, page_cache_prev_hole covering both indexes may return 5 if
 * called under rcu_read_lock.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);

/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			put_page(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);

/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: PCG flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * PCG flags modify how the page is returned.
 *
 * FGP_ACCESSED: the page will be marked accessed
 * FGP_LOCK: the page is returned locked
 * FGP_CREAT: if the page is not present then a new page is allocated
 *   using @gfp_mask and added to the page cache and the VM's LRU list.
 *   The page is returned locked and with an increased refcount.
 *   Otherwise, %NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				put_page(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp_mask &= ~__GFP_FS;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			__SetPageReferenced(page);

		err = add_to_page_cache_lru(page, mapping, offset,
					    gfp_mask & GFP_RECLAIM_MASK);
		if (unlikely(err)) {
			put_page(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);
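
/*
 * Example usage (editor's sketch): find_or_create_page() in
 * <linux/pagemap.h> is a thin wrapper around this function:
 *
 *	static inline struct page *find_or_create_page(
 *			struct address_space *mapping,
 *			pgoff_t offset, gfp_t gfp_mask)
 *	{
 *		return pagecache_get_page(mapping, offset,
 *					  FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
 *					  gfp_mask);
 *	}
 */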

/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* The hole, there no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(page);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page->index != iter.index) {
			put_page(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.   We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a window for page reclaim
			 * to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(page);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);
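
/*
 * Example usage (editor's sketch): walking every dirty page of a
 * mapping one pagevec at a time, roughly as write_cache_pages() does;
 * process_page() is a hypothetical per-page callback:
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *	unsigned i, nr;
 *
 *	pagevec_init(&pvec, 0);
 *	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
 *					PAGECACHE_TAG_DIRTY,
 *					PAGEVEC_SIZE))) {
 *		for (i = 0; i < nr; i++)
 *			process_page(pvec.pages[i]);
 *		pagevec_release(&pvec);
 *	}
 */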

/**
 * find_get_entries_tag - find and return entries that match @tag
 * @mapping:	the address_space to search
 * @start:	the starting page cache index
 * @tag:	the tag index
 * @nr_entries:	the maximum number of entries
 * @entries:	where the resulting entries are placed
 * @indices:	the cache indices corresponding to the entries in @entries
 *
 * Like find_get_entries, except we only return entries which are tagged with
 * @tag.
 */
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, start, tag) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_entries_tag);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}

/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @iter:	data destination
 * @written:	already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	index = *ppos >> PAGE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
	offset = *ppos & ~PAGE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			/*
			 * See comment in do_read_cache_page on why
			 * wait_on_page_locked is used to avoid unnecessarily
			 * serialising lookups.
			 */
			wait_on_page_locked_killable(page);
			if (PageUptodate(page))
				goto page_ok;

			if (inode->i_blkbits == PAGE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			put_page(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_MASK) + 1;
			if (nr <= offset) {
				put_page(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */

		ret = copy_page_to_iter(page, offset, nr, iter);
		offset += ret;
		index += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;
		prev_offset = offset;

		put_page(page);
		written += ret;
		if (!iov_iter_count(iter))
			goto out;
		if (ret < nr) {
			error = -EFAULT;
			goto out;
		}
		continue;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				put_page(page);
				error = 0;
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					put_page(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		put_page(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping, index,
				mapping_gfp_constraint(mapping, GFP_KERNEL));
		if (error) {
			put_page(page);
			if (error == -EEXIST) {
				error = 0;
				goto find_page;
			}
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_SHIFT) + offset;
	file_accessed(filp);
	return written ? written : error;
}

/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iter:	destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;
	loff_t pos = *ppos;
	size_t count = iov_iter_count(iter);

	if (!count)
		goto out; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;
		loff_t size;

		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, pos,
					pos + count - 1);
		if (!retval) {
			struct iov_iter data = *iter;
			retval = mapping->a_ops->direct_IO(iocb, &data, pos);
		}

		if (retval > 0) {
			*ppos = pos + retval;
			iov_iter_advance(iter, retval);
		}

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.  Buffered reads will not work for
		 * DAX files, so don't bother trying.
		 */
		if (retval < 0 || !iov_iter_count(iter) || *ppos >= size ||
		    IS_DAX(inode)) {
			file_accessed(file);
			goto out;
		}
	}

	retval = do_generic_file_read(file, ppos, iter, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 * @gfp_mask:	memory allocation flags
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = __page_cache_alloc(gfp_mask|__GFP_COLD);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		put_page(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are way too ugly, but this is a mess.  We cannot enumerate
 * every case cleanly, so the error handling falls through labels.
 *
 * vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
 * The mmap_sem has usually been released in this case.
 * See __lock_page_or_retry() for the exception.
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	struct page *page;
	loff_t size;
	int ret = 0;

	size = round_up(i_size_read(inode), PAGE_SIZE);
	if (offset >= size >> PAGE_SHIFT)
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		put_page(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	size = round_up(i_size_read(inode), PAGE_SIZE);
	if (unlikely(offset >= size >> PAGE_SHIFT)) {
		unlock_page(page);
		put_page(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset, vmf->gfp_mask);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there are no performance issues and no IO can
	 * be lost.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	put_page(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct radix_tree_iter iter;
	void **slot;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	loff_t size;
	struct page *page;
	unsigned long address = (unsigned long) vmf->virtual_address;
	unsigned long addr;
	pte_t *pte;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) {
		if (iter.index > vmf->max_pgoff)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto next;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			goto next;
		}

		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(page);
			goto repeat;
		}

		if (!PageUptodate(page) ||
				PageReadahead(page) ||
				PageHWPoison(page))
			goto skip;
		if (!trylock_page(page))
			goto skip;

		if (page->mapping != mapping || !PageUptodate(page))
			goto unlock;

		size = round_up(i_size_read(mapping->host), PAGE_SIZE);
		if (page->index >= size >> PAGE_SHIFT)
			goto unlock;

		pte = vmf->pte + page->index - vmf->pgoff;
		if (!pte_none(*pte))
			goto unlock;

		if (file->f_ra.mmap_miss > 0)
			file->f_ra.mmap_miss--;
		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
		do_set_pte(vma, addr, page, pte, false, false);
		unlock_page(page);
		goto next;
unlock:
		unlock_page(page);
skip:
		put_page(page);
next:
		if (iter.index == vmf->max_pgoff)
			break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(filemap_map_pages);

int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			put_page(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			put_page(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}

filler:
		err = filler(data, page);
		if (err < 0) {
			put_page(page);
			return ERR_PTR(err);
		}

		page = wait_on_page_read(page);
		if (IS_ERR(page))
			return page;
		goto out;
	}
	if (PageUptodate(page))
		goto out;

	/*
	 * Page is not up to date and may be locked due to one of the
	 * following cases:
	 * case a: Page is being filled and the page lock is held
	 * case b: Read/write error clearing the page uptodate status
	 * case c: Truncation in progress (page locked)
	 * case d: Reclaim in progress
	 *
	 * Case a, the page will be up to date when the page is unlocked.
	 *    There is no need to serialise on the page lock here as the
	 *    page is pinned so the lock gives no additional protection.
	 *    Even if the page is truncated, the data is still valid if
	 *    PageUptodate as it's a race with truncate.
	 * Case b, the page will not be up to date.
	 * Case c, the page may be truncated but in itself, the data may
	 *    still be valid after IO completes as it's a read vs truncate
	 *    race. The operation must restart if the page mapping changes.
	 * Case d, similar to truncation. If reclaim holds the page lock,
	 *    it will be a race with remove_mapping that determines if the
	 *    mapping is valid on unlock, but otherwise the data is valid
	 *    and there is no need to serialise with the page lock.
	 *
	 * As the page lock gives no additional guarantee, we optimistically
	 * wait on the page to be unlocked and check if it's up to date and
	 * use the page if it is. Otherwise, the page lock is required to
	 * distinguish between the different cases. The motivation is that
	 * we avoid spurious serialisations and wakeups when multiple
	 * processes wait on the same page for IO to complete.
	 */
	wait_on_page_locked(page);
	if (PageUptodate(page))
		goto out;

	/* Distinguish between all the cases under the safety of the lock */
	lock_page(page);

	/* Case c or d, restart the operation */
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto repeat;
	}

	/* Someone else locked and filled the page in a very small window */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	goto filler;

out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);
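
/*
 * Example usage (editor's sketch): read_mapping_page() in
 * <linux/pagemap.h> builds on read_cache_page(), using the mapping's
 * ->readpage as the filler:
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	(... use the uptodate page ...)
 *	put_page(page);
 */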

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);

/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);
	loff_t pos;

	if (!iov_iter_count(from))
		return 0;

	/* FIXME: this is for backwards compatibility with 2.4 */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	pos = iocb->ki_pos;

	if (limit != RLIM_INFINITY) {
		if (iocb->ki_pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		iov_iter_truncate(from, limit - (unsigned long)pos);
	}

	/*
	 * LFS rule
	 */
	if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (pos >= MAX_NON_LFS)
			return -EFBIG;
		iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus frestrict idea will clean these up nicely..
	 */
	if (unlikely(pos >= inode->i_sb->s_maxbytes))
		return -EFBIG;

	iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;
	size_t write_len;
	pgoff_t end;
	struct iov_iter data;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached page from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (mapping->nrpages) {
		written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_SHIFT, end);
		/*
		 * If a page can not be invalidated, return 0 to fall back
		 * to buffered write.
		 */
		if (written) {
			if (written == -EBUSY)
				return 0;
			goto out;
		}
	}

	data = *from;
	written = mapping->a_ops->direct_IO(iocb, &data, pos);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_SHIFT, end);
	}

	if (written > 0) {
		pos += written;
		iov_iter_advance(from, written);
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					 pgoff_t index, unsigned flags)
{
	struct page *page;
	int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;

	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;

	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Bytes into a page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);

/**
 * __generic_file_write_iter - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from, iocb->ki_pos);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes.  Some filesystems do this for writes to
		 * holes, for example.  For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
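
/*
 * Example usage (editor's sketch): a filesystem that uses the page
 * cache wholesale wires these generic helpers straight into its
 * file_operations; "example_file_operations" is a hypothetical name
 * (compare ext2's file_operations):
 *
 *	const struct file_operations example_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 */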

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private).  If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM &
 * __GFP_FS).
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
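
/*
 * Example usage (editor's sketch): page reclaim drops fs-private state
 * (e.g. buffer heads) from a clean, locked page before freeing it,
 * roughly as shrink_page_list() does:
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		goto keep_locked;
 */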