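/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */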
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>
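
/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_sem		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock		(memory_failure, collect_procs_ao)
 */
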
static int page_cache_tree_insert(struct address_space *mapping,
				  struct page *page, void **shadowp)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	error = __radix_tree_create(&mapping->page_tree, page->index, 0,
				    &node, &slot);
	if (error)
		return error;
	if (*slot) {
		void *p;

		p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
		if (!radix_tree_exceptional_entry(p))
			return -EEXIST;

		mapping->nrexceptional--;
		if (!dax_mapping(mapping)) {
			if (shadowp)
				*shadowp = p;
		} else {
			/* DAX can replace empty locked entry with a hole */
			WARN_ON_ONCE(p !=
				dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
			/* Wakeup waiters for exceptional entry lock */
			dax_wake_mapping_entry_waiter(mapping, page->index, p,
						      true);
		}
	}
	__radix_tree_replace(&mapping->page_tree, node, slot, page,
			     workingset_update_node, mapping);
	mapping->nrpages++;
	return 0;
}

static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	int i, nr;

	/* hugetlb pages are represented by one entry in the radix tree */
	nr = PageHuge(page) ? 1 : hpage_nr_pages(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(nr != 1 && shadow, page);

	for (i = 0; i < nr; i++) {
		struct radix_tree_node *node;
		void **slot;

		__radix_tree_lookup(&mapping->page_tree, page->index + i,
				    &node, &slot);

		VM_BUG_ON_PAGE(!node && nr != 1, page);

		radix_tree_clear_tags(&mapping->page_tree, node, slot);
		__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
				     workingset_update_node, mapping);
	}

	if (shadow) {
		mapping->nrexceptional += nr;
		/*
		 * Make sure the nrexceptional update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages -= nr;
}
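
/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the mapping's tree_lock.
 */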
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;
	int nr = hpage_nr_pages(page);

	trace_mm_filemap_delete_from_page_cache(page);

	/*
	 * If we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone.
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapped(page), page);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
		int mapcount;

		pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
			 current->comm, page_to_pfn(page));
		dump_page(page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		mapcount = page_mapcount(page);
		if (mapping_exiting(mapping) &&
		    page_count(page) >= mapcount + 2) {
			/*
			 * All vmas have already been torn down, so it's
			 * a good bet that actually the page is unmapped,
			 * and we'd prefer not to leak it: if we're wrong,
			 * some other bad page check should catch it later.
			 */
			page_mapcount_reset(page);
			page_ref_sub(page, mapcount);
		}
	}

	page_cache_tree_delete(mapping, page, shadow);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(page))
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	if (PageSwapBacked(page)) {
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
		if (PageTransHuge(page))
			__dec_node_page_state(page, NR_SHMEM_THPS);
	} else {
		VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page);
	}

	/*
	 * At this point page must be either written or cleaned by truncate.
	 * Dirty page here signals a bug and loss of unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely but
	 * leaves PageDirty set: it has no effect for truncated page and
	 * anyway will be cleaned up after return from this function.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}
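
/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the
 * page cache and locked.  It will never put the page into the free list,
 * the caller has a reference on the page.
 */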
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;
	void (*freepage)(struct page *);

	BUG_ON(!PageLocked(page));

	freepage = mapping->a_ops->freepage;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (freepage)
		freepage(page);

	if (PageTransHuge(page) && !PageHuge(page)) {
		page_ref_sub(page, HPAGE_PMD_NR);
		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
	} else {
		put_page(page);
	}
}
EXPORT_SYMBOL(delete_from_page_cache);

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);
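
/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */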
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
				       int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

static int __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;

	if (end_byte < start_byte)
		goto out;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (TestClearPageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
out:
	return ret;
}
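
/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */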
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	int ret, ret2;

	ret = __filemap_fdatawait_range(mapping, start_byte, end_byte);
	ret2 = filemap_check_errors(mapping);
	if (!ret)
		ret = ret2;

	return ret;
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
void filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return;

	__filemap_fdatawait_range(mapping, 0, i_size - 1);
}

/**
 * filemap_fdatawait - wait for all under-writeback pages to complete
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Check error status of the address space
 * and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return filemap_fdatawait_range(mapping, 0, i_size - 1);
}
EXPORT_SYMBOL(filemap_fdatawait);

int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if ((!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional)) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
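
/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */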
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if ((!dax_mapping(mapping) && mapping->nrpages) ||
	    (dax_mapping(mapping) && mapping->nrexceptional)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
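
/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  The only way this function can fail is
 * memory allocation failure.
 */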
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (!error) {
		struct address_space *mapping = old->mapping;
		void (*freepage)(struct page *);
		unsigned long flags;

		pgoff_t offset = old->index;
		freepage = mapping->a_ops->freepage;

		get_page(new);
		new->mapping = mapping;
		new->index = offset;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		__delete_from_page_cache(old, NULL);
		error = page_cache_tree_insert(mapping, new, NULL);
		BUG_ON(error);

		/*
		 * hugetlb pages do not participate in page cache accounting.
		 */
		if (!PageHuge(new))
			__inc_node_page_state(new, NR_FILE_PAGES);
		if (PageSwapBacked(new))
			__inc_node_page_state(new, NR_SHMEM);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
		mem_cgroup_migrate(old, new);
		radix_tree_preload_end();
		if (freepage)
			freepage(old);
		put_page(old);
	}

	return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);

static int __add_to_page_cache_locked(struct page *page,
				      struct address_space *mapping,
				      pgoff_t offset, gfp_t gfp_mask,
				      void **shadowp)
{
	int huge = PageHuge(page);
	struct mem_cgroup *memcg;
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg, false);
		if (error)
			return error;
	}

	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
	if (error) {
		if (!huge)
			mem_cgroup_cancel_charge(page, memcg, false);
		return error;
	}

	get_page(page);
	page->mapping = mapping;
	page->index = offset;

	spin_lock_irq(&mapping->tree_lock);
	error = page_cache_tree_insert(mapping, page, shadowp);
	radix_tree_preload_end();
	if (unlikely(error))
		goto err_insert;

	/* hugetlb pages do not participate in page cache accounting. */
	if (!huge)
		__inc_node_page_state(page, NR_FILE_PAGES);
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
err_insert:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	spin_unlock_irq(&mapping->tree_lock);
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg, false);
	put_page(page);
	return error;
}
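
/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU.  The caller must do that.
 */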
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 * The exception is pages getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		if (!(gfp_mask & __GFP_WRITE) &&
		    shadow && workingset_refault(shadow)) {
			SetPageActive(page);
			workingset_activation(page);
		} else
			ClearPageActive(page);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	int n;
	struct page *page;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			page = __alloc_pages_node(n, gfp, 0);
		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

		return page;
	}
	return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif
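
/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */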
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *page_waitqueue(struct page *page)
{
	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&page_wait_table[i]);

	page_writeback_init();
}

struct wait_page_key {
	struct page *page;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct page *page;
	int bit_nr;
	wait_queue_t wait;
};

static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (wait_page->page != key->page)
		return 0;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return 0;
	if (test_bit(key->bit_nr, &key->page->flags))
		return 0;

	return autoremove_wake_function(wait, mode, sync, key);
}

static void wake_up_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	struct wait_page_key key;
	unsigned long flags;

	key.page = page;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key(q, TASK_NORMAL, &key);
	/*
	 * It is possible for other pages to have collided on the waitqueue
	 * hash, so in that case check for a page match. That prevents a
	 * long-term waiter from never being woken.
	 *
	 * It is still possible to miss a case here, in the case of wait
	 * hash collisions, but it's rare enough not to matter.
	 */
	if (!waitqueue_active(q) || !key.page_match) {
		ClearPageWaiters(page);
		/*
		 * It's possible to miss clearing Waiters here, when we woke
		 * our page waiters, but the hashed waitqueue has waiters for
		 * other pages on it.
		 *
		 * That's okay, it's a rare case. The next waker will clear it.
		 */
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

static void wake_up_page(struct page *page, int bit)
{
	if (!PageWaiters(page))
		return;
	wake_up_page_bit(page, bit);
}

static inline int wait_on_page_bit_common(wait_queue_head_t *q,
		struct page *page, int bit_nr, int state, bool lock)
{
	struct wait_page_queue wait_page;
	wait_queue_t *wait = &wait_page.wait;
	int ret = 0;

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.page = page;
	wait_page.bit_nr = bit_nr;

	for (;;) {
		spin_lock_irq(&q->lock);

		if (likely(list_empty(&wait->task_list))) {
			if (lock)
				__add_wait_queue_tail_exclusive(q, wait);
			else
				__add_wait_queue(q, wait);
			SetPageWaiters(page);
		}

		set_current_state(state);

		spin_unlock_irq(&q->lock);

		if (likely(test_bit(bit_nr, &page->flags))) {
			io_schedule();
			if (unlikely(signal_pending_state(state, current))) {
				ret = -EINTR;
				break;
			}
		}

		if (lock) {
			if (!test_and_set_bit_lock(bit_nr, &page->flags))
				break;
		} else {
			if (!test_bit(bit_nr, &page->flags))
				break;
		}
	}

	finish_wait(q, wait);

	/*
	 * A signal could leave PageWaiters set. Clearing it here if
	 * !waitqueue_active would be possible (by open-coding finish_wait),
	 * but still fail to catch it in the case of wait hash collision. We
	 * already can fail to clear wait hash collision cases, so don't
	 * bother with signals either.
	 */

	return ret;
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);
	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
{
	wait_queue_head_t *q = page_waitqueue(page);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, waiter);
	SetPageWaiters(page);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_locked.
 *
 * On x86 (and on many other architectures), we can clear PG_locked and
 * test the sign bit at the same time. But if the architecture does not
 * support that special operation, we just do this all by hand instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, while PG_locked is cleared before being woken up.
 *
 * This is racy, of course - but that's okay; we only need an
 * approximate answer, since the next waker will clean things up.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
	clear_bit_unlock(nr, mem);
	/* smp_mb__after_atomic(); */
	return test_bit(PG_waiters, mem);
}

#endif
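
/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * Note that this depends on PG_waiters being the sign bit in the byte
 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
 * clear the PG_locked bit and test PG_waiters at the same time fairly
 * portably (architectures that do LL/SC can test any bit, while x86 can
 * test the sign bit).
 */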
void unlock_page(struct page *page)
{
	BUILD_BUG_ON(PG_waiters != 7);
	page = compound_head(page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
		wake_up_page_bit(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
	/*
	 * TestClearPageReclaim could be used here but it is an atomic
	 * operation and overkill in this particular case. Failing to
	 * shuffle a page marked for immediate reclaim is too mild to
	 * justify taking an atomic operation penalty at the end of
	 * every page writeback.
	 */
	if (PageReclaim(page)) {
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}

	if (!test_clear_page_writeback(page))
		BUG();

	smp_mb__after_atomic();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, bool is_write, int err)
{
	if (!is_write) {
		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} else {
		if (err) {
			struct address_space *mapping;

			SetPageError(page);
			mapping = page_mapping(page);
			if (mapping)
				mapping_set_error(mapping, err);
		}
		end_page_writeback(page);
	}
}
EXPORT_SYMBOL_GPL(page_endio);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @__page: the page to lock
 */
void __lock_page(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);
	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *__page)
{
	struct page *page = compound_head(__page);
	wait_queue_head_t *q = page_waitqueue(page);
	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

/*
 * Return values:
 * 1 - page is locked; mmap_sem is still held.
 * 0 - page is not locked.
 *     mmap_sem has been released (up_read()), unless flags had both
 *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
 *     which case mmap_sem is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
 * with the page locked and the mmap_sem unperturbed.
 */
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
			 unsigned int flags)
{
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/*
		 * CAUTION! In this case, mmap_sem is not released
		 * even though return 0.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return 0;

		up_read(&mm->mmap_sem);
		if (flags & FAULT_FLAG_KILLABLE)
			wait_on_page_locked_killable(page);
		else
			wait_on_page_locked(page);
		return 0;
	} else {
		if (flags & FAULT_FLAG_KILLABLE) {
			int ret;

			ret = __lock_page_killable(page);
			if (ret) {
				up_read(&mm->mmap_sem);
				return 0;
			}
		} else
			__lock_page(page);
		return 1;
	}
}
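
/**
 * page_cache_next_hole - find the next hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
 * lowest indexed hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'return - index >=
 * max_scan' will be true). In rare cases of index wrap-around, 0 will
 * be returned.
 *
 * page_cache_next_hole may be called under rcu_read_lock. However,
 * like radix_tree_gang_lookup, this will not atomically search a
 * snapshot of the tree at a single point in time, so a hole may have
 * been created or filled in by the time this returns.
 */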
pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index++;
		if (index == 0)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_next_hole);

/**
 * page_cache_prev_hole - find the previous hole (not-present entry)
 * @mapping: mapping
 * @index: index
 * @max_scan: maximum range to search
 *
 * Search backwards in the range [max(index-max_scan+1, 0), index] for
 * the first hole.
 *
 * Returns: the index of the hole if found, otherwise returns an index
 * outside of the set specified (in which case 'index - return >=
 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
 * will be returned.
 */
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	unsigned long i;

	for (i = 0; i < max_scan; i++) {
		struct page *page;

		page = radix_tree_lookup(&mapping->page_tree, index);
		if (!page || radix_tree_exceptional_entry(page))
			break;
		index--;
		if (index == ULONG_MAX)
			break;
	}

	return index;
}
EXPORT_SYMBOL(page_cache_prev_hole);
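
/**
 * find_get_entry - find and get a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 */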
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
{
	void **pagep;
	struct page *head, *page;

	rcu_read_lock();
repeat:
	page = NULL;
	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (pagep) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			goto out;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto repeat;
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Return
			 * it without attempting to raise page count.
			 */
			goto out;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			put_page(head);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();

	return page;
}
EXPORT_SYMBOL(find_get_entry);

/**
 * find_lock_entry - locate, pin and lock a page cache entry
 * @mapping: the address_space to search
 * @offset: the page cache index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the slot holds a shadow entry of a previously evicted page, or a
 * swap entry from shmem/tmpfs, it is returned.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_entry() may sleep.
 */
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (page && !radix_tree_exception(page)) {
		lock_page(page);
		/* Has the page been truncated? */
		if (unlikely(page_mapping(page) != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
	}
	return page;
}
EXPORT_SYMBOL(find_lock_entry);
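
/**
 * pagecache_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 * @fgp_flags: PCG flags
 * @gfp_mask: gfp mask to use for the page cache data page allocation
 *
 * Looks up the page cache slot at @mapping & @offset.
 *
 * PCG flags modify how the page is returned.
 *
 * @fgp_flags can be:
 *
 * - FGP_ACCESSED: the page will be marked accessed
 * - FGP_LOCK: the page is returned locked
 * - FGP_CREAT: if the page is not present then a new page is allocated
 *   using @gfp_mask and added to the page cache and the VM's LRU list.
 *   The page is returned locked and with an increased refcount.
 *   Otherwise, NULL is returned.
 *
 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
 * if the GFP flags specified for FGP_CREAT are atomic.
 *
 * If there is a page cache page, it is returned with an increased refcount.
 */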
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
	int fgp_flags, gfp_t gfp_mask)
{
	struct page *page;

repeat:
	page = find_get_entry(mapping, offset);
	if (radix_tree_exceptional_entry(page))
		page = NULL;
	if (!page)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!trylock_page(page)) {
				put_page(page);
				return NULL;
			}
		} else {
			lock_page(page);
		}

		/* Has the page been truncated? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}
		VM_BUG_ON_PAGE(page->index != offset, page);
	}

	if (page && (fgp_flags & FGP_ACCESSED))
		mark_page_accessed(page);

no_page:
	if (!page && (fgp_flags & FGP_CREAT)) {
		int err;
		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
			gfp_mask |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp_mask &= ~__GFP_FS;

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return NULL;

		if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
			fgp_flags |= FGP_LOCK;

		/* Init accessed so avoid atomic mark_page_accessed later */
		if (fgp_flags & FGP_ACCESSED)
			__SetPageReferenced(page);

		err = add_to_page_cache_lru(page, mapping, offset,
				gfp_mask & GFP_RECLAIM_MASK);
		if (unlikely(err)) {
			put_page(page);
			page = NULL;
			if (err == -EEXIST)
				goto repeat;
		}
	}

	return page;
}
EXPORT_SYMBOL(pagecache_get_page);

/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @nr_entries:	The maximum number of entries
 * @entries:	Where the resulting entries are placed
 * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a group of up to
 * @nr_entries entries in the mapping.  The entries are placed at
 * @entries.  find_get_entries() takes a reference against any actual
 * pages it returns.
 *
 * The search returns a group of mapping-contiguous page cache entries
 * with ascending indexes.  There may be holes in the indices due to
 * not-present pages.
 *
 * Any shadow entries of evicted pages, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * find_get_entries() returns the number of pages and shadow entries
 * which were found.
 */
unsigned find_get_entries(struct address_space *mapping,
			  pgoff_t start, unsigned int nr_entries,
			  struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Skip
			 * over it.
			 */
			continue;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();
	return ret;
}

/**
 * find_get_pages_contig - gang contiguous pagecache lookup
 * @mapping:	The address_space to search
 * @index:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages_contig() works exactly like find_get_pages(), except
 * that the returned number of pages are guaranteed to be contiguous.
 *
 * find_get_pages_contig() returns the number of pages which were found.
 */
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
			       unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		/* The hole, there no reason to continue */
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page,
			 * or a swap entry from shmem/tmpfs.  Stop
			 * looking for contiguous pages.
			 */
			break;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		/*
		 * must check mapping and index after taking the ref.
		 * otherwise we can get both false positives and false
		 * negatives, which is just confusing to the caller.
		 */
		if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
			put_page(page);
			break;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_pages_contig);

/**
 * find_get_pages_tag - find and return pages that match @tag
 * @mapping:	the address_space to search
 * @index:	the starting page index
 * @tag:	the tag index
 * @nr_pages:	the maximum number of pages
 * @pages:	where the resulting pages are placed
 *
 * Like find_get_pages, except we only return pages which are tagged with
 * @tag.  We update @index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned ret = 0;

	if (unlikely(!nr_pages))
		return 0;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, *index, tag) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			/*
			 * A shadow entry of a recently evicted page.
			 *
			 * Those entries should never be tagged, but
			 * this tree walk is lockless and the tags are
			 * looked up in bulk, one radix tree node at a
			 * time, so there is a window for page reclaim
			 * to evict a page we saw tagged.
			 *
			 * Skip over it.
			 */
			continue;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}

	rcu_read_unlock();

	if (ret)
		*index = pages[ret - 1]->index + 1;

	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/**
 * find_get_entries_tag - find and return entries that match @tag
 * @mapping:	the address_space to search
 * @start:	the starting page cache index
 * @tag:	the tag index
 * @nr_entries:	the maximum number of entries
 * @entries:	where the resulting entries are placed
 * @indices:	the cache indices corresponding to the entries in @entries
 *
 * Like find_get_entries, except we only return entries which are tagged with
 * @tag.
 */
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	radix_tree_for_each_tagged(slot, &mapping->page_tree,
				   &iter, start, tag) {
		struct page *head, *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			/*
			 * A shadow entry of a recently evicted page, a swap
			 * entry from shmem/tmpfs or a DAX entry.  Return it
			 * without attempting to raise page count.
			 */
			goto export;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		entries[ret] = page;
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(find_get_entries_tag);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}
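
/**
 * do_generic_file_read - generic file read routine
 * @filp:	the file to read
 * @ppos:	current file position
 * @iter:	data destination
 * @written:	already copied
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */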
static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
		struct iov_iter *iter, ssize_t written)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error = 0;

	if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
		return 0;
	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);

	index = *ppos >> PAGE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_SIZE-1);
	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
	offset = *ppos & ~PAGE_MASK;

	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		if (fatal_signal_pending(current)) {
			error = -EINTR;
			goto out;
		}

		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page)) {
			/*
			 * See comment in do_read_cache_page on why
			 * wait_on_page_locked is used to avoid unnecessarily
			 * serialisations and why it's safe.
			 */
			error = wait_on_page_locked_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (PageUptodate(page))
				goto page_ok;

			if (inode->i_blkbits == PAGE_SHIFT ||
					!mapping->a_ops->is_partially_uptodate)
				goto page_not_up_to_date;
			/* pipes can't handle partially uptodate pages */
			if (unlikely(iter->type & ITER_PIPE))
				goto page_not_up_to_date;
			if (!trylock_page(page))
				goto page_not_up_to_date;
			/* Did it get truncated before we got the lock? */
			if (!page->mapping)
				goto page_not_up_to_date_locked;
			if (!mapping->a_ops->is_partially_uptodate(page,
							offset, iter->count))
				goto page_not_up_to_date_locked;
			unlock_page(page);
		}
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			put_page(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_MASK) + 1;
			if (nr <= offset) {
				put_page(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */

		ret = copy_page_to_iter(page, offset, nr, iter);
		offset += ret;
		index += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;
		prev_offset = offset;

		put_page(page);
		written += ret;
		if (!iov_iter_count(iter))
			goto out;
		if (ret < nr) {
			error = -EFAULT;
			goto out;
		}
		continue;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		error = lock_page_killable(page);
		if (unlikely(error))
			goto readpage_error;

page_not_up_to_date_locked:
		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/*
		 * A previous I/O error may have been due to temporary
		 * failures, eg. multipath errors.
		 * PG_error will be set again if readpage fails.
		 */
		ClearPageError(page);
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				put_page(page);
				error = 0;
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			error = lock_page_killable(page);
			if (unlikely(error))
				goto readpage_error;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_mapping_pages got it
					 */
					unlock_page(page);
					put_page(page);
					goto find_page;
				}
				unlock_page(page);
				shrink_readahead_size_eio(filp, ra);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		put_page(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping, index,
				mapping_gfp_constraint(mapping, GFP_KERNEL));
		if (error) {
			put_page(page);
			if (error == -EEXIST) {
				error = 0;
				goto find_page;
			}
			goto out;
		}
		goto readpage;
	}

out:
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_SHIFT) + offset;
	file_accessed(filp);
	return written ? written : error;
}
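
/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iter:	destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 */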
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval = 0;
	size_t count = iov_iter_count(iter);

	if (!count)
		goto out; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;
		loff_t size;

		size = i_size_read(inode);
		retval = filemap_write_and_wait_range(mapping, iocb->ki_pos,
					iocb->ki_pos + count - 1);
		if (retval < 0)
			goto out;

		file_accessed(file);

		retval = mapping->a_ops->direct_IO(iocb, iter);
		if (retval >= 0) {
			iocb->ki_pos += retval;
			count -= retval;
		}
		iov_iter_revert(iter, count - iov_iter_count(iter));

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.  Buffered reads will not work for
		 * DAX files, so don't bother trying.
		 */
		if (retval < 0 || !count || iocb->ki_pos >= size ||
		    IS_DAX(inode))
			goto out;
	}

	retval = do_generic_file_read(file, &iocb->ki_pos, iter, retval);
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_read_iter);

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 * @gfp_mask:	memory allocation flags
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = __page_cache_alloc(gfp_mask|__GFP_COLD);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		put_page(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * Synchronous readahead happens when we don't even find
 * a page in the page cache at all.
 */
static void do_sync_mmap_readahead(struct vm_area_struct *vma,
				   struct file_ra_state *ra,
				   struct file *file,
				   pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (!ra->ra_pages)
		return;

	if (vma->vm_flags & VM_SEQ_READ) {
		page_cache_sync_readahead(mapping, ra, file, offset,
					  ra->ra_pages);
		return;
	}

	/* Avoid banging the cache line if not needed */
	if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
		ra->mmap_miss++;

	/*
	 * Do we miss much more than hit in this file? If so,
	 * stop bothering with read-ahead. It will only hurt.
	 */
	if (ra->mmap_miss > MMAP_LOTSAMISS)
		return;

	/*
	 * mmap read-around
	 */
	ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ra_submit(ra, mapping, file);
}

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further..
 */
static void do_async_mmap_readahead(struct vm_area_struct *vma,
				    struct file_ra_state *ra,
				    struct file *file,
				    struct page *page,
				    pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;

	/* If we don't want any read-ahead, don't bother */
	if (vma->vm_flags & VM_RAND_READ)
		return;
	if (ra->mmap_miss > 0)
		ra->mmap_miss--;
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);
}
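
/**
 * filemap_fault - read in file data for page fault handling
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_sem must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because
 * lock_page_or_retry() returned 0.
 * The mmap_sem has usually been released in this case.
 * See __lock_page_or_retry() for the exception.
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 */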
int filemap_fault(struct vm_fault *vmf)
{
	int error;
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	pgoff_t offset = vmf->pgoff;
	pgoff_t max_off;
	struct page *page;
	int ret = 0;

	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(offset >= max_off))
		return VM_FAULT_SIGBUS;

	/*
	 * Do we have something in the page cache already?
	 */
	page = find_get_page(mapping, offset);
	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
		/*
		 * We found the page, so try async readahead before
		 * waiting for the lock.
		 */
		do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
	} else if (!page) {
		/* No page in the page cache at all */
		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
retry_find:
		page = find_get_page(mapping, offset);
		if (!page)
			goto no_cached_page;
	}

	if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
		put_page(page);
		return ret | VM_FAULT_RETRY;
	}

	/* Did it get truncated? */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto retry_find;
	}
	VM_BUG_ON_PAGE(page->index != offset, page);

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/*
	 * Found the page and have a reference on it.
	 * We must recheck i_size under page lock.
	 */
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(offset >= max_off)) {
		unlock_page(page);
		put_page(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, offset, vmf->gfp_mask);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (!PageUptodate(page))
			error = -EIO;
	}
	put_page(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct radix_tree_iter iter;
	void **slot;
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	pgoff_t last_pgoff = start_pgoff;
	unsigned long max_idx;
	struct page *head, *page;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
			start_pgoff) {
		if (iter.index > end_pgoff)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto next;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			goto next;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		if (!PageUptodate(page) ||
				PageReadahead(page) ||
				PageHWPoison(page))
			goto skip;
		if (!trylock_page(page))
			goto skip;

		if (page->mapping != mapping || !PageUptodate(page))
			goto unlock;

		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
		if (page->index >= max_idx)
			goto unlock;

		if (file->f_ra.mmap_miss > 0)
			file->f_ra.mmap_miss--;

		vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
		if (vmf->pte)
			vmf->pte += iter.index - last_pgoff;
		last_pgoff = iter.index;
		if (alloc_set_pte(vmf, NULL, page))
			goto unlock;
		unlock_page(page);
		goto next;
unlock:
		unlock_page(page);
skip:
		put_page(page);
next:
		/* Huge page is mapped? No need to proceed. */
		if (pmd_trans_huge(*vmf->pmd))
			break;
		if (iter.index == end_pgoff)
			break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(filemap_map_pages);

int filemap_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			put_page(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			put_page(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}

filler:
		err = filler(data, page);
		if (err < 0) {
			put_page(page);
			return ERR_PTR(err);
		}

		page = wait_on_page_read(page);
		if (IS_ERR(page))
			return page;
		goto out;
	}
	if (PageUptodate(page))
		goto out;

	/*
	 * Page is not up to date and may be locked due one of the following
	 * case a: Page is being filled and the page lock is held
	 * case b: Read/write error clearing the page uptodate status
	 * case c: Truncation in progress (page locked)
	 * case d: Reclaim in progress
	 *
	 * Case a, the page will be up to date when the page is unlocked.
	 *    There is no need to serialise on the page lock here as the page
	 *    is pinned so the lock gives no additional protection. Even if
	 *    the page is truncated, the data is still valid if PageUptodate
	 *    as it's a race vs truncate race.
	 * Case b, the page will not be up to date
	 * Case c, the page may be truncated but in itself, the data may still
	 *    be valid after IO completes as it's a read vs truncate race. The
	 *    operation must restart if the page is not uptodate on unlock but
	 *    otherwise serialising on page lock to stabilise the mapping gives
	 *    no additional guarantees to the caller as the page lock is
	 *    released before return.
	 * Case d, similar to truncation. If reclaim holds the page lock, it
	 *    will be a race with remove_mapping that determines if the mapping
	 *    is valid on unlock but otherwise the data is valid and there is
	 *    no need to serialise with page lock.
	 *
	 * As the page lock gives no additional guarantee, we optimistically
	 * wait on the page to be unlocked and check if it's up to date and
	 * use the page if it is. Otherwise, the page lock is required to
	 * distinguish between the different cases. The motivation is that we
	 * avoid spurious serialisations and wakeups when multiple processes
	 * wait on the same page for IO to complete.
	 */
	wait_on_page_locked(page);
	if (PageUptodate(page))
		goto out;

	/* Distinguish between all the cases under the safety of the lock */
	lock_page(page);

	/* Case c or d, restart the operation */
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto repeat;
	}

	/* Someone else locked and filled the page in a very small window */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	goto filler;

out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);

/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);
	loff_t pos;

	if (!iov_iter_count(from))
		return 0;

	/* FIXME: this is for backwards compatibility with 2.4 */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	pos = iocb->ki_pos;

	if (limit != RLIM_INFINITY) {
		if (iocb->ki_pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		iov_iter_truncate(from, limit - (unsigned long)pos);
	}

	/*
	 * LFS rule
	 */
	if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (pos >= MAX_NON_LFS)
			return -EFBIG;
		iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 */
	if (unlikely(pos >= inode->i_sb->s_maxbytes))
		return -EFBIG;

	iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file	*file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode	*inode = mapping->host;
	loff_t		pos = iocb->ki_pos;
	ssize_t		written;
	size_t		write_len;
	pgoff_t		end;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_SHIFT;

	written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
	if (written)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached page from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_SHIFT, end);
	/*
	 * If a page can not be invalidated, return 0 to fall back
	 * to buffered write.
	 */
	if (written) {
		if (written == -EBUSY)
			return 0;
		goto out;
	}

	written = mapping->a_ops->direct_IO(iocb, from);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 */
	invalidate_inode_pages2_range(mapping,
				pos >> PAGE_SHIFT, end);

	if (written > 0) {
		pos += written;
		write_len -= written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
	iov_iter_revert(from, write_len - iov_iter_count(from));
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	struct page *page;
	int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;

	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;

	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);
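
/**
 * __generic_file_write_iter - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or
 * similar object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */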
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode	*inode = mapping->host;
	ssize_t		written = 0;
	ssize_t		err;
	ssize_t		status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes.  Some filesystems do this for writes to
		 * holes, for example.  For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero.  Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of O_SYNC file
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
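
/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private).  If the release was successful, return '1'.
 * Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block
 * (__GFP_RECLAIM & __GFP_FS).
 */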
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);