1
2#ifndef _LINUX_PAGEMAP_H
3#define _LINUX_PAGEMAP_H
4
5
6
7
8#include <linux/mm.h>
9#include <linux/fs.h>
10#include <linux/list.h>
11#include <linux/highmem.h>
12#include <linux/compiler.h>
13#include <linux/uaccess.h>
14#include <linux/gfp.h>
15#include <linux/bitops.h>
16#include <linux/hardirq.h>
17#include <linux/hugetlb_inline.h>
18
19struct pagevec;
20
/* Return true if the mapping's page cache holds no pages or shadow entries. */
static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}
25
26
27
28
/*
 * Bits in address_space->flags, manipulated with the set_bit()/test_bit()
 * helpers below.
 */
enum mapping_flags {
	AS_EIO = 0,		/* IO error on async write */
	AS_ENOSPC = 1,		/* ENOSPC on async write */
	AS_MM_ALL_LOCKS = 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE = 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING = 4,		/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_THP_SUPPORT = 6,	/* THPs maintained in this mapping's cache */
};
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now only for ENOSPC which is critical */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
72
/* Mark the mapping's pages as unevictable (e.g. ramfs, SHM_LOCK'd shm). */
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}
77
/* Allow the mapping's pages to be reclaimed again. */
static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
82
/* Return true if @mapping is non-NULL and its pages are unevictable. */
static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}
87
/* Mark the mapping as going away (its inode is being evicted). */
static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}
92
/* Return non-zero if the mapping is being torn down. */
static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}
97
/* Opt this mapping out of xarray writeback tagging. */
static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
102
/* Return non-zero if writeback tags should be maintained for this mapping. */
static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
107
/* Return the gfp mask to use for this mapping's page cache allocations. */
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->gfp_mask;
}
112
113
/* Restrict a gfp mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
119
120
121
122
123
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}
128
/* Return true if the filesystem supports THPs in this mapping natively. */
static inline bool mapping_thp_support(struct address_space *mapping)
{
	return test_bit(AS_THP_SUPPORT, &mapping->flags);
}
133
/*
 * Number of THPs counted in this mapping's page cache; always 0 when
 * CONFIG_READ_ONLY_THP_FOR_FS is not enabled.
 */
static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}
142
/*
 * Account one more THP in the mapping.  Mappings that support THPs
 * natively (AS_THP_SUPPORT) are not counted; without
 * CONFIG_READ_ONLY_THP_FOR_FS this should never be called at all.
 */
static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_thp_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}
152
/* Undo filemap_nr_thps_inc(); same accounting rules apply. */
static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_thp_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}
162
163void release_pages(struct page **pages, int nr);
164
165
166
167
168static inline struct address_space *page_mapping_file(struct page *page)
169{
170 if (unlikely(PageSwapCache(page)))
171 return NULL;
172 return page_mapping(page);
173}
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
/*
 * Speculatively add @count references to @page.
 *
 * Returns 1 on success.  Returns 0 if the page's refcount was already zero
 * (the page is being freed), in which case the refcount is untouched and
 * the caller must not dereference the page.  Must be called inside the same
 * rcu_read_lock() section that was used to look the page up in the page
 * cache.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, ret should be 0 so that the caller
		 * does not retry dereferencing the page.
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
251
/* Speculatively take one reference; see __page_cache_add_speculative(). */
static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}
256
/* Speculatively take @count references; see __page_cache_add_speculative(). */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
261
262
263
264
265
266
267
268
269
/**
 * attach_page_private - Attach private data to a page.
 * @page: Page to attach data to.
 * @data: Data to attach to page.
 *
 * Attaching private data to a page increments the page's reference count.
 * The data must be detached (detach_page_private()) before the page will
 * be freed.
 */
static inline void attach_page_private(struct page *page, void *data)
{
	get_page(page);
	set_page_private(page, (unsigned long)data);
	SetPagePrivate(page);
}
276
277
278
279
280
281
282
283
284
285
/**
 * detach_page_private - Detach private data from a page.
 * @page: Page to detach data from.
 *
 * Removes the data that was previously attached to the page and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the page, or NULL if PagePrivate was
 * not set.
 */
static inline void *detach_page_private(struct page *page)
{
	void *data = (void *)page_private(page);

	if (!PagePrivate(page))
		return NULL;
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);

	return data;
}
298
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
/* Allocate an order-0 page for the page cache (NUMA builds use an
 * out-of-line, policy-aware version instead). */
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif
307
/* Allocate a page cache page using the mapping's gfp mask. */
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}
312
/*
 * gfp mask for readahead page allocations: readahead is best-effort, so
 * allocation failure is acceptable — don't retry hard and don't warn.
 */
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
317
318typedef int filler_t(void *, struct page *);
319
320pgoff_t page_cache_next_miss(struct address_space *mapping,
321 pgoff_t index, unsigned long max_scan);
322pgoff_t page_cache_prev_miss(struct address_space *mapping,
323 pgoff_t index, unsigned long max_scan);
324
/* fgp_flags for pagecache_get_page() and the find_*/grab_* wrappers below. */
#define FGP_ACCESSED 0x00000001		/* mark the page accessed */
#define FGP_LOCK 0x00000002		/* return the page locked */
#define FGP_CREAT 0x00000004		/* create the page if not present */
#define FGP_WRITE 0x00000008		/* the page will be written to */
#define FGP_NOFS 0x00000010		/* don't recurse into filesystems */
#define FGP_NOWAIT 0x00000020		/* don't block on the page lock */
#define FGP_FOR_MMAP 0x00000040		/* lookup is on behalf of mmap fault */
#define FGP_HEAD 0x00000080		/* return the head page of a THP */
#define FGP_ENTRY 0x00000100		/* return shadow/swap entries too */
334
335struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
336 int fgp_flags, gfp_t cache_gfp_mask);
337
338
339
340
341
342
343
344
345
346
347
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
353
/* Like find_get_page(), but with caller-supplied FGP_* flags. */
static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
359
360
361
362
363
364
365
366
367
368
369
370
371
372
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}
378
379
380
381
382
383
384
385
386
387
388
389
390
391
/**
 * find_lock_head - Locate, pin and lock a pagecache page.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, its head page is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page which is !PageTail, or %NULL if there is no page
 * in the cache for this index.
 */
static inline struct page *find_lock_head(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
}
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_flags specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
424
425
426
427
428
429
430
431
432
433
434
435
436
437
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}
445
446
447static inline bool thp_contains(struct page *head, pgoff_t index)
448{
449
450 if (PageHuge(head))
451 return head->index == index;
452 return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
453}
454
455
456
457
458
/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file.
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}
467
468unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
469 pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
470unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
471 pgoff_t end, unsigned int nr_pages,
472 struct page **pages);
/* Like find_get_pages_range(), but searches to the end of the file. */
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
480unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
481 unsigned int nr_pages, struct page **pages);
482unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
483 pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
484 struct page **pages);
/* Like find_get_pages_range_tag(), but searches to the end of the file. */
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
492
493struct page *grab_cache_page_write_begin(struct address_space *mapping,
494 pgoff_t index, unsigned flags);
495
496
497
498
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
504
505extern struct page * read_cache_page(struct address_space *mapping,
506 pgoff_t index, filler_t *filler, void *data);
507extern struct page * read_cache_page_gfp(struct address_space *mapping,
508 pgoff_t index, gfp_t gfp_mask);
509extern int read_cache_pages(struct address_space *mapping,
510 struct list_head *pages, filler_t *filler, void *data);
511
/*
 * Read a page into the page cache for @mapping.  With a NULL filler,
 * read_cache_page() uses the mapping's readpage operation — see
 * mm/filemap.c for the error semantics.
 */
static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}
517
518
519
520
521
/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	return head->index + page - head;
}
536
537extern pgoff_t hugetlb_basepage_index(struct page *page);
538
539
540
541
542
/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}
549
550
551
552
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}
557
/* Like page_offset(), but uses page_index(), which handles swap cache pages. */
static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
562
563extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
564 unsigned long address);
565
/*
 * Convert a user virtual address within @vma into the corresponding file
 * page offset (pgoff).  hugetlb VMAs use their own index granularity.
 */
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
576
/*
 * Key passed to page-bit wake functions: which page and bit is being woken,
 * plus page_match, set by the wake function when a waiter's page matched.
 */
struct wait_page_key {
	struct page *page;
	int bit_nr;
	int page_match;
};
582
/* A waiter for a bit on a page, queued on the page's wait queue. */
struct wait_page_queue {
	struct page *page;
	int bit_nr;
	wait_queue_entry_t wait;
};
588
/*
 * Return true if this wait entry is for the page and bit identified by
 * @key.  key->page_match is set as soon as the page matches, even if the
 * bit does not, so the waker can tell that some waiter saw this page.
 */
static inline bool wake_page_match(struct wait_page_queue *wait_page,
				  struct wait_page_key *key)
{
	if (wait_page->page != key->page)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}
601
602extern void __lock_page(struct page *page);
603extern int __lock_page_killable(struct page *page);
604extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
605extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
606 unsigned int flags);
607extern void unlock_page(struct page *page);
608
609
610
611
/*
 * Try to lock the page without blocking.  The lock bit lives on the head
 * page of a compound page.  Returns non-zero if the lock was acquired.
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
617
618
619
620
/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
627
628
629
630
631
632
/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
640
641
642
643
644
645
646
647
648
/*
 * lock_page_async - Lock the page, unless this would block.  If the page
 * is already locked, then queue a callback when the page becomes unlocked.
 * This callback can then retry the operation.
 *
 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
 * is already locked and the callback defined in 'wait' was queued.
 */
static inline int lock_page_async(struct page *page,
				  struct wait_page_queue *wait)
{
	if (!trylock_page(page))
		return __lock_page_async(page, wait);
	return 0;
}
656
657
658
659
660
661
662
663
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
670
671
672
673
674
675extern void wait_on_page_bit(struct page *page, int bit_nr);
676extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
677
678
679
680
681
682
683
684
/* 
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}
690
691static inline int wait_on_page_locked_killable(struct page *page)
692{
693 if (!PageLocked(page))
694 return 0;
695 return wait_on_page_bit_killable(compound_head(page), PG_locked);
696}
697
698int put_and_wait_on_page_locked(struct page *page, int state);
699void wait_on_page_writeback(struct page *page);
700int wait_on_page_writeback_killable(struct page *page);
701extern void end_page_writeback(struct page *page);
702void wait_for_stable_page(struct page *page);
703
704void __set_page_dirty(struct page *, struct address_space *, int warn);
705int __set_page_dirty_nobuffers(struct page *page);
706int __set_page_dirty_no_writeback(struct page *page);
707
708void page_endio(struct page *page, bool is_write, int err);
709
710
711
712
713
714
715
716
717
718
/**
 * set_page_private_2 - Switch on PG_private_2 and take a ref
 * @page: The page.
 *
 * Sets PG_private_2 on the head page and takes an extra reference on it —
 * presumably dropped by end_page_private_2() (see mm/filemap.c; confirm
 * the pairing there).
 */
static inline void set_page_private_2(struct page *page)
{
	page = compound_head(page);
	get_page(page);
	SetPagePrivate2(page);
}
725
726void end_page_private_2(struct page *page);
727void wait_on_page_private_2(struct page *page);
728int wait_on_page_private_2_killable(struct page *page);
729
730
731
732
733extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
734
735
736
737
/*
 * Fault in the userspace address range [uaddr, uaddr + size) for writing.
 * Returns 0 on success, -EFAULT if any page could not be faulted in.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, size_t size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	/* the address computation wrapped: range is invalid */
	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}
764
/*
 * Fault in the userspace address range [uaddr, uaddr + size) for reading.
 * Returns 0 on success, -EFAULT if any page could not be faulted in.
 */
static inline int fault_in_pages_readable(const char __user *uaddr, size_t size)
{
	volatile char c;	/* volatile so the reads aren't optimised away */
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	/* the address computation wrapped: range is invalid */
	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
791
792int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
793 pgoff_t index, gfp_t gfp_mask);
794int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
795 pgoff_t index, gfp_t gfp_mask);
796extern void delete_from_page_cache(struct page *page);
797extern void __delete_from_page_cache(struct page *page, void *shadow);
798void replace_page_cache_page(struct page *old, struct page *new);
799void delete_from_page_cache_batch(struct address_space *mapping,
800 struct pagevec *pvec);
801loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
802 int whence);
803
804
805
806
807
/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};
845
/*
 * Declare and initialise an on-stack readahead control for file @f with
 * readahead state @r against mapping @m, starting at page index @i.
 */
#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}
853
854#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
855
856void page_cache_ra_unbounded(struct readahead_control *,
857 unsigned long nr_to_read, unsigned long lookahead_count);
858void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
859void page_cache_async_ra(struct readahead_control *, struct page *,
860 unsigned long req_count);
861void readahead_expand(struct readahead_control *ractl,
862 loff_t new_start, size_t new_len);
863
864
865
866
867
868
869
870
871
872
873
874
875
876
/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct page *page, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, page, req_count);
}
908
909
910
911
912
913
914
915
916
917
/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
	struct page *page;

	/* Consume the pages handed out by the previous call. */
	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;

	if (!rac->_nr_pages) {
		rac->_batch_count = 0;
		return NULL;
	}

	page = xa_load(&rac->mapping->i_pages, rac->_index);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	rac->_batch_count = thp_nr_pages(page);

	return page;
}
937
/*
 * Fill @array with up to @array_sz head pages from the readahead request,
 * returning the number stored.  Use via readahead_page_batch().  Pages are
 * locked with elevated refcounts, as with readahead_page().
 */
static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	/* Consume the pages handed out by the previous call. */
	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);

		/*
		 * The page cache isn't using multi-index entries yet,
		 * so the xas cursor needs to be manually moved over the
		 * tail pages of a THP.  This can be removed once the page
		 * cache is converted.
		 */
		if (PageHead(page))
			xas_set(&xas, rac->_index + rac->_batch_count);

		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}
976
977
978
979
980
981
982
983
984
985
986
987
/*
 * readahead_page_batch - Fill a fixed-size array of pages from @rac.
 * Returns the number of pages stored; @array must be an actual array so
 * ARRAY_SIZE() works.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))
990
991
992
993
994
/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}
999
1000
1001
1002
1003
/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}
1008
1009
1010
1011
1012
/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}
1017
1018
1019
1020
1021
/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}
1026
1027
1028
1029
1030
/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}
1035
1036static inline unsigned long dir_pages(struct inode *inode)
1037{
1038 return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
1039 PAGE_SHIFT;
1040}
1041
1042
1043
1044
1045
1046
1047
1048
1049
/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
/**
 * i_blocks_per_page - How many blocks fit in this page.
 * @inode: The inode which contains the blocks.
 * @page: The page (head page if the page is a THP).
 *
 * If the block size is larger than the size of this page, return zero.
 *
 * Context: The caller should hold a refcount on the page to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this page.
 */
static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return thp_size(page) >> inode->i_blkbits;
}
1086#endif
1087