1
2#ifndef _LINUX_PAGEMAP_H
3#define _LINUX_PAGEMAP_H
4
5
6
7
8#include <linux/mm.h>
9#include <linux/fs.h>
10#include <linux/list.h>
11#include <linux/highmem.h>
12#include <linux/compiler.h>
13#include <linux/uaccess.h>
14#include <linux/gfp.h>
15#include <linux/bitops.h>
16#include <linux/hardirq.h>
17#include <linux/hugetlb_inline.h>
18
19struct pagevec;
20
/* Return true if the mapping's page cache (and shadow entries) is empty. */
static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}
25
26
27
28
/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_THP_SUPPORT = 6,	/* THPs supported */
};
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now only for used for AS_EIO/AS_ENOSPC */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
72
/* Mark the mapping's pages unevictable (e.g. ramdisk, SHM_LOCK). */
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}
77
/* Clear the unevictable flag; pages become reclaimable again. */
static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
82
83static inline bool mapping_unevictable(struct address_space *mapping)
84{
85 return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
86}
87
/* Mark the mapping as undergoing its final truncate (inode teardown). */
static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}
92
/* Non-zero if the mapping's final truncate is in progress. */
static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}
97
/* Opt this mapping out of xarray writeback tagging. */
static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
102
/* Non-zero if writeback tags are in use for this mapping. */
static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
107
/* Return the gfp mask used for page-cache allocations in this mapping. */
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->gfp_mask;
}
112
113
114static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
115 gfp_t gfp_mask)
116{
117 return mapping_gfp_mask(mapping) & gfp_mask;
118}
119
120
121
122
123
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}
128
/* True if the mapping natively supports transparent huge pages. */
static inline bool mapping_thp_support(struct address_space *mapping)
{
	return test_bit(AS_THP_SUPPORT, &mapping->flags);
}
133
/*
 * Number of THPs currently in this mapping's page cache.  Only tracked
 * when CONFIG_READ_ONLY_THP_FOR_FS is enabled; otherwise always 0.
 */
static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}
142
/*
 * Account one more THP in the page cache.  Only counted for mappings that
 * do not natively support THPs (the read-only-THP-for-FS case); mappings
 * with AS_THP_SUPPORT do not need the counter.
 */
static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_thp_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}
152
/* Undo filemap_nr_thps_inc(); same accounting rules apply. */
static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_thp_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}
162
163void release_pages(struct page **pages, int nr);
164
165
166
167
168static inline struct address_space *page_mapping_file(struct page *page)
169{
170 if (unlikely(PageSwapCache(page)))
171 return NULL;
172 return page_mapping(page);
173}
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
/*
 * Speculatively take @count references on @page, as used by lockless
 * pagecache lookups under RCU.  Returns 1 on success, 0 if the page's
 * refcount was found to be zero (page is being freed; caller must retry).
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * On !SMP/TINY_RCU kernels nothing else can run concurrently, so a
	 * zero refcount here would mean the caller holds no reference at
	 * all -- a bug.  A plain (non-atomic-cmpxchg) ref add suffices.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Refcount hit zero: the page is on its way out.  The
		 * caller (under RCU) must not assume the page is still
		 * attached to the mapping; report failure so it retries.
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
251
/* Speculatively take a single reference; see __page_cache_add_speculative(). */
static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}
256
/* Speculatively take @count references; see __page_cache_add_speculative(). */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
261
262
263
264
265
266
267
268
269
/**
 * attach_page_private - Attach private data to a page.
 * @page: Page to attach data to.
 * @data: Data to attach to page.
 *
 * Attaching private data to a page increments the page's reference count.
 * The data must be detached before the page will be freed.
 */
static inline void attach_page_private(struct page *page, void *data)
{
	get_page(page);
	set_page_private(page, (unsigned long)data);
	SetPagePrivate(page);
}
276
277
278
279
280
281
282
283
284
285
286static inline void *detach_page_private(struct page *page)
287{
288 void *data = (void *)page_private(page);
289
290 if (!PagePrivate(page))
291 return NULL;
292 ClearPagePrivate(page);
293 set_page_private(page, 0);
294 put_page(page);
295
296 return data;
297}
298
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	/* No NUMA placement to consider: a plain order-0 allocation. */
	return alloc_pages(gfp, 0);
}
#endif
307
/* Allocate a page for @x's page cache using the mapping's gfp mask. */
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}
312
/*
 * gfp mask for readahead allocations: don't retry hard and don't warn on
 * failure -- readahead is opportunistic and may simply give up.
 */
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
317
318typedef int filler_t(void *, struct page *);
319
320pgoff_t page_cache_next_miss(struct address_space *mapping,
321 pgoff_t index, unsigned long max_scan);
322pgoff_t page_cache_prev_miss(struct address_space *mapping,
323 pgoff_t index, unsigned long max_scan);
324
325#define FGP_ACCESSED 0x00000001
326#define FGP_LOCK 0x00000002
327#define FGP_CREAT 0x00000004
328#define FGP_WRITE 0x00000008
329#define FGP_NOFS 0x00000010
330#define FGP_NOWAIT 0x00000020
331#define FGP_FOR_MMAP 0x00000040
332#define FGP_HEAD 0x00000080
333#define FGP_ENTRY 0x00000100
334
335struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
336 int fgp_flags, gfp_t cache_gfp_mask);
337
338
339
340
341
342
343
344
345
346
347
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
353
/* As find_get_page(), with extra FGP_* flags passed to pagecache_get_page(). */
static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
359
360
361
362
363
364
365
366
367
368
369
370
371
372
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}
378
379
380
381
382
383
384
385
386
387
388
389
390
391
/**
 * find_lock_head - Locate, pin and lock a pagecache page.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Like find_lock_page(), but returns the head page of a THP covering
 * @index rather than the subpage.
 *
 * Context: May sleep.
 * Return: A struct page which is !PageTail, or %NULL if there is no page
 * in the cache for this index.
 */
static inline struct page *find_lock_head(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
}
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If no page is found,
 * a new page is allocated using @gfp_mask and added to the page cache and
 * the VM's LRU list.  The returned page is locked and has its reference
 * count incremented.
 *
 * On memory exhaustion, %NULL is returned.
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
424
425
426
427
428
429
430
431
432
433
434
435
436
437
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}
445
446
447static inline bool thp_contains(struct page *head, pgoff_t index)
448{
449
450 if (PageHuge(head))
451 return head->index == index;
452 return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
453}
454
455
456
457
458
459static inline struct page *find_subpage(struct page *head, pgoff_t index)
460{
461
462 if (PageHuge(head))
463 return head;
464
465 return head + (index & (thp_nr_pages(head) - 1));
466}
467
468unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
469 pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
470unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
471 pgoff_t end, unsigned int nr_pages,
472 struct page **pages);
/* As find_get_pages_range(), searching to the end of the file. */
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
480unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
481 unsigned int nr_pages, struct page **pages);
482unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
483 pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
484 struct page **pages);
/* As find_get_pages_range_tag(), searching to the end of the file. */
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
492
493struct page *grab_cache_page_write_begin(struct address_space *mapping,
494 pgoff_t index, unsigned flags);
495
496
497
498
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
504
505extern struct page * read_cache_page(struct address_space *mapping,
506 pgoff_t index, filler_t *filler, void *data);
507extern struct page * read_cache_page_gfp(struct address_space *mapping,
508 pgoff_t index, gfp_t gfp_mask);
509extern int read_cache_pages(struct address_space *mapping,
510 struct list_head *pages, filler_t *filler, void *data);
511
/*
 * Read a page into the cache for @mapping.  Passing a NULL filler means
 * the mapping's own read routine is used -- see read_cache_page().
 */
static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}
517
518
519
520
521
522static inline pgoff_t page_to_index(struct page *page)
523{
524 pgoff_t pgoff;
525
526 if (likely(!PageTransTail(page)))
527 return page->index;
528
529
530
531
532
533 pgoff = compound_head(page)->index;
534 pgoff += page - compound_head(page);
535 return pgoff;
536}
537
538extern pgoff_t hugetlb_basepage_index(struct page *page);
539
540
541
542
543
/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages, which index the
 * page cache in units of their huge page size).
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}
550
551
552
553
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}
558
/* As page_offset(), but via page_index() (handles swap-cache pages too). */
static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
563
564extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
565 unsigned long address);
566
567static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
568 unsigned long address)
569{
570 pgoff_t pgoff;
571 if (unlikely(is_vm_hugetlb_page(vma)))
572 return linear_hugepage_index(vma, address);
573 pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
574 pgoff += vma->vm_pgoff;
575 return pgoff;
576}
577
/* Wake-up key for waiters on a page bit. */
struct wait_page_key {
	struct page *page;	/* page being waited on */
	int bit_nr;		/* which page bit (e.g. PG_locked) */
	int page_match;		/* set by the wake function on a page match */
};
583
/* A waiter queued on a page bit. */
struct wait_page_queue {
	struct page *page;	/* page being waited on */
	int bit_nr;		/* which page bit we wait for */
	wait_queue_entry_t wait;	/* embedded wait queue entry */
};
589
590static inline bool wake_page_match(struct wait_page_queue *wait_page,
591 struct wait_page_key *key)
592{
593 if (wait_page->page != key->page)
594 return false;
595 key->page_match = 1;
596
597 if (wait_page->bit_nr != key->bit_nr)
598 return false;
599
600 return true;
601}
602
603extern void __lock_page(struct page *page);
604extern int __lock_page_killable(struct page *page);
605extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
606extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
607 unsigned int flags);
608extern void unlock_page(struct page *page);
609
610
611
612
/*
 * Try to take PG_locked on the head page without sleeping.
 * Returns non-zero on success.
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
618
619
620
621
/*
 * lock_page may only be used if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
628
629
630
631
632
633
/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
641
642
643
644
645
646
647
648
649
/*
 * lock_page_async - Lock the page, unless this would block.  If the page
 * is already locked, then queue a callback when the page becomes unlocked.
 * This callback can then retry the operation.
 *
 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
 * was already locked and the callback defined in 'wait' was queued.
 */
static inline int lock_page_async(struct page *page,
				  struct wait_page_queue *wait)
{
	if (!trylock_page(page))
		return __lock_page_async(page, wait);
	return 0;
}
657
658
659
660
661
662
663
664
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
671
672
673
674
675
676extern void wait_on_page_bit(struct page *page, int bit_nr);
677extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
678
679
680
681
682
683
684
685
686static inline void wait_on_page_locked(struct page *page)
687{
688 if (PageLocked(page))
689 wait_on_page_bit(compound_head(page), PG_locked);
690}
691
/* Killable variant of wait_on_page_locked(); 0 on success, -EINTR if killed. */
static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
698
699int put_and_wait_on_page_locked(struct page *page, int state);
700void wait_on_page_writeback(struct page *page);
701int wait_on_page_writeback_killable(struct page *page);
702extern void end_page_writeback(struct page *page);
703void wait_for_stable_page(struct page *page);
704
705void page_endio(struct page *page, bool is_write, int err);
706
707
708
709
710
711
712
713
714
715
/**
 * set_page_private_2 - Set PG_private_2 on a page and take a ref
 * @page: The page.
 *
 * Takes a reference on the head page and sets PG_private_2.  The ref is
 * dropped by end_page_private_2().
 */
static inline void set_page_private_2(struct page *page)
{
	page = compound_head(page);
	get_page(page);
	SetPagePrivate2(page);
}
722
723void end_page_private_2(struct page *page);
724void wait_on_page_private_2(struct page *page);
725int wait_on_page_private_2_killable(struct page *page);
726
727
728
729
730extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
731
732
733
734
/*
 * Fault in userspace address range for writing.  Writes a zero byte to the
 * first byte of every page in [uaddr, uaddr + size), and to the final byte
 * when the range does not end exactly on a page boundary, so that all
 * covered pages are faulted in writable.
 *
 * Returns 0 on success, -EFAULT on fault or if the range wraps the
 * address space.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	/* end < uaddr means the arithmetic wrapped around */
	if (unlikely(uaddr > end))
		return -EFAULT;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}
761
/*
 * Fault in userspace address range for reading.  Reads one byte from the
 * first byte of every page in [uaddr, uaddr + size), and from the final
 * byte when the range does not end on a page boundary.
 *
 * Returns 0 on success, -EFAULT on fault or if the range wraps the
 * address space.
 */
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	/* end < uaddr means the arithmetic wrapped around */
	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
788
789int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
790 pgoff_t index, gfp_t gfp_mask);
791int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
792 pgoff_t index, gfp_t gfp_mask);
793extern void delete_from_page_cache(struct page *page);
794extern void __delete_from_page_cache(struct page *page, void *shadow);
795void replace_page_cache_page(struct page *old, struct page *new);
796void delete_from_page_cache_batch(struct address_space *mapping,
797 struct pagevec *pvec);
798loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
799 int whence);
800
801
802
803
804
/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
/**
 * struct readahead_control - Describes a readahead request.
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 *
 * The _index/_nr_pages/_batch_count fields are private to the readahead
 * implementation; use the readahead_*() accessors below.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
	/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};
842
/* Declare and initialise a readahead_control on the stack. */
#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}
850
851#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
852
853void page_cache_ra_unbounded(struct readahead_control *,
854 unsigned long nr_to_read, unsigned long lookahead_count);
855void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
856void page_cache_async_ra(struct readahead_control *, struct page *,
857 unsigned long req_count);
858void readahead_expand(struct readahead_control *ractl,
859 loff_t new_start, size_t new_len);
860
861
862
863
864
865
866
867
868
869
870
871
872
873
/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the
 * application has used up enough of the readahead window that we should
 * start pulling in more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct page *page, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, page, req_count);
}
905
906
907
908
909
910
911
912
913
914
/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
	struct page *page;

	/* Consume the pages handed out by the previous call. */
	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;

	if (!rac->_nr_pages) {
		rac->_batch_count = 0;
		return NULL;
	}

	page = xa_load(&rac->mapping->i_pages, rac->_index);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	/* A THP covers several indices; advance past all of them next time. */
	rac->_batch_count = thp_nr_pages(page);

	return page;
}
934
/*
 * Fill @array with up to @array_sz pages from the readahead request,
 * returning how many were stored.  Used via readahead_page_batch().
 */
static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	/* Consume the pages handed out by the previous batch. */
	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);

		/*
		 * The page cache isn't using multi-index entries yet,
		 * so the xas cursor needs to be manually moved over the
		 * head page to skip its tail pages.
		 */
		if (PageHead(page))
			xas_set(&xas, rac->_index + rac->_batch_count);

		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}
973
974
975
976
977
978
979
980
981
982
983
984
/* Fill a fixed-size page array from a readahead request. */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))
987
988
989
990
991
/* The byte offset into the file of this readahead request. */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}
996
997
998
999
1000
/* The number of bytes in this readahead request. */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}
1005
1006
1007
1008
1009
/* The index of the first page in this readahead request. */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}
1014
1015
1016
1017
1018
/* The number of pages in this readahead request. */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}
1023
1024
1025
1026
1027
/* The number of bytes in the current batch of this readahead request. */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}
1032
/* Number of pages needed to hold the inode's contents, rounding up. */
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}
1038
1039
1040
1041
1042
1043
1044
1045
1046
/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
/**
 * i_blocks_per_page - How many blocks fit in this page.
 * @inode: The inode which contains the blocks.
 * @page: The page (head page if the page is a THP).
 *
 * If the block size is larger than the size of this page, return zero.
 *
 * Context: The caller should hold a refcount on the page to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this page.
 */
static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return thp_size(page) >> inode->i_blkbits;
}
1083#endif
1084