#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= __GFP_BITS_SHIFT + 4,	/* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
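
/*
 * Illustrative sketch (hypothetical caller, not from this file): a
 * filesystem's writeback completion path would typically record a failure
 * against the mapping so that a later fsync()/msync() can report it.
 * "err" below is a made-up per-request error code.
 *
 *	if (err) {
 *		mapping_set_error(page->mapping, err);
 *		SetPageError(page);
 *	}
 *	end_page_writeback(page);
 */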

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a pre-emption notice if mapping hasn't been activated.
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
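
/*
 * Illustrative sketch (hypothetical caller): block device and filesystem
 * code typically restricts a mapping's allocation mode right after the
 * inode is set up, e.g. to forbid re-entry into the filesystem on
 * allocation.  "inode" below is assumed to be a freshly initialized inode.
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 */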

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ allow that for the future.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, bool cold);

/*
 * Speculatively take a reference to a page.
 *
 * If the page is free (_count == 0), _count is left untouched and 0 is
 * returned.  Otherwise _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section
 * that was used to look the page up in the pagecache radix-tree (or page
 * table): this allows allocators to use a synchronize_rcu() to stabilize
 * _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming
 * out of the allocator must be considered unstable.  page_count may
 * return higher than expected, and put_page must be able to do the right
 * thing when the page has been finished with, no matter what it is
 * subsequently allocated for (because put_page is what is used here to
 * drop an invalid speculative reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg.
 * find_get_page) has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct on the page (eg. reclaim)
 * B. remove page from pagecache
 * C. free the page
 *
 * The two sides cannot race: if the remove-side freezes the refcount
 * first, the lookup-side's conditional increment fails and the lookup
 * retries; if the lookup-side's increment happens first, the remove-side
 * sees an elevated refcount and backs off.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
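
/*
 * Illustrative sketch (hypothetical caller, error handling elided): the
 * lookup-side pattern described above, against mapping->page_tree.  Real
 * lookups (see find_get_entry()) additionally handle exceptional entries
 * via radix_tree_lookup_slot()/radix_tree_deref_slot().
 *
 *	struct page *page;
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;	// page was being freed: retry
 *		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
 *						       offset))) {
 *			page_cache_release(page);	// raced with removal
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */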

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

/*
 * Freeze a page's reference count to 0, but only if it is currently
 * exactly @count (ie. only the callers' references exist).  Returns
 * true on success.  This is the remove-side half of the lockless
 * pagecache protocol described above.
 */
static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

/*
 * Re-arm a frozen page's reference count to @count.
 */
static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
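
/*
 * Illustrative sketch (hypothetical remove-side caller, in the style of
 * reclaim's __remove_mapping()): with mapping->tree_lock held for write,
 * the refcount "freezes" to 0 only if nobody else holds a reference; on
 * failure the page was looked up concurrently and must be left alone.
 *
 *	if (!page_freeze_refs(page, 2))		// mapping + caller
 *		goto cannot_free;
 *	...remove the page from the radix tree...
 *	page_unfreeze_refs(page, 1);		// hand the last ref back
 */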

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					 pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
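
/*
 * Illustrative sketch (hypothetical caller): a successful lookup returns
 * the page with an elevated refcount, which the caller must drop.
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		...read whatever is needed from the page...
 *		page_cache_release(page);
 *	}
 */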

static inline struct page *find_get_page_flags(struct address_space *mapping,
					       pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					  pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
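
/*
 * Illustrative sketch (hypothetical caller): the page comes back both
 * pinned and locked, so both must be released.
 *
 *	struct page *page = find_lock_page(mapping, index);
 *
 *	if (page) {
 *		...operate on the page under PG_locked...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */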

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					       pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
				  FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
				  gfp_mask);
}
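
/*
 * Illustrative sketch (hypothetical caller): allocation may fail, so
 * %NULL must be handled; the gfp mask here follows the mapping's own.
 *
 *	struct page *page = find_or_create_page(mapping, index,
 *						mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	...fill or update the locked page...
 *	unlock_page(page);
 *	page_cache_release(page);
 */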

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
						  pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				    pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
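
/*
 * Illustrative sketch (hypothetical caller): read_mapping_page() returns
 * an ERR_PTR() on I/O or allocation failure, not %NULL, and the page is
 * not locked on return.
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...use the (uptodate) page...
 *	page_cache_release(page);
 */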

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);
	else
		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
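
/*
 * Worked example (made-up numbers): for a VMA with vm_start = 0x60000000
 * and vm_pgoff = 16, an address of 0x60003000 with 4K pages gives
 * (0x60003000 - 0x60000000) >> 12 = 3, plus vm_pgoff = 19: that address
 * maps file page 19.  The final shift is a no-op as long as
 * PAGE_CACHE_SHIFT == PAGE_SHIFT.
 */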

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
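
/*
 * Illustrative sketch (hypothetical caller): unlike lock_page(), the
 * killable variant can fail, and the caller must drop its reference and
 * propagate the error rather than touch the page.
 *
 *	ret = lock_page_killable(page);
 *	if (ret) {			// -EINTR: fatal signal pending
 *		page_cache_release(page);
 *		return ret;
 *	}
 */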

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
					     int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
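
/*
 * Illustrative sketch (hypothetical caller): code that is about to modify
 * or tear down a page typically locks it first and then drains writeback,
 * in that order, so no new writeback can start underneath it.
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	...truncate or modify the page...
 *	unlock_page(page);
 */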

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, int rw, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
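
/*
 * Illustrative sketch: the classic use is the buffered write path, which
 * faults the user buffer in *before* taking the page lock, because
 * faulting with the page lock held could deadlock (the fault might need
 * that very page).  "buf" and "bytes" below are hypothetical; compare
 * generic_perform_write(), which uses the iov_iter equivalent.
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes)))
 *		return -EFAULT;
 *	...->write_begin(), copy from buf, ->write_end()...
 */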

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted.  These touch every page in
 * the given range (one access per page) and stop at the first fault.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
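
/*
 * Illustrative sketch (hypothetical read-ahead style caller): allocate a
 * fresh page, insert it (which leaves it locked), and let the filesystem
 * read it; on -EEXIST somebody else added the page first, so the caller
 * would simply retry the lookup.  "file", "index" and "error" are
 * assumed caller state.
 *
 *	struct page *page = page_cache_alloc_cold(mapping);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	error = add_to_page_cache_lru(page, mapping, index,
 *				      mapping_gfp_mask(mapping));
 *	if (error) {
 *		page_cache_release(page);
 *		return error == -EEXIST ? 0 : error;
 *	}
 *	error = mapping->a_ops->readpage(file, page);	// unlocks the page
 */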

#endif /* _LINUX_PAGEMAP_H */