#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/hugetlb_inline.h>

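/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */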
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,
};

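/*
 * Record an I/O error against the mapping so that a later fsync()/msync()
 * can report it; -ENOSPC is tracked separately from other write errors.
 */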
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

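/*
 * The GFP mask used when allocating page cache pages for this mapping is
 * kept in the low bits of mapping->flags (see mapping_flags above).
 */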
static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

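/*
 * This is a non-atomic read-modify-write of mapping->flags, so it should
 * only be used while the mapping is being set up and is not yet visible
 * to other users.
 */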
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

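/*
 * PAGE_CACHE_SIZE is the unit in which the page cache is managed.  In this
 * tree it is simply an alias for PAGE_SIZE, as are the other PAGE_CACHE_*
 * macros below.
 */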
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

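/*
 * Speculative page references for the lockless page cache:
 *
 * A lookup that runs under rcu_read_lock() instead of the mapping's
 * tree_lock has no guarantee that the page it found has not been freed
 * (and perhaps reused) by the time a reference is taken.  The helpers
 * below therefore take the reference "speculatively"; the caller must
 * re-check that the page is still the one it looked up and retry the
 * lookup if it is not.
 */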
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
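	/*
	 * On a !SMP kernel with TREE_RCU, an RCU read-side section runs
	 * with preemption disabled, so nothing else can free the page
	 * underneath us here: a plain increment of the refcount is enough
	 * and the cmpxchg in get_page_unless_zero() can be avoided.
	 */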
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
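		/*
		 * The page has been freed, or is about to be freed by
		 * another task; report failure so the caller retries the
		 * lookup.
		 */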
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}

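/*
 * Same as page_cache_get_speculative(), but adds @count references instead
 * of a single one.
 */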
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

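/*
 * Used when removing a page from the page cache: page_freeze_refs() drops
 * the refcount to zero only if it still equals the expected @count (so no
 * new speculative reference can succeed), and page_unfreeze_refs() puts the
 * count back once the removal has finished or been abandoned.
 */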
static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) | __GFP_COLD);
}

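/*
 * Callback type used by read_cache_page() and friends to read a single
 * page; the void * argument is opaque data passed through from the caller
 * (read_mapping_page() below simply uses the mapping's ->readpage op).
 */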
typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

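/*
 * Return the locked page at @index in @mapping, allocating it with the
 * mapping's GFP mask if it is not already present.
 */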
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
						struct address_space *mapping,
						pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

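/*
 * Byte offset of @page within the file it belongs to (page->index counts
 * in units of PAGE_CACHE_SIZE).
 */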
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

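/*
 * lock_page() may sleep.  It may only be called by a caller that holds a
 * reference on the page (e.g. has the page's inode pinned), so the page
 * cannot disappear while we wait for the lock.
 */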
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

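/*
 * lock_page_killable() is like lock_page(), but the sleep can be
 * interrupted by a fatal signal.  It returns 0 once it holds the page lock
 * and -EINTR if it was killed while waiting.
 */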
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

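/*
 * lock_page_or_retry() - lock the page, unless this would block and the
 * caller indicated (via @flags) that it can handle a retry.  Returns
 * non-zero with the page locked, or 0 if the lock was not acquired.
 */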
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

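/*
 * This is exported only for the wait_on_page_locked()/wait_on_page_writeback()
 * wrappers below; never use it directly.
 */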
extern void wait_on_page_bit(struct page *page, int bit_nr);

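/*
 * Wait for a page to come unlocked.  The caller must hold a reference on
 * the page so that it cannot be freed while we sleep.
 */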
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

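/*
 * Wait for a page to finish writeback.
 */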
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

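/*
 * Add an arbitrary waiter to a page's wait queue.
 */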
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

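/*
 * Fault a userspace range into the page tables by touching its first and
 * last byte.  Returns non-zero if a fault could not be serviced.  Only two
 * pages are touched, so the range is assumed to span at most two pages.
 */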
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

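	/*
	 * Writing zeroes into userspace here is fine, because we know that
	 * the caller is about to overwrite this data anyway.
	 */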
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

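		/*
		 * If the range spills into a second page, also touch the
		 * last byte so that page is faulted in as well.
		 */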
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

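/*
 * The read-side counterpart: fault the range in by reading its first and
 * (if it crosses a page boundary) last byte.
 */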
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

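/*
 * Like add_to_page_cache_locked(), but intended for newly allocated pages:
 * nobody else can see the page yet, so the non-atomic __set_page_locked()
 * is sufficient to lock it before insertion.
 */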
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}

#endif