#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
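
/*
 * Illustrative sketch (not part of this header): a filesystem's writeback
 * error path typically records the failure so that a later fsync() or
 * msync() can report it; "example_do_io" is a hypothetical helper.
 *
 *	err = example_do_io(page);
 *	if (err)
 *		mapping_set_error(page->mapping, err);
 *
 * -ENOSPC is kept distinct from other errors so the eventual caller can
 * report it rather than a bare -EIO.
 */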

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
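
/*
 * Illustrative sketch: a filesystem that cannot tolerate highmem pages in
 * a mapping might tighten the mask at inode-initialisation time, before
 * the first page is allocated (the update is non-atomic):
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			mapping_gfp_mask(inode->i_mapping) & ~__GFP_HIGHMEM);
 */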

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct on the page (eg. reclaim)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: A sees the elevated refcount and bails out
 * - A runs before 2: 2 sees a zero refcount and fails; B then removes the
 *   page, so the recheck in 3 fails and the lookup retries
 *
 * Either way the lookup never keeps a reference to a page that the remove
 * side has decided to free.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
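
/*
 * Illustrative sketch of the lookup-side pattern described above; this is
 * roughly what find_get_page() does internally:
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, index);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;		// raced with a free; retry
 *		// has the page moved (step 3)?
 *		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
 *						       index))) {
 *			page_cache_release(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */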

/*
 * Same as page_cache_get_speculative(), but adds "count" references
 * instead of one (the two could just be merged).
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

/*
 * Atomically drop all references iff exactly "count" are held; returns
 * non-zero on success. Used by the remove side of the protocol above to
 * make sure no speculative references remain before freeing a page.
 */
static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

/*
 * Undo a successful page_freeze_refs(): give the page back its "count"
 * references.
 */
static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
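
/*
 * Illustrative sketch of the remove side (cf. vmscan's __remove_mapping()):
 * with mapping->tree_lock held, verify that only the expected references
 * exist - the pagecache's own plus the caller's isolate reference - before
 * tearing the page out of the pagecache:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	if (!page_freeze_refs(page, 2))
 *		goto cannot_free;	// a speculative reference appeared
 *	...delete the page from the radix tree and free it...
 */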

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}
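
/*
 * Illustrative sketch: readahead is best-effort, which is why the variant
 * above neither retries hard nor warns - on allocation failure the caller
 * simply stops reading ahead:
 *
 *	page = page_cache_alloc_readahead(mapping);
 *	if (!page)
 *		break;
 *	page->index = index;
 *	list_add(&page->lru, &page_pool);
 */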

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				  pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
				   pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages,
			    struct page **pages);
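
/*
 * Illustrative sketch: the gang-lookup helpers are normally driven in
 * batches, with each batch released before the next (cf. pagevec_lookup()):
 *
 *	struct page *pages[16];
 *	unsigned i, nr;
 *
 *	while ((nr = find_get_pages(mapping, index, 16, pages)) != 0) {
 *		for (i = 0; i < nr; i++) {
 *			index = pages[i]->index + 1;
 *			...process pages[i]...
 *		}
 *		release_pages(pages, nr, 0);
 *	}
 */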

struct page *grab_cache_page_write_begin(struct address_space *mapping,
					 pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
					   pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
					  pgoff_t index, filler_t *filler,
					  void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				    pgoff_t index, filler_t *filler,
				    void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler,
			    void *data);

static inline struct page *read_mapping_page_async(
					struct address_space *mapping,
					pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
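
/*
 * Illustrative sketch: filesystems commonly pull metadata (eg. directory
 * blocks) through the pagecache this way. The page comes back unlocked
 * with a reference held; errors are returned as an ERR_PTR:
 *
 *	page = read_mapping_page(mapping, n, NULL);
 *	if (IS_ERR(page))
 *		return ERR_CAST(page);
 *	kaddr = kmap(page);
 *	...read the contents...
 *	kunmap(page);
 *	page_cache_release(page);
 */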

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
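
/*
 * Worked example: with 4K pages, a vma with vm_start == 0x40000000 and
 * vm_pgoff == 8, a fault at address 0x40003000 yields
 * pgoff = (0x3000 >> 12) + 8 = 11, ie the 12th page of the file. The
 * final shift is a no-op as long as PAGE_CACHE_SHIFT == PAGE_SHIFT.
 */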

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
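
/*
 * Illustrative sketch: the usual pattern is to take a reference first,
 * then lock, operate, unlock and release:
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		lock_page(page);
 *		...page is now stable: recheck ->mapping, dirty it, etc...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */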

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
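
/*
 * Illustrative sketch: the buffered write path pre-faults the source
 * buffer with this helper before locking the destination page, so that
 * the subsequent usercopy done with pagefaults disabled almost never
 * fails (cf. generic_perform_write()):
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	...write_begin, copy with pagefaults disabled, write_end...
 */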

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
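
/*
 * Illustrative sketch of the common "look up, else allocate and insert"
 * sequence (cf. do_read_cache_page()). -EEXIST means somebody else
 * inserted a page at that index first, so the caller retries the lookup:
 *
 * repeat:
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page = page_cache_alloc_cold(mapping);
 *		if (!page)
 *			return ERR_PTR(-ENOMEM);
 *		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *		if (err) {
 *			page_cache_release(page);
 *			if (err == -EEXIST)
 *				goto repeat;
 *			return ERR_PTR(err);
 *		}
 *	}
 */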

#endif /* _LINUX_PAGEMAP_H */