#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
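/*
 * Illustrative use (a hypothetical writeback completion path, not code
 * from this file): record a write error on the mapping so that a later
 * fsync()/msync() can report it to userspace.
 *
 *	static void example_end_writeback(struct page *page, int err)
 *	{
 *		if (err)
 *			mapping_set_error(page->mapping, err);
 *		end_page_writeback(page);
 *	}
 */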

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (likely(mapping))
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a pre-flight check.
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
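/*
 * Illustrative use (hypothetical, not from this file): a filesystem
 * setting up a new inode's mapping so that page allocations cannot
 * recurse back into the filesystem:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *
 * Allocations made via page_cache_alloc() below then inherit GFP_NOFS.
 */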

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in pagecache
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side (eg. reclaim), which cares about the stability of _count,
 * does the following with tree_lock held for write:
 * A. atomically check refcount is the expected value (and freeze it at 0)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: A then sees an elevated refcount, so the remove side
 *   bails out and the page is not freed under the lookup.
 * - A runs before 2: the refcount is already frozen at zero, so the
 *   conditional increment in 2 fails and the lookup side retries at 1.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
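/*
 * Sketch of the lookup-side pattern described above (modelled loosely on
 * find_get_page(); illustrative only -- the radix-tree calls shown are
 * an assumption about the caller, not an API provided by this header):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, index);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;	// page was being freed; retry
 *		// recheck the page is still the one we looked up
 *		if (unlikely(page !=
 *			     radix_tree_lookup(&mapping->page_tree, index))) {
 *			page_cache_release(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */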

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

/*
 * Atomically drop all references to a page iff its refcount is exactly
 * @count, freezing it at zero so nobody can take a new reference.
 * Returns non-zero on success.  Used, with tree_lock held, by code that
 * is about to remove the page from the pagecache.
 */
static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

/*
 * Undo page_freeze_refs(): restore @count references to a frozen page.
 */
static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
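/*
 * Sketch of the remove side (loosely modelled on reclaim's
 * __remove_mapping(); illustrative only, not a verbatim copy):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	// expect exactly 2 refs: the pagecache's and our caller's
 *	if (!page_freeze_refs(page, 2))
 *		goto cannot_free;		// raced with a new reference
 *	if (unlikely(PageDirty(page))) {
 *		page_unfreeze_refs(page, 2);	// give the refs back
 *		goto cannot_free;
 *	}
 *	__remove_from_page_cache(page);
 *	spin_unlock_irq(&mapping->tree_lock);
 */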

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
						struct address_space *mapping,
						pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
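/*
 * Illustrative use (hypothetical caller, not from this file): read page
 * @n of an inode's data, waiting for it to become uptodate.  On failure
 * an ERR_PTR is returned rather than NULL:
 *
 *	struct page *page = read_mapping_page(inode->i_mapping, n, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... use the page contents (kmap() it if it may be highmem) ...
 *	page_cache_release(page);
 */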

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page, but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
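/*
 * Illustrative use (hypothetical read path, not from this file): take
 * the page lock but allow a fatal signal to abort the wait:
 *
 *	error = lock_page_killable(page);
 *	if (error) {
 *		page_cache_release(page);
 *		return error;		// -EINTR: killed while waiting
 *	}
 *	// ... page is locked here ...
 *	unlock_page(page);
 */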

/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page_nosync(page);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
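/*
 * Illustrative sequence (hypothetical truncate-style caller, not from
 * this file): quiesce a page before operating on it, using the waiters
 * above:
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);	// no I/O in flight after this
 *	// ... modify or discard the page ...
 *	unlock_page(page);
 */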

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}
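/*
 * Illustrative use (the classic buffered-write deadlock-avoidance
 * pattern; a hypothetical sketch, not code from this file): pre-fault
 * the user buffer before taking the page lock, so that a fault on @buf
 * cannot recurse into the filesystem while the page is locked:
 *
 *	fault_in_pages_readable(buf, bytes);
 *	page = grab_cache_page_write_begin(mapping, index, flags);
 *	// ... copy from buf with pagefaults disabled ...
 */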

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
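/*
 * Illustrative use (hypothetical, not from this file): allocate a fresh
 * page and insert it at @index, ending up with a locked page on success:
 *
 *	struct page *page = page_cache_alloc(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache(page, mapping, index,
 *				mapping_gfp_mask(mapping));
 *	if (err) {
 *		page_cache_release(page);
 *		return err;
 *	}
 *	// page is now locked and in the pagecache
 */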

#endif /* _LINUX_PAGEMAP_H */