#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H
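
/*
 * Page-cache helpers: address_space flag accessors, page-cache
 * allocation and lookup, page locking/waiting primitives, and
 * user-space prefaulting helpers.
 */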

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/hugetlb_inline.h>
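/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the
 * page allocation mode of the mapping; the AS_* bits live above them.
 */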
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g. ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}
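/*
 * Replace the allocation-mode bits in mapping->flags.  This is a plain,
 * non-atomic read-modify-write, so it must not race with the set_bit()
 * based flag updates above; only use it before the mapping is in use.
 */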
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
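/*
 * The page-cache "chunk" size.  At present a chunk is exactly one page,
 * so the PAGE_CACHE_* macros simply alias their PAGE_* counterparts.
 */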
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);
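/*
 * page_cache_get_speculative() - try to take a reference on a page that
 * may concurrently be freed or removed from the page cache.
 *
 * On SMP (or with preemptible RCU) the page may disappear under us, so
 * the reference is only taken if the refcount is still non-zero
 * (get_page_unless_zero()); the caller must then recheck that the page
 * it pinned is still the one it looked up.  On !SMP with classic
 * (non-preemptible) tree RCU, the RCU read-side critical section keeps
 * the page from being freed, so a plain atomic_inc() is sufficient.
 *
 * Returns 1 if the reference was taken, 0 if the page was already on
 * its way to being freed.
 */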
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
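	/*
	 * Preemption is disabled here (rcu_read_lock() on a non-preemptible
	 * RCU implementation does that for us), so the page cannot be freed
	 * under us and a plain increment of the refcount is safe.
	 */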
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
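		/*
		 * The refcount already hit zero: the page has been freed or
		 * is about to be freed, so the speculative reference fails.
		 */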
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
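/*
 * Same as page_cache_get_speculative(), but adds 'count' references
 * instead of a single one.
 */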
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}
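
/*
 * page_freeze_refs() atomically drops the refcount from an expected
 * value to zero, blocking further speculative references; a later
 * page_unfreeze_refs() restores the count.
 */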
static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
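
/*
 * Allocate a page for the page cache using the mapping's allocation
 * mode.  The readahead variant adds __GFP_NORETRY | __GFP_NOWARN so
 * that readahead allocations fail quickly and quietly under pressure.
 */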
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);
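/*
 * Return the locked page at the given index in the given mapping,
 * creating it if needed (NULL on allocation failure).
 */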
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
			pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page(struct address_space *mapping,
			pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
			pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
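/*
 * Return the byte offset of this page within its file.
 */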
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
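/*
 * lock_page may only be called if we have the page's inode pinned.
 */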
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
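/*
 * lock_page_killable is like lock_page, but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */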
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
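/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */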
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
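/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */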
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}
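/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page, i.e. with an
 * elevated page reference, so that the page cannot be freed while we wait.
 */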
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}
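/*
 * Wait for a page to complete writeback.
 */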
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
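/*
 * Add an arbitrary waiter to a page's wait queue.
 */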
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
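/*
 * Fault one or two userspace pages into pagetables.
 * Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */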
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;
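
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */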
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;
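
		/*
		 * If the range spans a page boundary, also touch the page
		 * containing the last byte.
		 */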
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
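/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted.  These are separate from the
 * single-page variants because the vast majority of callers never need
 * more than a single page prefaulted.
 */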
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;
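
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */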
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

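	/* Check whether the range spilled into the next page. */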
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

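	/* Check whether the range spilled into the next page. */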
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
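/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */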
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}

#endif /* _LINUX_PAGEMAP_H */