#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_BALLOON_MAP	= __GFP_BITS_SHIFT + 4,	/* balloon page special map */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
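
/*
 * Example (illustrative sketch, not part of this header): an end-of-writeback
 * callback can record a failure on the mapping so that a later fsync() or
 * msync() reports it; "myfs_end_writeback" is a hypothetical helper.
 *
 *	static void myfs_end_writeback(struct page *page, int err)
 *	{
 *		mapping_set_error(page->mapping, err);
 *		end_page_writeback(page);
 *	}
 */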

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return 0;
}

static inline void mapping_set_balloon(struct address_space *mapping)
{
	set_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_clear_balloon(struct address_space *mapping)
{
	clear_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline int mapping_balloon(struct address_space *mapping)
{
	return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
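
/*
 * Example (illustrative sketch): a filesystem that must not re-enter itself
 * from reclaim during pagecache allocations can mask off __GFP_FS when it
 * sets up an inode's mapping; "myfs_init_mapping" is hypothetical.
 *
 *	static void myfs_init_mapping(struct address_space *mapping)
 *	{
 *		mapping_set_gfp_mask(mapping,
 *				     mapping_gfp_mask(mapping) & ~__GFP_FS);
 *	}
 */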

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ allow that in the future.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable.  page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache locking protocol,
 * where the lookup side (eg. find_get_page) has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * The remove side that cares about stability of _count (eg. reclaim) has the
 * following pattern (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: A sees the elevated refcount and bails out
 * - A runs before 2: 2 sees a zero refcount and retries; subsequently B
 *   completes and 1 finds no page, thus avoiding the elevated refcount
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
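
/*
 * Example (sketch of the lookup side of the protocol documented above; this
 * is close to what find_get_page() does internally, simplified to a plain
 * radix_tree_lookup() and assuming rcu_read_lock() is held):
 *
 *	struct page *page;
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, index);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;	(page was freed under us: retry)
 *		if (page != radix_tree_lookup(&mapping->page_tree, index)) {
 *			page_cache_release(page);	(raced with removal)
 *			goto repeat;
 *		}
 *	}
 */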

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

/*
 * Freeze the page's references: atomically drop _count from @count to zero,
 * succeeding only if it was exactly @count.  This is the remove side (step A)
 * of the lockless protocol described above; page_unfreeze_refs() backs it out.
 */
static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
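
/*
 * Example (sketch of how reclaim uses the freeze/unfreeze pair, roughly the
 * pattern of __remove_mapping() in mm/vmscan.c, with mapping->tree_lock held
 * for write; 2 = the pagecache reference plus the caller's reference):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	if (!page_freeze_refs(page, 2)) {
 *		spin_unlock_irq(&mapping->tree_lock);
 *		return 0;	(a speculative lookup holds a reference)
 *	}
 *	__delete_from_page_cache(page);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	(now free the page, or page_unfreeze_refs(page, 2) to back out)
 */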

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				  pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
				   pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages,
			    struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
					 pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
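
/*
 * Example (illustrative sketch): a simple in-kernel writer can grab a locked
 * pagecache page at @index, fill it, and release it:
 *
 *	struct page *page = grab_cache_page(mapping, index);
 *	if (!page)
 *		return -ENOMEM;
 *	(... copy data into the locked page, mark it dirty ...)
 *	unlock_page(page);
 *	page_cache_release(page);
 */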

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
					   pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
					  pgoff_t index, filler_t *filler,
					  void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				    pgoff_t index, filler_t *filler,
				    void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler,
			    void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
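
/*
 * Example (illustrative sketch): reading page @index of a file through the
 * pagecache; on success the page is uptodate (and unlocked) with a reference
 * held:
 *
 *	struct page *page = read_mapping_page(inode->i_mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	(... kmap() the page and use the data ...)
 *	page_cache_release(page);
 */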

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
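
/*
 * Example: for a fault at @address in a file-backed vma, the pagecache index
 * of the backing page is linear_page_index(vma, address).  With 4K pages
 * (PAGE_CACHE_SHIFT == PAGE_SHIFT), vm_start == 0x10000 and vm_pgoff == 4,
 * a fault at 0x12000 yields ((0x12000 - 0x10000) >> 12) + 4 == 6.
 */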

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
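
/*
 * Example (sketch of the caller pattern in filemap_fault(): if the lock
 * would block and FAULT_FLAG_ALLOW_RETRY is set, __lock_page_or_retry()
 * drops mmap_sem and returns 0, and the fault must be retried):
 *
 *	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
 *		page_cache_release(page);
 *		return ret | VM_FAULT_RETRY;
 *	}
 */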

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the range crosses a page boundary, also touch the last
		 * byte so the second page is faulted in.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

/*
 * Fault in a userspace page for reading.  Return non-zero on a fault.
 */
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
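
/*
 * Example (sketch of the classic buffered-write pattern, roughly as in
 * generic_perform_write()): prefault the source buffer, then copy with page
 * faults disabled so a fault cannot occur while the destination pagecache
 * page is locked; if the atomic copy comes up short, prefault and retry:
 *
 *	if (fault_in_pages_readable(buf, bytes))
 *		return -EFAULT;
 *	(... write_begin() returns a locked pagecache page ...)
 *	pagefault_disable();
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	pagefault_enable();
 */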

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted.  These are separate from the
 * single-page variants above, for the benefit of the fast paths where only
 * a single page needs faulting in.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
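
/*
 * Example (illustrative sketch): adding a freshly allocated page at @index,
 * roughly as the readahead path does (it uses add_to_page_cache_lru() so the
 * page is also put on the LRU).  On success the page is locked; the caller
 * is expected to read it in and unlock it:
 *
 *	struct page *page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	ret = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (ret)
 *		page_cache_release(page);	(e.g. -EEXIST: already cached)
 */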

#endif /* _LINUX_PAGEMAP_H */