#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/hugetlb_inline.h>
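
/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */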
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_BALLOON_MAP	= __GFP_BITS_SHIFT + 4,	/* balloon page special map */
	AS_EXITING	= __GFP_BITS_SHIFT + 5,	/* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return 0;
}

static inline void mapping_set_balloon(struct address_space *mapping)
{
	set_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_clear_balloon(struct address_space *mapping)
{
	clear_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline int mapping_balloon(struct address_space *mapping)
{
	return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}
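
/*
 * This is non-atomic (a plain read-modify-write, unlike the set_bit
 * helpers above).  Only to be used before the mapping is activated.
 */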
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
		   (__force unsigned long)mask;
}
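
/*
 * The PAGE_CACHE_* constants currently alias the PAGE_* constants: the
 * page cache is managed in units of one hardware page.  They exist so
 * that the page cache could, in the future, use a different (larger)
 * allocation unit.
 */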
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);
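
/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned.  Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section
 * that was used to look the page up in the radix tree (or page table):
 * this allows the allocator to use a synchronize_rcu() to stabilize
 * _count.
 *
 * The lookup side (e.g. find_get_page) follows this pattern:
 *  1. find the page in the radix tree
 *  2. conditionally increment its refcount
 *  3. check that the page is still in the pagecache (if not, goto 1)
 *
 * The remove side (e.g. reclaim), with tree_lock held for write, does:
 *  A. atomically check that the refcount is the expected value and
 *     freeze it to 0 (page_freeze_refs)
 *  B. remove the page from the pagecache
 *  C. free the page
 *
 * There are two interleavings that matter: either 2 runs before A, in
 * which case A sees the elevated refcount and bails out, or A runs
 * before 2, in which case 2 sees a zero refcount, fails, and the lookup
 * side retries from 1.
 */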
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we
	 * have found a page in the radix tree here, we have pinned its
	 * refcount by disabling preempt, and hence no need for the
	 * "speculative get" that SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see the comment above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
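
/*
 * Same as page_cache_get_speculative(), but adds @count references
 * instead of a single one.
 *
 * Note: the !SMP fast path below is guarded by CONFIG_TINY_RCU to match
 * page_cache_get_speculative(); the previous !SMP && TREE_RCU guard
 * could never be true, since TREE_RCU depends on SMP.
 */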
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

/* Atomically set a page's refcount to 0, but only if it equals @count. */
static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

/* Restore the refcount of a page frozen by page_freeze_refs(). */
static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_get_page(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_page(struct address_space *mapping, pgoff_t offset);
struct page *find_or_create_page(struct address_space *mapping, pgoff_t index,
				 gfp_t gfp_mask);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			    int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
					 pgoff_t index, unsigned flags);
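
/*
 * Return a locked page at the given index in the given cache, creating
 * it if needed.
 */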
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
					   pgoff_t index);
extern struct page *read_cache_page(struct address_space *mapping,
				    pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			    struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
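
/*
 * Return the byte offset into the filesystem object for this page.
 */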
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return likely(!test_and_set_bit_lock(PG_locked, &page->flags));
}
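
/*
 * lock_page may only be called if we have the page's inode pinned.
 */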
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
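
/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */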
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
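
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */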
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
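
/*
 * These are exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use them directly!
 */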
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}
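
/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page, i.e. with an
 * elevated refcount, so that the page cannot go away during the wait.
 */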
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}
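
/*
 * Wait for a page to complete writeback.
 */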
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
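
/*
 * Add an arbitrary waiter to a page's wait queue.
 */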
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
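
/*
 * Fault in a userspace page so that a subsequent atomic copy to or from
 * user space is likely to succeed.  Returns 0 on success, or the
 * __put_user()/__get_user() error on failure.  Only the first and last
 * bytes of the range are touched, so the range must not span more than
 * two pages.
 */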
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		/* Also touch the last byte if the range crosses a page. */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
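
/*
 * Multipage variants of the above prefault helpers: these touch a byte
 * in every page of the range, so they remain correct when more than
 * PAGE_SIZE of data needs to be prefaulted.
 */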
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
			     pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
			  pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
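
/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */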
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}

#endif /* _LINUX_PAGEMAP_H */