#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>	/* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
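
/*
 * Example (illustrative sketch, not part of this header): an asynchronous
 * writeback completion path can record a failure on the mapping so that a
 * later fsync() will see it.  The surrounding function and its bio usage
 * are hypothetical; only mapping_set_error() and end_page_writeback() come
 * from this header.
 *
 *	static void my_end_write(struct bio *bio)
 *	{
 *		struct page *page = bio->bi_io_vec[0].bv_page;
 *
 *		if (bio->bi_error)
 *			mapping_set_error(page->mapping, bio->bi_error);
 *		end_page_writeback(page);
 *		bio_put(bio);
 *	}
 */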

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return 0;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, thus avoiding
 *   the race.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
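
/*
 * Example (condensed, illustrative sketch of the lookup side, steps 1-3
 * above; the real code is find_get_entry() in mm/filemap.c and uses
 * radix_tree_deref_slot() rather than plain lookups):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			put_page(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * The first goto is step 2 failing (the page was being freed); the second
 * check is step 3, catching a page that was freed and reused elsewhere
 * between the lookup and the reference grab.
 */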

/*
 * Same as above, but add instead of inc (could just be merged with above).
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
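
/*
 * Example (illustrative sketch): modify an existing pagecache page in
 * place.  The page lock keeps truncation and reclaim away while the page
 * is dirtied; my_modify() is a hypothetical helper.
 *
 *	struct page *page = find_lock_page(mapping, index);
 *
 *	if (page) {
 *		my_modify(page);
 *		set_page_dirty(page);
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */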

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
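
/*
 * Example (illustrative sketch): "give me the page at this index, creating
 * it if necessary", as filesystem metadata code typically wants:
 *
 *	struct page *page = find_or_create_page(mapping, index,
 *						mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	... fill or update the locked page ...
 *	unlock_page(page);
 *	put_page(page);
 */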

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
						pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
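
/*
 * Example (illustrative sketch): read_mapping_page() returns an ERR_PTR()
 * on failure and an unlocked, uptodate page on success:
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... map or copy from the page ...
 *	put_page(page);
 */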

/*
 * Get index of the page within the radix-tree.
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page.
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
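
/*
 * Worked example: with 4K pages, for a VMA with vm_pgoff == 16 (i.e. the
 * mapping starts 64K into the file), a fault at vma->vm_start + 0x3000
 * yields index (0x3000 >> PAGE_SHIFT) + 16 == 3 + 16 == 19:
 *
 *	pgoff_t idx = linear_page_index(vma, fault_address);
 *	struct page *page = find_get_page(vma->vm_file->f_mapping, idx);
 *
 * (fault_address stands in for the faulting address a handler receives.)
 */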

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
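
/*
 * Example (illustrative sketch, modelled on the swap-fault path in
 * mm/memory.c; the variable names are schematic):
 *
 *	if (!lock_page_or_retry(page, mm, fault_flags)) {
 *		put_page(page);
 *		return VM_FAULT_RETRY;
 *	}
 *
 * On the retry path, __lock_page_or_retry() has already dropped mmap_sem
 * for us (depending on the FAULT_FLAG_* bits in fault_flags).
 */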

/*
 * Wait for a page bit to clear.  These are exported primarily for the
 * wait_on_page_locked()/wait_on_page_writeback() wrappers below.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
					     int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault everything in given userspace address range in.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
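
/*
 * Example (illustrative sketch): buffered-write paths pre-fault the user
 * buffer *before* taking the page lock, then copy with page faults
 * disabled, retrying if the copy comes up short; see
 * generic_perform_write() in mm/filemap.c for the real loop.
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes)))
 *		return -EFAULT;
 *	...
 *	kaddr = kmap_atomic(page);
 *	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 *	kunmap_atomic(kaddr);
 */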

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
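
/*
 * Example (illustrative sketch, modelled on page_cache_read() in
 * mm/filemap.c): allocate a fresh page, insert it into the pagecache and
 * LRU, and kick off a read.  Losing the insertion race (-EEXIST) is not
 * an error; the function name is hypothetical.
 *
 *	static int my_read_page(struct file *file, pgoff_t offset)
 *	{
 *		struct address_space *mapping = file->f_mapping;
 *		struct page *page;
 *		int ret;
 *
 *		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
 *								 GFP_KERNEL));
 *		if (!page)
 *			return -ENOMEM;
 *		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
 *		if (ret == 0)
 *			ret = mapping->a_ops->readpage(file, page);
 *		else if (ret == -EEXIST)
 *			ret = 0;
 *		put_page(page);
 *		return ret;
 *	}
 */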

/* Number of pagecache pages spanned by i_size, rounded up. */
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */