#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/hugetlb_inline.h>
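
/*
 * Bits in mapping->flags.
 */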
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};
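
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.
 */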
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}
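
/* Restricts the given gfp_mask to what the mapping allows. */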
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
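
/*
 * This is a plain, non-atomic store: only to be used while the mapping
 * is being set up, before it is visible to other users.
 */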
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);
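
/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable, and put_page must be able
 * to drop an invalid speculative reference safely.
 *
 * After taking the speculative reference, the caller must re-check that the
 * page is still the one it expected (e.g. by re-checking page->mapping and
 * page->index) and drop the reference if it is not.
 */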
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
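
/*
 * Same as page_cache_get_speculative(), but adds @count references
 * instead of a single one.
 */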
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
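
/*
 * Flags for pagecache_get_page():
 *
 * FGP_ACCESSED:	mark the page accessed
 * FGP_LOCK:		return the page locked
 * FGP_CREAT:		if no page is present, allocate one and add it to the
 *			page cache (and the VM's LRU list)
 * FGP_WRITE:		the page will be written to
 * FGP_NOFS:		__GFP_FS will be cleared from the gfp flags
 * FGP_NOWAIT:		don't block on the page lock or the allocation
 */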
#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);
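
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */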
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
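
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */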
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
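
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */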
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
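
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */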
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);
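
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */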
static inline struct page *grab_cache_page(struct address_space *mapping,
							pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
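
/*
 * Get the index of the page within its radix tree, in units of PAGE_SIZE.
 */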
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page.
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}
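
/*
 * Get the page's offset into its file, in units of PAGE_SIZE.  A hugetlb
 * head page keeps ->index in units of the huge page size, so scale it
 * back to PAGE_SIZE units.
 */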
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}
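
/*
 * Return byte-offset into filesystem object for page.
 */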
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
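
/*
 * lock_page may only be called if we have the page's inode pinned.
 */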
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
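
/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */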
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
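
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */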
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
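
/*
 * These are exported for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */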
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
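
/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased page refcount so that the page won't
 * go away during the wait.
 */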
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
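
/*
 * Wait for a page to complete writeback.
 */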
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);
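
/*
 * Add an arbitrary waiter to a page's wait queue.
 */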
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
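
/*
 * Fault everything in given userspace address range in.
 */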
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
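
/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */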
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */