#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>		/* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned.  Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as
 * was used to look the page up in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of pages coming out of
 * the allocator must be considered unstable: the page may have been freed
 * and re-allocated while we look at it.  After taking the speculative
 * reference, the caller must therefore re-check that the page is still the
 * one it expected to find (e.g. still present at the same radix-tree slot),
 * and drop the reference and retry if it is not.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
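
/*
 * Illustrative sketch (not part of this header) of the kind of lockless
 * lookup page_cache_get_speculative() is meant for; error handling and
 * the retry loop are simplified:
 *
 *	rcu_read_lock();
 *	page = radix_tree_lookup(&mapping->page_tree, index);
 *	if (page && !page_cache_get_speculative(page))
 *		page = NULL;	// page was being freed; caller retries
 *	rcu_read_unlock();
 *	// re-check that the slot still points at 'page' before using it,
 *	// and put_page() + retry if it does not.
 */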

/*
 * Same as page_cache_get_speculative(), but adds @count references
 * instead of one.
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
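
/*
 * Illustrative caller sketch (not part of this header); 'mapping' and
 * 'index' are assumed to be a valid address_space and page index:
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		// ... use the page ...
 *		put_page(page);		// drop the reference taken above
 *	}
 */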

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
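
/*
 * Illustrative sketch (not part of this header): a filesystem might use
 * find_or_create_page() to get a locked page it can fill; the gfp mask
 * shown is only an example:
 *
 *	struct page *page;
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (page) {
 *		// ... initialize or read the page contents ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */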

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
						pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Get the index of the page within its radix tree: the head page's ->index
 * plus the offset of a tail page within its compound page.
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * the head page.
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the file offset of the page in units of PAGE_SIZE.  Hugetlb pages
 * keep ->index in units of the huge page size, so scale it up here.
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
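
/*
 * Worked example (values are hypothetical): with 4KiB pages, a VMA with
 * vm_start == 0x700000000000 and vm_pgoff == 16, the address
 * 0x700000003000 maps to:
 *
 *	(0x3000 >> PAGE_SHIFT) + 16 = 3 + 16 = 19
 *
 * i.e. page index 19 within the backing file.
 */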

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
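
/*
 * Illustrative locking pattern (not part of this header); assumes the
 * caller already holds a reference on 'page':
 *
 *	lock_page(page);	// may sleep until the lock is acquired
 *	// ... operate on the page while it cannot be truncated ...
 *	unlock_page(page);
 */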

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * Wait for a bit in page->flags to clear.  The caller must hold a
 * reference on the page.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault everything in given userspace address range in.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
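
/*
 * Illustrative use of the fault-in helpers (not part of this header):
 * a write path might pre-fault the user buffer before taking locks that
 * must not be held across a page fault.  'buf' and 'count' are
 * hypothetical:
 *
 *	if (fault_in_pages_readable(buf, count))
 *		return -EFAULT;
 *	// take locks, then copy from 'buf' with pagefaults disabled
 */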

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
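
/*
 * Illustrative sketch (not part of this header) of inserting a freshly
 * allocated page; the gfp mask is only an example:
 *
 *	struct page *page = __page_cache_alloc(GFP_KERNEL);
 *
 *	if (page && add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
 *		put_page(page);		// insertion failed, drop our page
 *		page = NULL;
 *	}
 *	// on success the page is locked and present in the page cache
 */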

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */