#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H
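/*
 * Copyright 1995 Linus Torvalds
 */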
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>	/* for in_interrupt() */
#include <linux/hugetlb_inline.h>
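/*
 * Bits in mapping->flags.
 */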
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};
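/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 */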
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}
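/* Restricts the given gfp_mask to what the mapping allows. */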
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
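/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */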
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);
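/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable, and put_page() must be able
 * to drop an invalid speculative reference safely.
 *
 * This is the lookup side of the lockless pagecache protocol (see
 * find_get_page() and friends), which follows this pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 */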
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
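/*
 * Same as above, but add instead of inc (could just be merged).
 */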
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
#define FGP_ACCESSED		0x00000001	/* mark the page accessed */
#define FGP_LOCK		0x00000002	/* return the page locked */
#define FGP_CREAT		0x00000004	/* allocate the page if absent */
#define FGP_WRITE		0x00000008	/* the page will be written to */
#define FGP_NOFS		0x00000010	/* clear __GFP_FS for the allocation */
#define FGP_NOWAIT		0x00000020	/* don't block on the page lock or reclaim */

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);
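/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */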
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
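/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */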
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
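/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */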
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
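/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */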
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);
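/*
 * Returns locked page at given index in given cache, creating it if needed.
 */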
static inline struct page *grab_cache_page(struct address_space *mapping,
					pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
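/*
 * Get the page's index in the radix tree.
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */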
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page.
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}
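/*
 * Get the page's offset in units of PAGE_SIZE, even for hugetlb pages.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */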
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}
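/*
 * Return byte-offset into filesystem object for page.
 */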
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
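/*
 * lock_page may only be called if we have the page's inode pinned.
 */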
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
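/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */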
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
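/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */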
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
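/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */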
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
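/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */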
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
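/*
 * Wait for a page to complete writeback.
 */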
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);
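/*
 * Add an arbitrary waiter to a page's wait queue.
 */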
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
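/*
 * Fault everything in given userspace address range in.
 */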
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	/* Keep the compiler from warning about the otherwise-unused read. */
	(void)c;
	return 0;
}
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
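/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */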
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}
#endif /* _LINUX_PAGEMAP_H */