1
2#ifndef _LINUX_PAGEMAP_H
3#define _LINUX_PAGEMAP_H
4
5
6
7
8#include <linux/mm.h>
9#include <linux/fs.h>
10#include <linux/list.h>
11#include <linux/highmem.h>
12#include <linux/compiler.h>
13#include <linux/uaccess.h>
14#include <linux/gfp.h>
15#include <linux/bitops.h>
16#include <linux/hardirq.h>
17#include <linux/hugetlb_inline.h>
18
19struct pagevec;
20
21
22
23
24enum mapping_flags {
25 AS_EIO = 0,
26 AS_ENOSPC = 1,
27 AS_MM_ALL_LOCKS = 2,
28 AS_UNEVICTABLE = 3,
29 AS_EXITING = 4,
30
31 AS_NO_WRITEBACK_TAGS = 5,
32};
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48static inline void mapping_set_error(struct address_space *mapping, int error)
49{
50 if (likely(!error))
51 return;
52
53
54 filemap_set_wb_err(mapping, error);
55
56
57 if (error == -ENOSPC)
58 set_bit(AS_ENOSPC, &mapping->flags);
59 else
60 set_bit(AS_EIO, &mapping->flags);
61}
62
/* Mark @mapping's pages unevictable by setting AS_UNEVICTABLE. */
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}
67
/* Clear AS_UNEVICTABLE so @mapping's pages become evictable again. */
static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
72
73static inline int mapping_unevictable(struct address_space *mapping)
74{
75 if (mapping)
76 return test_bit(AS_UNEVICTABLE, &mapping->flags);
77 return !!mapping;
78}
79
/* Flag @mapping as belonging to an inode that is being evicted. */
static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}
84
/* Non-zero if mapping_set_exiting() has flagged @mapping for eviction. */
static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}
89
/* Opt @mapping out of writeback tag bookkeeping (AS_NO_WRITEBACK_TAGS). */
static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
94
/* Non-zero unless the mapping opted out via AS_NO_WRITEBACK_TAGS. */
static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
99
/* Return the gfp mask used for page-cache allocations in this mapping. */
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->gfp_mask;
}
104
105
106static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
107 gfp_t gfp_mask)
108{
109 return mapping_gfp_mask(mapping) & gfp_mask;
110}
111
112
113
114
115
/*
 * Set the gfp mask used for page-cache allocations in this mapping.
 * NOTE(review): plain store, no locking — presumably only safe while no
 * concurrent allocations can occur; confirm against callers.
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}
120
121void release_pages(struct page **pages, int nr);
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
/*
 * Speculatively raise @page's refcount by @count for a lockless page-cache
 * lookup.  Returns 1 on success; returns 0 when the page's refcount was
 * already zero (page freed or being freed), in which case the caller
 * should retry its lookup.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * On !SMP TINY_RCU kernels the caller's rcu_read_lock() disables
	 * preemption, so a page found in the page cache cannot be freed
	 * under us; a plain ref add suffices instead of the speculative
	 * add_unless dance below.  A zero refcount here would mean the
	 * page was already freed — bug.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing.
		 */
		return 0;
	}
#endif
	/* Speculative lookups must resolve to head pages, never tails. */
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
199
/* Speculatively take one reference on @page; 0 means caller must retry. */
static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}
204
/* Speculatively take @count references on @page; 0 means caller must retry. */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
209
#ifdef CONFIG_NUMA
/* NUMA builds pick an allocation node out of line (see mm/filemap.c). */
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
/* Allocate a single (order-0) page for the page cache. */
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif
218
/* Allocate a page-cache page using the mapping's own gfp mask. */
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}
223
/*
 * Gfp mask for readahead allocations: readahead is best-effort, so don't
 * retry hard and don't warn on failure.
 */
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
228
229typedef int filler_t(void *, struct page *);
230
231pgoff_t page_cache_next_miss(struct address_space *mapping,
232 pgoff_t index, unsigned long max_scan);
233pgoff_t page_cache_prev_miss(struct address_space *mapping,
234 pgoff_t index, unsigned long max_scan);
235
236#define FGP_ACCESSED 0x00000001
237#define FGP_LOCK 0x00000002
238#define FGP_CREAT 0x00000004
239#define FGP_WRITE 0x00000008
240#define FGP_NOFS 0x00000010
241#define FGP_NOWAIT 0x00000020
242#define FGP_FOR_MMAP 0x00000040
243
244struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
245 int fgp_flags, gfp_t cache_gfp_mask);
246
247
248
249
250
251
252
253
254
255
256
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If a page is
 * present, it is returned with an increased refcount; otherwise NULL.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
262
/* Like find_get_page(), but with caller-supplied FGP_* lookup flags. */
static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
268
269
270
271
272
273
274
275
276
277
278
279
280
281
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * As find_get_page(), but the returned page (if any) is also locked
 * (FGP_LOCK).  Returns NULL if no page is present.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode if a new page must be created
 *
 * Returns the locked page at @offset, creating (and marking accessed)
 * a new one with @gfp_mask if none exists.  NULL on allocation failure.
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
314
315
316
317
318
319
320
321
322
323
324
325
326
327
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as find_or_create_page() but never sleeps: FGP_NOWAIT skips
 * blocking on the page lock, and FGP_NOFS keeps the allocation from
 * recursing into the filesystem.  Returns NULL rather than waiting.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}
335
336struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
337struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
338unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
339 unsigned int nr_entries, struct page **entries,
340 pgoff_t *indices);
341unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
342 pgoff_t end, unsigned int nr_pages,
343 struct page **pages);
/*
 * Gang lookup from *@start to the end of the mapping: convenience wrapper
 * around find_get_pages_range() with end = (pgoff_t)-1.
 */
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
351unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
352 unsigned int nr_pages, struct page **pages);
353unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
354 pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
355 struct page **pages);
/*
 * Tagged gang lookup from *@index to the end of the mapping: wrapper
 * around find_get_pages_range_tag() with end = (pgoff_t)-1.
 */
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
363unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
364 xa_mark_t tag, unsigned int nr_entries,
365 struct page **entries, pgoff_t *indices);
366
367struct page *grab_cache_page_write_begin(struct address_space *mapping,
368 pgoff_t index, unsigned flags);
369
370
371
372
/*
 * Returns locked page at given index in given cache, creating it if
 * needed, using the mapping's own gfp mask for any allocation.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
				pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
378
379extern struct page * read_cache_page(struct address_space *mapping,
380 pgoff_t index, filler_t *filler, void *data);
381extern struct page * read_cache_page_gfp(struct address_space *mapping,
382 pgoff_t index, gfp_t gfp_mask);
383extern int read_cache_pages(struct address_space *mapping,
384 struct list_head *pages, filler_t *filler, void *data);
385
/*
 * Read page @index of @mapping into the page cache using the mapping's
 * ->readpage as the filler.  @data is passed through to the filler
 * (here it stands in for the struct file * ->readpage expects — the
 * filler_t cast relies on that ABI compatibility).
 */
static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
392
393
394
395
396
397static inline pgoff_t page_to_index(struct page *page)
398{
399 pgoff_t pgoff;
400
401 if (likely(!PageTransTail(page)))
402 return page->index;
403
404
405
406
407
408 pgoff = compound_head(page)->index;
409 pgoff += page - compound_head(page);
410 return pgoff;
411}
412
413
414
415
416
/*
 * Get the file offset of @page in units of PAGE_SIZE.  Hugetlb head
 * pages keep ->index in huge-page-sized units; shifting by the compound
 * order converts that to small-page units.  Everything else goes
 * through page_to_index().
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}
424
425
426
427
/* Byte offset of @page within its file, from the raw page->index. */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}
432
/*
 * Byte offset of @page within its file, via page_index() — presumably
 * so swap-cache pages resolve correctly; confirm against page_index().
 */
static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
437
438extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
439 unsigned long address);
440
441static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
442 unsigned long address)
443{
444 pgoff_t pgoff;
445 if (unlikely(is_vm_hugetlb_page(vma)))
446 return linear_hugepage_index(vma, address);
447 pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
448 pgoff += vma->vm_pgoff;
449 return pgoff;
450}
451
452extern void __lock_page(struct page *page);
453extern int __lock_page_killable(struct page *page);
454extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
455 unsigned int flags);
456extern void unlock_page(struct page *page);
457
/*
 * Try to take the page lock without blocking.  The PG_locked bit lives
 * on the head page of a compound page, hence the compound_head() first.
 * Returns non-zero if the lock was acquired (bit was previously clear).
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
463
464
465
466
/*
 * lock_page may only be called if we have the page's inode pinned;
 * sleeps in __lock_page() until the lock is acquired if the fast-path
 * trylock fails.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
473
474
475
476
477
478
/*
 * lock_page_killable - like lock_page(), but the sleep in the slow path
 * can be interrupted by a fatal signal.  Returns 0 once the lock is
 * held, or the (negative) error from __lock_page_killable() otherwise.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	return trylock_page(page) ? 0 : __lock_page_killable(page);
}
486
487
488
489
490
491
492
493
/*
 * lock_page_or_retry - lock the page, possibly dropping mmap_sem in the
 * slow path.  Return value and mmap_sem implications depend on @flags;
 * see __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
500
501
502
503
504
505extern void wait_on_page_bit(struct page *page, int bit_nr);
506extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
507
508
509
510
511
512
513
514
/*
 * Wait for a page to be unlocked.  The wait queue hangs off the head
 * page of a compound page, hence the compound_head() in the wait call.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}
520
/*
 * As wait_on_page_locked(), but interruptible by a fatal signal.
 * Returns 0 when the page is (or was already) unlocked, or the error
 * from wait_on_page_bit_killable().
 */
static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
527
528extern void put_and_wait_on_page_locked(struct page *page);
529
530
531
532
/*
 * Wait for any in-flight writeback of @page to complete.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
538
539extern void end_page_writeback(struct page *page);
540void wait_for_stable_page(struct page *page);
541
542void page_endio(struct page *page, bool is_write, int err);
543
544
545
546
547extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
548
549
550
551
/*
 * Fault in the userspace range [uaddr, uaddr + size) for writing by
 * poking one byte per page.  Writing zeroes is OK here because callers
 * are about to overwrite the range anyway.  Returns 0 on success or
 * -EFAULT on fault/overflow.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	/* uaddr > end means uaddr + size wrapped around the address space */
	if (unlikely(uaddr > end))
		return -EFAULT;

	/* Touch one byte in each page spanned by the range. */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range's final byte sits in a not-yet-touched page */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}
578
/*
 * Fault in the userspace range [uaddr, uaddr + size) for reading by
 * reading one byte per page into a dummy.  Returns 0 on success or
 * -EFAULT on fault/overflow.
 */
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;	/* volatile so the reads aren't optimized away */
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	/* uaddr > end means uaddr + size wrapped around the address space */
	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range's final byte sits in a not-yet-touched page */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
605
606int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
607 pgoff_t index, gfp_t gfp_mask);
608int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
609 pgoff_t index, gfp_t gfp_mask);
610extern void delete_from_page_cache(struct page *page);
611extern void __delete_from_page_cache(struct page *page, void *shadow);
612int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
613void delete_from_page_cache_batch(struct address_space *mapping,
614 struct pagevec *pvec);
615
616
617
618
619
620static inline int add_to_page_cache(struct page *page,
621 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
622{
623 int error;
624
625 __SetPageLocked(page);
626 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
627 if (unlikely(error))
628 __ClearPageLocked(page);
629 return error;
630}
631
/*
 * Number of pages needed to hold a directory inode's contents
 * (i_size rounded up to a whole page).  NOTE(review): the 64-bit sum is
 * cast to unsigned long *before* the shift, truncating on 32-bit —
 * presumably directory sizes never exceed that; confirm before changing.
 */
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}
637
638#endif
639