#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};
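/*
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails, record the error in the mapping so that it can
 * later be reported to userspace, e.g. when fsync() is called.
 */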
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now at least for backward compatibility */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);
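/*
 * Speculative page cache references:
 *
 * Lockless page cache lookups can race with pages being freed and
 * re-allocated.  A speculative reference therefore only succeeds if the
 * page's refcount was not already zero (page_ref_add_unless), and the
 * caller must re-check that the page is still the one it expected (for
 * example, still present in the mapping at the right index) before
 * relying on it, dropping the reference if that check fails.
 *
 * On CONFIG_TINY_RCU (!SMP) builds no such race can occur while
 * preemption is disabled across the lookup, so the reference is taken
 * unconditionally there.
 */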
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we
	 * have found a page here, its refcount is pinned for as long as
	 * preemption stays disabled, and no "speculative get" is needed.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, bail out: the caller must retry the
		 * lookup and take whatever action is appropriate.
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

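/*
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If a page is
 * present, it is returned with an increased refcount; otherwise %NULL
 * is returned.
 */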
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

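/*
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If a page is
 * present, it is returned locked and with an increased refcount;
 * otherwise %NULL is returned.  May sleep.
 */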
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

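/*
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If no page is
 * present, a new page is allocated using @gfp_mask and added to the
 * page cache and the VM's LRU list.  The page is returned locked and
 * with an increased refcount, or %NULL on allocation failure.
 *
 * May sleep; must not be called from an atomic context.
 */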
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

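/*
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but does not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page could not be grabbed.
 */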
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

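/*
 * Returns locked page at given index in given cache, creating it if needed.
 */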
static inline struct page *grab_cache_page(struct address_space *mapping,
				pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
			pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
			pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}

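/*
 * Get the page's index in the page cache, in units of PAGE_SIZE.  For a
 * tail page of a compound page this is derived from the head page's
 * index plus the tail page's offset within the compound page.
 */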
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * the head page.
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

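/*
 * Get the page's offset into its file in units of PAGE_SIZE.  Unlike
 * page_to_index(), this also handles hugetlb pages, whose ->index is
 * kept in units of the huge page size.
 */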
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

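/*
 * Return byte-offset of a page into its filesystem object.
 */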
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

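/*
 * Return true if the page was successfully locked.
 */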
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

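/*
 * lock_page may only be called if we have the page's inode pinned.
 */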
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

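/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */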
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

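/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */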
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

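/*
 * These are exported only for wait_on_page_locked/wait_on_page_writeback,
 * etc., and should not be used directly.
 */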
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

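/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page, i.e. with an
 * elevated page refcount so that the page cannot be freed during the
 * wait.
 */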
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);

void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

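/*
 * Add an arbitrary waiter to a page's wait queue.
 */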
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

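/*
 * Fault in the pages of the given userspace address range for writing,
 * by storing a zero byte into each page.
 */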
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

569
570static inline int fault_in_pages_readable(const char __user *uaddr, int size)
571{
572 volatile char c;
573 const char __user *end = uaddr + size - 1;
574
575 if (unlikely(size == 0))
576 return 0;
577
578 if (unlikely(uaddr > end))
579 return -EFAULT;
580
581 do {
582 if (unlikely(__get_user(c, uaddr) != 0))
583 return -EFAULT;
584 uaddr += PAGE_SIZE;
585 } while (uaddr <= end);
586
587
588 if (((unsigned long)uaddr & PAGE_MASK) ==
589 ((unsigned long)end & PAGE_MASK)) {
590 return __get_user(c, end);
591 }
592
593 (void)c;
594 return 0;
595}
596
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

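/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */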
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */