#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H



#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>
#include <linux/hugetlb_inline.h>

struct pagevec;
20
21
22
23
/*
 * Bits in mapping->flags (manipulated with set_bit()/test_bit() below).
 */
enum mapping_flags {
	AS_EIO = 0,		/* async write I/O error, see mapping_set_error() */
	AS_ENOSPC = 1,		/* -ENOSPC from async write, see mapping_set_error() */
	AS_MM_ALL_LOCKS = 2,	/* NOTE(review): presumably under mm_take_all_locks() -- confirm */
	AS_UNEVICTABLE = 3,	/* pages must not be reclaimed, see mapping_set_unevictable() */
	AS_EXITING = 4,		/* set by mapping_set_exiting() */

	AS_NO_WRITEBACK_TAGS = 5, /* inverse sense: see mapping_use_writeback_tags() */
};
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * Records @error both in the errseq_t-based wb_err mechanism and in the
 * legacy AS_EIO/AS_ENOSPC flag bits, so that either style of consumer
 * sees it.  A zero @error is a no-op.
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for legacy AS_EIO/AS_ENOSPC consumers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
62
/* Mark the mapping so its pages are treated as unevictable. */
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}
67
/* Undo mapping_set_unevictable(): pages become reclaimable again. */
static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
72
73static inline int mapping_unevictable(struct address_space *mapping)
74{
75 if (mapping)
76 return test_bit(AS_UNEVICTABLE, &mapping->flags);
77 return !!mapping;
78}
79
/* Set AS_EXITING; paired with mapping_exiting() below. */
static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}
84
/* Test AS_EXITING, set by mapping_set_exiting(). */
static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}
89
/* Disable writeback tagging for this mapping; see mapping_use_writeback_tags(). */
static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
94
95static inline int mapping_use_writeback_tags(struct address_space *mapping)
96{
97 return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
98}
99
100static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
101{
102 return mapping->gfp_mask;
103}
104
105
106static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
107 gfp_t gfp_mask)
108{
109 return mapping_gfp_mask(mapping) & gfp_mask;
110}
111
112
113
114
115
/*
 * Set the gfp mask for page-cache allocations in @m.
 * NOTE(review): plain (non-atomic) store -- presumably only safe before
 * the mapping is in active use; confirm against callers.
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}
120
void release_pages(struct page **pages, int nr);
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
/*
 * Speculatively add @count references to @page during a lockless
 * page-cache lookup.
 *
 * Returns 1 on success.  Returns 0 if the page is being (or has been)
 * freed, in which case the caller must retry its lookup.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * With TINY_RCU, grace periods are driven by preemption points,
	 * so while we run non-preemptibly the page cannot be freed under
	 * us and a plain reference bump is sufficient.
	 *
	 * NOTE(review): assumes the caller runs in an RCU read-side /
	 * non-preemptible context -- confirm at the lookup call sites.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * The refcount hit zero: the page has been freed or is
		 * about to be.  Tell the caller to retry the lookup.
		 */
		return 0;
	}
#endif
	/* A successful speculative ref should never land on a tail page. */
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
199
/* Take one speculative reference; see __page_cache_add_speculative(). */
static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}
204
/* Take @count speculative references; see __page_cache_add_speculative(). */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
209
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
/* Non-NUMA: a plain order-0 allocation is all that is needed. */
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif
218
219static inline struct page *page_cache_alloc(struct address_space *x)
220{
221 return __page_cache_alloc(mapping_gfp_mask(x));
222}
223
/*
 * gfp mask for readahead allocations: readahead is best-effort, so do
 * not retry hard (__GFP_NORETRY) and do not warn on failure
 * (__GFP_NOWARN).
 */
static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
228
typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);
246
247
248
249
250
251
252
253
254
255
256
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If a page is
 * present it is returned with its refcount elevated; otherwise %NULL.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
262
/* Like find_get_page(), but with caller-supplied FGP_* flags. */
static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
268
269
270
271
272
273
274
275
276
277
278
279
280
281
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If a page is
 * present it is returned locked and with its refcount elevated;
 * otherwise %NULL.  May sleep while taking the page lock.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If no page is
 * found, a new one is allocated with @gfp_mask and added to the cache.
 * On success the page is returned locked, with an elevated refcount,
 * and marked accessed.  May sleep.
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
314
315
316
317
318
319
320
321
322
323
324
325
326
327
/*
 * Same as grab_cache_page(), but never waits for the page lock or for
 * memory (FGP_NOWAIT, FGP_NOFS): returns %NULL instead.  Intended for
 * callers that can regenerate the data if the page cannot be grabbed.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}
335
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
/* Unbounded variant of find_get_pages_range(): scan from *start to EOF. */
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
/* Unbounded variant of find_get_pages_range_tag(): scan from *index to EOF. */
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
363
struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);
366
367
368
369
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
				pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
375
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			struct list_head *pages, filler_t *filler, void *data);
382
/*
 * Read the page at @index in @mapping using the mapping's own ->readpage
 * as the filler.  The cast assumes ->readpage is call-compatible with
 * filler_t (void * first argument) -- NOTE(review): confirm this holds
 * for every a_ops used with this helper.
 */
static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
389
390
391
392
393
394static inline pgoff_t page_to_index(struct page *page)
395{
396 pgoff_t pgoff;
397
398 if (likely(!PageTransTail(page)))
399 return page->index;
400
401
402
403
404
405 pgoff = compound_head(page)->index;
406 pgoff += page - compound_head(page);
407 return pgoff;
408}
409
410
411
412
413
/*
 * Get the page's offset in the mapping in PAGE_SIZE units, even for
 * hugetlb pages (whose ->index is kept in huge-page-sized units, hence
 * the shift by compound_order()).
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}
421
422
423
424
/* Byte offset of the page in its mapping (page->index is in PAGE_SIZE units). */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}
429
/*
 * Like page_offset(), but goes through page_index() rather than reading
 * page->index directly -- the two differ for special pages; see
 * page_index() for the exact semantics.
 */
static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
434
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);
437
438static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
439 unsigned long address)
440{
441 pgoff_t pgoff;
442 if (unlikely(is_vm_hugetlb_page(vma)))
443 return linear_hugepage_index(vma, address);
444 pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
445 pgoff += vma->vm_pgoff;
446 return pgoff;
447}
448
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);
454
/*
 * Try to lock the page without sleeping.  The lock bit always lives on
 * the head page of a compound page.  Returns 1 if the lock was taken,
 * 0 if someone else holds it.
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
460
461
462
463
/*
 * Lock the page, sleeping in __lock_page() if it is already locked.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
470
471
472
473
474
475
/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  Returns 0 if it locked the page; a negative error from
 * __lock_page_killable() if it was killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
483
484
485
486
487
488
489
490
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated (via @flags) that it can handle a retry.
 *
 * Return value and mmap semantics depend on @flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
497
498
499
500
501
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
504
505
506
507
508
509
510
511
/*
 * Wait for a page to become unlocked.  The caller must hold a reference
 * on the page so it cannot be freed while we wait.  The wait queue is
 * keyed on the head page of a compound page.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}
517
518static inline int wait_on_page_locked_killable(struct page *page)
519{
520 if (!PageLocked(page))
521 return 0;
522 return wait_on_page_bit_killable(compound_head(page), PG_locked);
523}
524
extern void put_and_wait_on_page_locked(struct page *page);

void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);
532
533
534
535
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
537
538
539
540
/*
 * Fault in the user address range [uaddr, uaddr + size) for writing by
 * storing a zero byte into each page it spans.  Returns 0 on success,
 * -EFAULT if any byte cannot be written.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	/* uaddr > end means uaddr + size wrapped around the address space */
	if (unlikely(uaddr > end))
		return -EFAULT;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* The loop may have stepped past end's page; touch it if not. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}
567
/*
 * Fault in the user address range [uaddr, uaddr + size) for reading by
 * loading one byte from each page it spans.  Returns 0 on success,
 * -EFAULT if any byte cannot be read.
 */
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;	/* volatile: the dummy reads must not be elided */
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	/* uaddr > end means uaddr + size wrapped around the address space */
	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* The loop may have stepped past end's page; touch it if not. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;	/* silence set-but-unused warning */
	return 0;
}
594
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);
604
605
606
607
608
/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it (the
 * non-atomic variant; nobody else can see the page yet).  On failure the
 * lock bit is cleared again before returning the error.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
620
/*
 * Number of pages needed to cover i_size, rounded up.
 * NOTE(review): i_size (loff_t) is cast to unsigned long before the
 * shift, which truncates sizes above 4GB on 32-bit -- presumably fine
 * for directories, but confirm no caller passes huge files.
 */
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}
626
#endif /* _LINUX_PAGEMAP_H */
628