/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now only for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
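
/*
 * Example (editor's sketch, not part of the kernel API): a filesystem's
 * writeback completion path would typically record a failure like this,
 * so that a later fsync(2) on the file reports it.  The helper name is
 * hypothetical.
 */
static inline void example_record_write_error(struct address_space *mapping,
					      int err)
{
	if (err)
		mapping_set_error(mapping, err);
}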

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
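
/*
 * Example (editor's sketch, hypothetical helper): code that must not
 * recurse into filesystem reclaim can constrain the mapping's mask
 * instead of hard-coding one.
 */
static inline gfp_t example_nofs_gfp(struct address_space *mapping)
{
	/* Keep the mapping's allocation policy, minus __GFP_FS. */
	return mapping_gfp_constraint(mapping, GFP_NOFS);
}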

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in pagecache
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * The remove-side that cares about the stability of _refcount (eg. reclaim)
 * has the following pattern (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * Either the lookup pins the page before A runs (A then sees an elevated
 * refcount and backs off), or the removal wins and the lookup's recheck in
 * step 3 fails, forcing a retry.  Either way a page that has been freed is
 * never returned.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
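
/*
 * Example (editor's sketch): the lookup-side recheck described in the
 * comment above.  The real lookups in mm/filemap.c recheck against the
 * xarray slot; comparing ->mapping/->index here is a simplified variant
 * for illustration only.  The caller must still hold the rcu_read_lock()
 * section that produced @page.
 */
static inline int example_pin_page(struct page *page,
				   struct address_space *mapping,
				   pgoff_t offset)
{
	if (!page_cache_get_speculative(page))
		return 0;	/* page was (being) freed: caller retries */

	/* Recheck: the page may have been removed and reused meanwhile. */
	if (unlikely(page->mapping != mapping || page->index != offset)) {
		put_page(page);
		return 0;
	}
	return 1;
}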

/*
 * Same as above, but add instead of inc (could just be merged).
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
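
/*
 * Example (editor's sketch, hypothetical helper): the reference returned
 * by find_get_page() must be dropped with put_page().
 */
static inline bool example_page_is_cached(struct address_space *mapping,
					  pgoff_t offset)
{
	struct page *page = find_get_page(mapping, offset);

	if (!page)
		return false;
	put_page(page);
	return true;
}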

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
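
/*
 * Example (editor's sketch, hypothetical helper): a simple write path can
 * materialize the page it needs with find_or_create_page().  unlock_page()
 * is forward-declared here only because its declaration appears further
 * down in this header.
 */
extern void unlock_page(struct page *page);

static inline int example_touch_page(struct address_space *mapping,
				     pgoff_t index)
{
	struct page *page = find_or_create_page(mapping, index,
						mapping_gfp_mask(mapping));

	if (!page)
		return -ENOMEM;
	/* ... initialize or dirty the page here ... */
	unlock_page(page);
	put_page(page);
	return 0;
}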

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * FGP_NOFS clears __GFP_FS from the allocation, so the filesystem won't
 * be re-entered through reclaim while the caller may hold fs locks.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
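
/*
 * Example (editor's sketch, hypothetical helper): batched iteration with
 * find_get_pages().  @start is advanced by the callee, so the loop makes
 * forward progress; each returned page carries a reference that must be
 * dropped.  The batch size of 16 is arbitrary.
 */
static inline void example_walk_mapping(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned int i, nr;

	while ((nr = find_get_pages(mapping, &index, 16, pages)) != 0) {
		for (i = 0; i < nr; i++) {
			/* ... inspect pages[i] here ... */
			put_page(pages[i]);
		}
	}
}
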
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			xa_mark_t tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
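
/*
 * Example (editor's sketch, hypothetical helper, modelled on patterns
 * like ext2_get_page()): read page @n of an inode's data and map it.
 * Assumes IS_ERR() is visible via the includes above.  The caller must
 * kunmap() and put_page() when done.
 */
static inline void *example_read_and_map(struct inode *inode, pgoff_t n,
					 struct page **pagep)
{
	struct page *page = read_mapping_page(inode->i_mapping, n, NULL);

	if (IS_ERR(page))
		return NULL;	/* e.g. -EIO from the underlying read */
	*pagep = page;
	return kmap(page);
}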

/*
 * Get the index of the page within the radix tree.
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE.)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * the head page.
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE units.
 * (TODO: hugepages should have ->index in PAGE_SIZE units.)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
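
/*
 * Example (editor's sketch, hypothetical helper): translate a user
 * virtual address in a file-backed VMA into the cached page for that
 * file offset.
 */
static inline struct page *example_page_for_address(struct vm_area_struct *vma,
						    unsigned long address)
{
	pgoff_t pgoff = linear_page_index(vma, address);

	if (!vma->vm_file)
		return NULL;	/* anonymous mapping: no file cache behind it */
	return find_get_page(vma->vm_file->f_mapping, pgoff);
}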

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
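
/*
 * Example (editor's sketch): the usual killable-lock pattern.  On -EINTR
 * the caller unwinds; after taking the lock, a truncate race is detected
 * by rechecking ->mapping.  The helper name and the -ESTALE choice are
 * illustrative.
 */
static inline int example_lock_page_checked(struct page *page,
					    struct address_space *mapping)
{
	int err = lock_page_killable(page);

	if (err)
		return err;		/* fatal signal: -EINTR */
	if (page->mapping != mapping) {	/* truncated while we slept */
		unlock_page(page);
		return -ESTALE;
	}
	return 0;
}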

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with an increased refcount so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault everything in the given userspace address range in.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
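
/*
 * Example (editor's sketch, hypothetical helper): the classic pre-fault
 * pattern.  Code that copies from userspace while holding a page lock
 * cannot afford to fault mid-copy, so it faults the range in first and
 * retries if the copy still comes up short.
 */
static inline int example_prefault_readable(const char __user *buf, int len)
{
	if (fault_in_pages_readable(buf, len))
		return -EFAULT;
	/* ... lock the destination page, copy_from_user(), retry on short copy ... */
	return 0;
}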

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
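
/*
 * Example (editor's sketch, hypothetical helper): allocate a fresh page
 * and insert it, dropping our reference if someone else populated
 * @offset first (-EEXIST).  On success the page comes back locked and
 * on the LRU.
 */
static inline int example_insert_new_page(struct address_space *mapping,
					  pgoff_t offset)
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct page *page = __page_cache_alloc(gfp);
	int err;

	if (!page)
		return -ENOMEM;
	err = add_to_page_cache_lru(page, mapping, offset, gfp);
	if (err)
		put_page(page);		/* drop our allocation reference */
	else
		unlock_page(page);	/* inserted locked; release the lock */
	return err;
}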

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */