#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H
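
/*
 * Page-cache (address_space) manipulation and lookup primitives: mapping
 * flags, page allocation and lookup, page locking/waiting, and userspace
 * prefaulting helpers.
 */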
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
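
/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */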
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= __GFP_BITS_SHIFT + 4,	/* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}
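
/* Restricts the given gfp_mask to what the mapping allows. */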
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
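
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */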
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

void release_pages(struct page **pages, int nr, bool cold);
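
/*
 * Speculatively take a reference to a page.
 * If the page is free (its refcount is zero), the refcount is left untouched
 * and 0 is returned.  Otherwise the refcount is incremented and 1 is
 * returned.
 *
 * This function must be called inside the same rcu_read_lock() section as was
 * used to look the page up in the pagecache radix-tree (or page table): this
 * allows allocators to use a synchronize_rcu() to stabilize the refcount.
 *
 * The lookup side (e.g. find_get_page()) of the lockless pagecache uses the
 * following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * The remove side that cares about refcount stability (e.g. reclaim), with
 * the mapping's tree_lock held for write, atomically checks the refcount is
 * the expected value and sets it to 0 before removing the page from the
 * pagecache and freeing it.  A speculative reference taken here therefore
 * either pins a page that is still in the pagecache, or fails and is retried
 * by the caller.
 */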
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
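	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */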
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
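		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */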
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
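
/*
 * Same as page_cache_get_speculative(), but adds @count references at
 * once instead of one.
 */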
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);
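
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */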
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
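
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */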
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
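
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */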
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
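
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */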
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);
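
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */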
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
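
/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */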
static inline pgoff_t page_to_pgoff(struct page *page)
{
	pgoff_t pgoff;

	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	if (likely(!PageTransTail(page)))
		return page->index;
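	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page.
	 */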
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}
388
389
390
391
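
/*
 * Return byte-offset into filesystem object for page.
 */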
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
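
/*
 * lock_page may only be called if we have the page's inode pinned.
 */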
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
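
/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */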
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
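
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */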
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
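
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */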
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
					     int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
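
/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */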
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}
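
/*
 * Wait for a page to complete writeback.
 */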
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, int rw, int err);
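
/*
 * Add an arbitrary waiter to a page's wait queue.
 */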
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
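
/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.
 */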
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;
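	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */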
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;
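		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */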
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
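
/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted.  These walk the range one
 * page at a time, so unlike the two-page helpers above they handle
 * arbitrarily large sizes.
 */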
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;
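	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */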
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}
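
	/* Check whether the range spilled into the next page. */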
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}
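
	/* Check whether the range spilled into the next page. */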
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
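
/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */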
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */