1#ifndef _ASM_GENERIC_PGTABLE_H
2#define _ASM_GENERIC_PGTABLE_H
3
4#ifndef __ASSEMBLY__
5#ifdef CONFIG_MMU
6
7#include <linux/mm_types.h>
8#include <linux/bug.h>
9
10
11
12
13
14
15
16#ifndef USER_PGTABLES_CEILING
17#define USER_PGTABLES_CEILING 0UL
18#endif
19
20#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
21extern int ptep_set_access_flags(struct vm_area_struct *vma,
22 unsigned long address, pte_t *ptep,
23 pte_t entry, int dirty);
24#endif
25
26#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
27extern int pmdp_set_access_flags(struct vm_area_struct *vma,
28 unsigned long address, pmd_t *pmdp,
29 pmd_t entry, int dirty);
30#endif
31
32#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
33static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
34 unsigned long address,
35 pte_t *ptep)
36{
37 pte_t pte = *ptep;
38 int r = 1;
39 if (!pte_young(pte))
40 r = 0;
41 else
42 set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
43 return r;
44}
45#endif
46
47#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
48#ifdef CONFIG_TRANSPARENT_HUGEPAGE
49static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
50 unsigned long address,
51 pmd_t *pmdp)
52{
53 pmd_t pmd = *pmdp;
54 int r = 1;
55 if (!pmd_young(pmd))
56 r = 0;
57 else
58 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
59 return r;
60}
61#else
62static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
63 unsigned long address,
64 pmd_t *pmdp)
65{
66 BUG();
67 return 0;
68}
69#endif
70#endif
71
72#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
73int ptep_clear_flush_young(struct vm_area_struct *vma,
74 unsigned long address, pte_t *ptep);
75#endif
76
77#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
78int pmdp_clear_flush_young(struct vm_area_struct *vma,
79 unsigned long address, pmd_t *pmdp);
80#endif
81
82#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
83static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
84 unsigned long address,
85 pte_t *ptep)
86{
87 pte_t pte = *ptep;
88 pte_clear(mm, address, ptep);
89 return pte;
90}
91#endif
92
#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Snapshot the pmd, clear the entry, and return the old value. */
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);
	return old;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
105
106#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
107static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
108 unsigned long address, pte_t *ptep,
109 int full)
110{
111 pte_t pte;
112 pte = ptep_get_and_clear(mm, address, ptep);
113 return pte;
114}
115#endif
116
117
118
119
120
121
122#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
123static inline void pte_clear_not_present_full(struct mm_struct *mm,
124 unsigned long address,
125 pte_t *ptep,
126 int full)
127{
128 pte_clear(mm, address, ptep);
129}
130#endif
131
132#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
133extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
134 unsigned long address,
135 pte_t *ptep);
136#endif
137
138#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
139extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
140 unsigned long address,
141 pmd_t *pmdp);
142#endif
143
144#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
145struct mm_struct;
146static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
147{
148 pte_t old_pte = *ptep;
149 set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
150}
151#endif
152
153#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
154#ifdef CONFIG_TRANSPARENT_HUGEPAGE
155static inline void pmdp_set_wrprotect(struct mm_struct *mm,
156 unsigned long address, pmd_t *pmdp)
157{
158 pmd_t old_pmd = *pmdp;
159 set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
160}
161#else
162static inline void pmdp_set_wrprotect(struct mm_struct *mm,
163 unsigned long address, pmd_t *pmdp)
164{
165 BUG();
166}
167#endif
168#endif
169
170#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
171extern void pmdp_splitting_flush(struct vm_area_struct *vma,
172 unsigned long address, pmd_t *pmdp);
173#endif
174
175#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
176extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
177 pgtable_t pgtable);
178#endif
179
180#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
181extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
182#endif
183
184#ifndef __HAVE_ARCH_PMDP_INVALIDATE
185extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
186 pmd_t *pmdp);
187#endif
188
189#ifndef __HAVE_ARCH_PTE_SAME
190static inline int pte_same(pte_t pte_a, pte_t pte_b)
191{
192 return pte_val(pte_a) == pte_val(pte_b);
193}
194#endif
195
196#ifndef __HAVE_ARCH_PMD_SAME
197#ifdef CONFIG_TRANSPARENT_HUGEPAGE
198static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
199{
200 return pmd_val(pmd_a) == pmd_val(pmd_b);
201}
202#else
203static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
204{
205 BUG();
206 return 0;
207}
208#endif
209#endif
210
/* Generic no-op / pass-through fallbacks, overridable per architecture. */
#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(pfn) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(pte)		((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)		(prot)
#endif

/* Fall back to the (possibly also default) uncached protection. */
#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif
238
239
240
241
242
243
244
/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 * The "- 1" comparisons keep the result correct in that wrap case.
 */
#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})
#endif
263
264
265
266
267
268
269void pgd_clear_bad(pgd_t *);
270void pud_clear_bad(pud_t *);
271void pmd_clear_bad(pmd_t *);
272
273static inline int pgd_none_or_clear_bad(pgd_t *pgd)
274{
275 if (pgd_none(*pgd))
276 return 1;
277 if (unlikely(pgd_bad(*pgd))) {
278 pgd_clear_bad(pgd);
279 return 1;
280 }
281 return 0;
282}
283
284static inline int pud_none_or_clear_bad(pud_t *pud)
285{
286 if (pud_none(*pud))
287 return 1;
288 if (unlikely(pud_bad(*pud))) {
289 pud_clear_bad(pud);
290 return 1;
291 }
292 return 0;
293}
294
295static inline int pmd_none_or_clear_bad(pmd_t *pmd)
296{
297 if (pmd_none(*pmd))
298 return 1;
299 if (unlikely(pmd_bad(*pmd))) {
300 pmd_clear_bad(pmd);
301 return 1;
302 }
303 return 0;
304}
305
306static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
307 unsigned long addr,
308 pte_t *ptep)
309{
310
311
312
313
314
315 return ptep_get_and_clear(mm, addr, ptep);
316}
317
318static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
319 unsigned long addr,
320 pte_t *ptep, pte_t pte)
321{
322
323
324
325
326 set_pte_at(mm, addr, ptep, pte);
327}
328
329#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
345 unsigned long addr,
346 pte_t *ptep)
347{
348 return __ptep_modify_prot_start(mm, addr, ptep);
349}
350
351
352
353
354
355static inline void ptep_modify_prot_commit(struct mm_struct *mm,
356 unsigned long addr,
357 pte_t *ptep, pte_t pte)
358{
359 __ptep_modify_prot_commit(mm, addr, ptep, pte);
360}
361#endif
362#endif
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
/*
 * Lazy MMU-update hooks: architectures that batch page-table updates
 * (e.g. under a hypervisor) override these; the default is a no-op.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif

/* Hook invoked at the start of a context switch; default is a no-op. */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif
399
400#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
401static inline int pte_soft_dirty(pte_t pte)
402{
403 return 0;
404}
405
406static inline int pmd_soft_dirty(pmd_t pmd)
407{
408 return 0;
409}
410
411static inline pte_t pte_mksoft_dirty(pte_t pte)
412{
413 return pte;
414}
415
416static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
417{
418 return pmd;
419}
420
421static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
422{
423 return pte;
424}
425
426static inline int pte_swp_soft_dirty(pte_t pte)
427{
428 return 0;
429}
430
431static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
432{
433 return pte;
434}
435
436static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
437{
438 return pte;
439}
440
441static inline pte_t pte_file_mksoft_dirty(pte_t pte)
442{
443 return pte;
444}
445
446static inline int pte_file_soft_dirty(pte_t pte)
447{
448 return 0;
449}
450#endif
451
452#ifndef __HAVE_PFNMAP_TRACKING
453
454
455
456
457
458
459
460
461
462
463static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
464 unsigned long pfn, unsigned long addr,
465 unsigned long size)
466{
467 return 0;
468}
469
470
471
472
473
474static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
475 unsigned long pfn)
476{
477 return 0;
478}
479
480
481
482
483
484static inline int track_pfn_copy(struct vm_area_struct *vma)
485{
486 return 0;
487}
488
489
490
491
492
493
494static inline void untrack_pfn(struct vm_area_struct *vma,
495 unsigned long pfn, unsigned long size)
496{
497}
498#else
499extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
500 unsigned long pfn, unsigned long addr,
501 unsigned long size);
502extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
503 unsigned long pfn);
504extern int track_pfn_copy(struct vm_area_struct *vma);
505extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
506 unsigned long size);
507#endif
508
#ifdef __HAVE_COLOR_ZERO_PAGE
/*
 * With colored zero pages there is a range of zero pfns starting at
 * zero_pfn; the unsigned subtraction also rejects pfn < zero_pfn.
 */
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;

	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
/* Single shared zero page: compare against its pfn directly. */
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;

	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;

	return zero_pfn;
}
#endif /* __HAVE_COLOR_ZERO_PAGE */
532
533#ifdef CONFIG_MMU
534
535#ifndef CONFIG_TRANSPARENT_HUGEPAGE
536static inline int pmd_trans_huge(pmd_t pmd)
537{
538 return 0;
539}
540static inline int pmd_trans_splitting(pmd_t pmd)
541{
542 return 0;
543}
544#ifndef __HAVE_ARCH_PMD_WRITE
545static inline int pmd_write(pmd_t pmd)
546{
547 BUG();
548 return 0;
549}
550#endif
551#endif
552
553#ifndef pmd_read_atomic
554static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
555{
556
557
558
559
560
561 return *pmdp;
562}
563#endif
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
587{
588 pmd_t pmdval = pmd_read_atomic(pmd);
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603#ifdef CONFIG_TRANSPARENT_HUGEPAGE
604 barrier();
605#endif
606 if (pmd_none(pmdval))
607 return 1;
608 if (unlikely(pmd_bad(pmdval))) {
609 if (!pmd_trans_huge(pmdval))
610 pmd_clear_bad(pmd);
611 return 1;
612 }
613 return 0;
614}
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629static inline int pmd_trans_unstable(pmd_t *pmd)
630{
631#ifdef CONFIG_TRANSPARENT_HUGEPAGE
632 return pmd_none_or_trans_huge_or_clear_bad(pmd);
633#else
634 return 0;
635#endif
636}
637
638#ifdef CONFIG_NUMA_BALANCING
639#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
640
641
642
643
644
645
646
647
648
649#ifndef pte_numa
650static inline int pte_numa(pte_t pte)
651{
652 return (pte_flags(pte) &
653 (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
654}
655#endif
656
657#ifndef pmd_numa
658static inline int pmd_numa(pmd_t pmd)
659{
660 return (pmd_flags(pmd) &
661 (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
662}
663#endif
664
665
666
667
668
669
670
671
672
673#ifndef pte_mknonnuma
674static inline pte_t pte_mknonnuma(pte_t pte)
675{
676 pte = pte_clear_flags(pte, _PAGE_NUMA);
677 return pte_set_flags(pte, _PAGE_PRESENT|_PAGE_ACCESSED);
678}
679#endif
680
681#ifndef pmd_mknonnuma
682static inline pmd_t pmd_mknonnuma(pmd_t pmd)
683{
684 pmd = pmd_clear_flags(pmd, _PAGE_NUMA);
685 return pmd_set_flags(pmd, _PAGE_PRESENT|_PAGE_ACCESSED);
686}
687#endif
688
689#ifndef pte_mknuma
690static inline pte_t pte_mknuma(pte_t pte)
691{
692 pte = pte_set_flags(pte, _PAGE_NUMA);
693 return pte_clear_flags(pte, _PAGE_PRESENT);
694}
695#endif
696
697#ifndef pmd_mknuma
698static inline pmd_t pmd_mknuma(pmd_t pmd)
699{
700 pmd = pmd_set_flags(pmd, _PAGE_NUMA);
701 return pmd_clear_flags(pmd, _PAGE_PRESENT);
702}
703#endif
704#else
705extern int pte_numa(pte_t pte);
706extern int pmd_numa(pmd_t pmd);
707extern pte_t pte_mknonnuma(pte_t pte);
708extern pmd_t pmd_mknonnuma(pmd_t pmd);
709extern pte_t pte_mknuma(pte_t pte);
710extern pmd_t pmd_mknuma(pmd_t pmd);
711#endif
712#else
713static inline int pmd_numa(pmd_t pmd)
714{
715 return 0;
716}
717
718static inline int pte_numa(pte_t pte)
719{
720 return 0;
721}
722
723static inline pte_t pte_mknonnuma(pte_t pte)
724{
725 return pte;
726}
727
728static inline pmd_t pmd_mknonnuma(pmd_t pmd)
729{
730 return pmd;
731}
732
733static inline pte_t pte_mknuma(pte_t pte)
734{
735 return pte;
736}
737
738static inline pmd_t pmd_mknuma(pmd_t pmd)
739{
740 return pmd;
741}
742#endif
743
744#endif
745
746#endif
747
/* By default, remapping I/O pfns is the same as ordinary pfn remapping. */
#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif
751
752#endif
753