1#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
2#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
3
4#include <asm-generic/5level-fixup.h>
5
6#ifndef __ASSEMBLY__
7#include <linux/mmdebug.h>
8#endif
9
10
11
12
/*
 * Linux PTE format bits common to the hash and radix MMU back-ends.
 * Low bits are permission/state; high bits are software/reserved.
 */
#define _PAGE_BIT_SWAP_TYPE	0

#define _PAGE_RO		0

#define _PAGE_EXEC		0x00001 /* execute permission */
#define _PAGE_WRITE		0x00002 /* write access allowed */
#define _PAGE_READ		0x00004 /* read access allowed */
#define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED	0x00008 /* kernel-only access */
#define _PAGE_SAO		0x00010 /* strong access ordering */
#define _PAGE_NON_IDEMPOTENT	0x00020 /* non-idempotent (I/O) memory */
#define _PAGE_TOLERANT		0x00030 /* tolerant cache-inhibited memory */
#define _PAGE_DIRTY		0x00080 /* page has been written to */
#define _PAGE_ACCESSED		0x00100 /* page has been referenced */

/*
 * Software bits available to Linux, plus reserved high bits.
 */
#define _RPAGE_SW0		0x2000000000000000UL
#define _RPAGE_SW1		0x00800
#define _RPAGE_SW2		0x00400
#define _RPAGE_SW3		0x00200
#define _RPAGE_RSV1		0x1000000000000000UL
#define _RPAGE_RSV2		0x0800000000000000UL
#define _RPAGE_RSV3		0x0400000000000000UL
#define _RPAGE_RSV4		0x0200000000000000UL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	_RPAGE_SW3 /* software dirty tracking */
#else
#define _PAGE_SOFT_DIRTY	0x00000
#endif
#define _PAGE_SPECIAL		_RPAGE_SW2 /* software: special page */

/*
 * Reserved bit used as a huge-page marker.
 * NOTE(review): exact consumer of _PAGE_LARGE is outside this file — confirm.
 */
#define _PAGE_LARGE	_RPAGE_RSV1

/* Top two bits: distinguishes a leaf PTE and marks it valid. */
#define _PAGE_PTE		(1ul << 62)
#define _PAGE_PRESENT		(1ul << 63)
55
56
57
58
59
/* Cache-inhibited alias used by generic code. */
#define _PAGE_NO_CACHE		_PAGE_TOLERANT

/*
 * Bits of the PTE that hold the real page number (physical address):
 * everything below bit 57 minus the low PAGE_SHIFT offset bits.
 */
#define PTE_RPN_MASK	(((1UL << 57) - 1) & (PAGE_MASK))

/*
 * Bits preserved across pmd_modify()-style changes of a THP huge page:
 * address, hash flags, dirty/accessed, the THP markers and soft-dirty.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
			 _PAGE_SOFT_DIRTY)
73
74
75
/*
 * Kernel-mode protections: all carry _PAGE_PRIVILEGED so user mode
 * cannot access these mappings.
 *
 * Fix: _PAGE_KERNEL_ROX was referenced by PAGE_KERNEL_ROX below but
 * never defined, breaking the build; define it as RO plus EXEC,
 * mirroring how _PAGE_KERNEL_RWX extends _PAGE_KERNEL_RW.
 */
#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO		 (_PAGE_PRIVILEGED | _PAGE_READ)
#define _PAGE_KERNEL_ROX	 (_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY |	\
				 _PAGE_RW | _PAGE_EXEC)

/* No page size encoding in the linux PTE. */
#define _PAGE_PSIZE		0
84
85
86
87
/*
 * Bits preserved across pte_modify(): address, hash flags,
 * dirty/accessed/special state, the PTE marker and soft-dirty.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE |	\
			 _PAGE_SOFT_DIRTY)

/*
 * Mask of bits returned by pte_pgprot(): caching, permission and
 * state bits that make up a pgprot value.
 */
#define PAGE_PROT_BITS  (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
			 H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
			 _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
			 _PAGE_SOFT_DIRTY)

/* Base flags every valid mapping starts from. */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
#define _PAGE_BASE	(_PAGE_BASE_NC)
106
107
108
109
110
111
112
113
114
115
116
/*
 * User protection variants.  PAGE_NONE is privileged-only so user
 * accesses fault; COPY variants are read-only (write triggers COW).
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)

/* Private (copy-on-write) mmap() protection table. */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

/* Shared mmap() protection table. */
#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X

/* Kernel mappings, cached and uncached variants. */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_TOLERANT)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NON_IDEMPOTENT)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
152
153
154
155
156
157
/*
 * Kernel text must stay writable when a debugger/patching facility
 * may rewrite instructions (kgdb, xmon, kprobes, ftrace); otherwise
 * map it read-only + exec.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make modules code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
#define PAGE_AGP		(PAGE_KERNEL_NC)
168
169#ifndef __ASSEMBLY__
170
171
172
/*
 * Page-table geometry is chosen at boot (hash vs radix), so the
 * index sizes are runtime variables read through these macros.
 */
extern unsigned long __pte_index_size;
extern unsigned long __pmd_index_size;
extern unsigned long __pud_index_size;
extern unsigned long __pgd_index_size;
extern unsigned long __pmd_cache_index;
#define PTE_INDEX_SIZE  __pte_index_size
#define PMD_INDEX_SIZE  __pmd_index_size
#define PUD_INDEX_SIZE  __pud_index_size
#define PGD_INDEX_SIZE  __pgd_index_size
#define PMD_CACHE_INDEX __pmd_cache_index

/* Byte sizes of each table level, also set up at boot. */
extern unsigned long __pte_table_size;
extern unsigned long __pmd_table_size;
extern unsigned long __pud_table_size;
extern unsigned long __pgd_table_size;
#define PTE_TABLE_SIZE	__pte_table_size
#define PMD_TABLE_SIZE	__pmd_table_size
#define PUD_TABLE_SIZE	__pud_table_size
#define PGD_TABLE_SIZE	__pgd_table_size

/* Valid-bit patterns stored in upper-level entries. */
extern unsigned long __pmd_val_bits;
extern unsigned long __pud_val_bits;
extern unsigned long __pgd_val_bits;
#define PMD_VAL_BITS	__pmd_val_bits
#define PUD_VAL_BITS	__pud_val_bits
#define PGD_VAL_BITS	__pgd_val_bits

/* PTE page fragmentation parameters (several PTE tables per page). */
extern unsigned long __pte_frag_nr;
#define PTE_FRAG_NR __pte_frag_nr
extern unsigned long __pte_frag_size_shift;
#define PTE_FRAG_SIZE_SHIFT __pte_frag_size_shift
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)

/* Worst-case PGD allocation size (radix geometry). */
#define MAX_PGD_TABLE_SIZE (sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/* PMD_SHIFT determines what a second-level page table entry can map. */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PUD_SHIFT determines what a third-level page table entry can map. */
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))

/* PGDIR_SHIFT determines what a fourth-level page table entry can map. */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Bits to mask out from a PMD to get to the PTE page. */
#define PMD_MASKED_BITS		0xc0000000000000ffUL
/* Bits to mask out from a PUD to get to the PMD page. */
#define PUD_MASKED_BITS		0xc0000000000000ffUL
/* Bits to mask out from a PGD to get to the PUD page. */
#define PGD_MASKED_BITS		0xc0000000000000ffUL

/* vmalloc region bounds, chosen at boot for the active MMU. */
extern unsigned long __vmalloc_start;
extern unsigned long __vmalloc_end;
#define VMALLOC_START	__vmalloc_start
#define VMALLOC_END	__vmalloc_end

/* Kernel virtual-address region bounds. */
extern unsigned long __kernel_virt_start;
extern unsigned long __kernel_virt_size;
#define KERN_VIRT_START __kernel_virt_start
#define KERN_VIRT_SIZE  __kernel_virt_size
extern struct page *vmemmap;
extern unsigned long ioremap_bot;
extern unsigned long pci_io_base;
252#endif
253
254#include <asm/book3s/64/hash.h>
255#include <asm/book3s/64/radix.h>
256
257#ifdef CONFIG_PPC_64K_PAGES
258#include <asm/book3s/64/pgtable-64k.h>
259#else
260#include <asm/book3s/64/pgtable-4k.h>
261#endif
262
263#include <asm/barrier.h>
264
265
266
267
268
269
270
271
272
/*
 * The second half of the kernel virtual space is used for IO mappings:
 * ISA space, then PHB IO space, then the general ioremap region.
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL
287
288#ifndef __ASSEMBLY__
289
290
291
292
293
294
/*
 * Fallback definitions when no subpage (4K-in-64K) support is present:
 * a real_pte is just the pte itself and there is a single "subpage".
 */
#ifndef __real_pte

#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)

/* Single iteration: index 0, shift from the psize table. */
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
	do {							         \
		index = 0;					         \
		shift = mmu_psize_defs[psize].shift;		         \

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */
315
316static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
317 pte_t *ptep, unsigned long clr,
318 unsigned long set, int huge)
319{
320 if (radix_enabled())
321 return radix__pte_update(mm, addr, ptep, clr, set, huge);
322 return hash__pte_update(mm, addr, ptep, clr, set, huge);
323}
324
325
326
327
328
329
330
331
332
333
/*
 * Test and clear the referenced bit.  Fast path: if neither ACCESSED
 * nor a hash-side HPTE is recorded there is nothing to clear, so we
 * avoid the (atomic) pte_update() entirely.
 * Returns non-zero if the page had been referenced.
 */
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
/* vma variant: simply forwards to the mm-based helper above. */
#define ptep_test_and_clear_young(__vma, __addr, __ptep)	\
({								\
	int __r;						\
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;							\
})
352
353static inline int __pte_write(pte_t pte)
354{
355 return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
356}
357
#ifdef CONFIG_NUMA_BALANCING
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
{
	/*
	 * A "saved write" pte is a NUMA protnone pte whose write intent
	 * is recorded out of band: all of R/W/X and PRIVILEGED clear.
	 * See pte_mk_savedwrite() below, which produces this encoding.
	 */
	return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
}
#else
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
{
	/* No NUMA balancing: the concept does not exist. */
	return false;
}
#endif

/* Writable if the WRITE bit is set, or the write intent was saved. */
static inline int pte_write(pte_t pte)
{
	return __pte_write(pte) || pte_savedwrite(pte);
}
383
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
/*
 * Write-protect a pte: clear the WRITE bit, or — for a savedwrite
 * (NUMA protnone) pte — set PRIVILEGED, which destroys the saved
 * write encoding (see pte_savedwrite()).
 */
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if (__pte_write(*ptep))
		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
	else if (unlikely(pte_savedwrite(*ptep)))
		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
}

/* Same as above but flags the update as a huge-page pte (huge = 1). */
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if (__pte_write(*ptep))
		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
	else if (unlikely(pte_savedwrite(*ptep)))
		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
}
406
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
/* Atomically read and zero a pte (clear mask = all bits). */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
/*
 * "full" variant used when tearing down an entire address space;
 * radix has a cheaper path for that case, otherwise fall back to
 * the normal get-and-clear.
 */
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full && radix_enabled()) {
		return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
	}
	return ptep_get_and_clear(mm, addr, ptep);
}
430
431
/* Zero the pte through pte_update() so the MMU back-end sees it. */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t * ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
437
438static inline int pte_dirty(pte_t pte)
439{
440 return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
441}
442
443static inline int pte_young(pte_t pte)
444{
445 return !!(pte_raw(pte) & cpu_to_be64(_PAGE_ACCESSED));
446}
447
448static inline int pte_special(pte_t pte)
449{
450 return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
451}
452
453static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
454
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
/* Software dirty-tracking accessors (see _PAGE_SOFT_DIRTY). */
static inline bool pte_soft_dirty(pte_t pte)
{
	return (pte_raw(pte) & cpu_to_be64(_PAGE_SOFT_DIRTY)) != 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	unsigned long v = pte_val(pte);

	return __pte(v | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	unsigned long v = pte_val(pte);

	return __pte(v & ~_PAGE_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
471
#ifdef CONFIG_NUMA_BALANCING
/*
 * A protnone pte is present and a leaf PTE, but grants no R/W/X
 * access at all.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
}

#define pte_mk_savedwrite pte_mk_savedwrite
static inline pte_t pte_mk_savedwrite(pte_t pte)
{
	/*
	 * Record the write intent of a protnone pte by clearing
	 * PRIVILEGED; the VM_BUG_ON enforces the required starting
	 * state: present, no R/W/X, PRIVILEGED set.
	 */
	VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
		  cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
	return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
}

#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
	/*
	 * Undo pte_mk_savedwrite() by restoring PRIVILEGED; only
	 * valid on a protnone pte.
	 */
	VM_BUG_ON(!pte_protnone(pte));
	return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
}
#else
#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
	/* Should never be called without NUMA balancing. */
	VM_WARN_ON(1);
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif /* CONFIG_NUMA_BALANCING */
509
510static inline int pte_present(pte_t pte)
511{
512 return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
513}
514
515
516
517
518
519
520
/*
 * Conversion functions: build a pte from a page frame number and
 * protection bits, and extract the pfn back out.  The pfn is shifted
 * into the RPN field and masked to it.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte((((pte_basic_t)(pfn) << PAGE_SHIFT) & PTE_RPN_MASK) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
}
531
532
/*
 * Remove write permission.  A savedwrite (NUMA protnone) pte keeps
 * its write intent out of band, so drop that encoding instead of
 * clearing the (already clear) WRITE bit.
 */
static inline pte_t pte_wrprotect(pte_t pte)
{
	if (unlikely(pte_savedwrite(pte)))
		return pte_clear_savedwrite(pte);
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
539
540static inline pte_t pte_mkclean(pte_t pte)
541{
542 return __pte(pte_val(pte) & ~_PAGE_DIRTY);
543}
544
545static inline pte_t pte_mkold(pte_t pte)
546{
547 return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
548}
549
550static inline pte_t pte_mkwrite(pte_t pte)
551{
552
553
554
555 return __pte(pte_val(pte) | _PAGE_RW);
556}
557
558static inline pte_t pte_mkdirty(pte_t pte)
559{
560 return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
561}
562
563static inline pte_t pte_mkyoung(pte_t pte)
564{
565 return __pte(pte_val(pte) | _PAGE_ACCESSED);
566}
567
568static inline pte_t pte_mkspecial(pte_t pte)
569{
570 return __pte(pte_val(pte) | _PAGE_SPECIAL);
571}
572
573static inline pte_t pte_mkhuge(pte_t pte)
574{
575 return pte;
576}
577
578static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
579{
580
581 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
582}
583
584static inline bool pte_user(pte_t pte)
585{
586 return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
587}
588
589
/*
 * Swap-entry encoding inside a (non-present) pte.  Verify at build
 * time that the swap type bits and the swap soft-dirty bit do not
 * collide with the hash HPTE flags.
 */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);	\
	} while (0)

/* Swap type stored in the low bits; offset in the RPN field. */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		(((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
				((type) << _PAGE_BIT_SWAP_TYPE) \
				| (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})

/*
 * _PAGE_PTE is not part of the swp_entry value; it is stripped on the
 * way out and re-added on the way back in.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)

#ifdef CONFIG_MEM_SOFT_DIRTY
/* Soft-dirty bit for swap entries: just above the swap type field. */
#define _PAGE_SWP_SOFT_DIRTY   (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
/* Soft-dirty accessors for swap ptes. */
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
640
/*
 * Check whether the access bits requested in @access are permitted
 * by the pte value @ptev.
 */
static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
	/*
	 * This check for _PAGE_RWX and _PAGE_PRESENT bits: any
	 * requested bit missing from the pte denies the access.
	 */
	if (access & ~ptev)
		return false;
	/*
	 * This check for access to privileged space: privilege level
	 * of request and pte must agree.
	 */
	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
		return false;

	return true;
}
656
657
658
659
/*
 * Generic functions with hash/radix callbacks.
 */
static inline void __ptep_set_access_flags(struct mm_struct *mm,
					   pte_t *ptep, pte_t entry,
					   unsigned long address)
{
	if (radix_enabled())
		return radix__ptep_set_access_flags(mm, ptep, entry, address);
	return hash__ptep_set_access_flags(ptep, entry);
}

#define __HAVE_ARCH_PTE_SAME
/* Two ptes compare equal per the active back-end's definition. */
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	if (radix_enabled())
		return radix__pte_same(pte_a, pte_b);
	return hash__pte_same(pte_a, pte_b);
}

/* Empty-pte test, back-end specific. */
static inline int pte_none(pte_t pte)
{
	if (radix_enabled())
		return radix__pte_none(pte);
	return hash__pte_none(pte);
}

/* Install a pte; @percpu selects the per-cpu (early/boot) path. */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	if (radix_enabled())
		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
}
691
692#define _PAGE_CACHE_CTL (_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
693
694#define pgprot_noncached pgprot_noncached
695static inline pgprot_t pgprot_noncached(pgprot_t prot)
696{
697 return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
698 _PAGE_NON_IDEMPOTENT);
699}
700
701#define pgprot_noncached_wc pgprot_noncached_wc
702static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
703{
704 return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
705 _PAGE_TOLERANT);
706}
707
708#define pgprot_cached pgprot_cached
709static inline pgprot_t pgprot_cached(pgprot_t prot)
710{
711 return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL));
712}
713
714#define pgprot_writecombine pgprot_writecombine
715static inline pgprot_t pgprot_writecombine(pgprot_t prot)
716{
717 return pgprot_noncached_wc(prot);
718}
719
720
721
722static inline bool pte_ci(pte_t pte)
723{
724 unsigned long pte_v = pte_val(pte);
725
726 if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
727 ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
728 return true;
729 return false;
730}
731
/* Store a raw value into a pmd entry. */
static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

/* Empty the pmd entry. */
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_raw(pmd);
}

static inline int pmd_present(pmd_t pmd)
{
	/* Any non-empty pmd is considered present at this level. */
	return !pmd_none(pmd);
}

/* Sanity-check a pmd entry, back-end specific. */
static inline int pmd_bad(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_bad(pmd);
	return hash__pmd_bad(pmd);
}
759
/* Store a raw value into a pud entry. */
static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

/* Empty the pud entry. */
static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

static inline int pud_none(pud_t pud)
{
	return !pud_raw(pud);
}

static inline int pud_present(pud_t pud)
{
	return !pud_none(pud);
}

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);

/* Reinterpret a pud as a pte (same raw layout) and back. */
static inline pte_t pud_pte(pud_t pud)
{
	return __pte_raw(pud_raw(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud_raw(pte_raw(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))

/* Sanity-check a pud entry, back-end specific. */
static inline int pud_bad(pud_t pud)
{
	if (radix_enabled())
		return radix__pud_bad(pud);
	return hash__pud_bad(pud);
}
799
800
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))

/* Store a raw value into a pgd entry. */
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

/* Empty the pgd entry. */
static inline void pgd_clear(pgd_t *pgdp)
{
	*pgdp = __pgd(0);
}

static inline int pgd_none(pgd_t pgd)
{
	return !pgd_raw(pgd);
}

static inline int pgd_present(pgd_t pgd)
{
	return !pgd_none(pgd);
}

/* Reinterpret a pgd as a pte (same raw layout) and back. */
static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte_raw(pgd_raw(pgd));
}

static inline pgd_t pte_pgd(pte_t pte)
{
	return __pgd_raw(pte_raw(pte));
}

/* Sanity-check a pgd entry, back-end specific. */
static inline int pgd_bad(pgd_t pgd)
{
	if (radix_enabled())
		return radix__pgd_bad(pgd);
	return hash__pgd_bad(pgd);
}

extern struct page *pgd_page(pgd_t pgd);
840
841
/* Pointers in the page table tree are physical addresses. */
#define __pgtable_ptr_val(ptr)	__pa(ptr)

/* Virtual address of the next-level table, masking the flag bits. */
#define pmd_page_vaddr(pmd)	__va(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
#define pgd_page_vaddr(pgd)	__va(pgd_val(pgd) & ~PGD_MASKED_BITS)

/* Index of a virtual address within each table level. */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))

/*
 * Find an entry in a page-table-directory.  We combine the address
 * region (which can only be the top half of the space) and the index.
 */
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pud_offset(pgdp, addr)	\
	(((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

/* 64-bit: no highmem, so "mapping" a pte is trivial. */
#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Diagnostics for corrupt table entries. */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
882
/*
 * Create a kernel mapping of @pa at virtual address @ea with @flags,
 * dispatching to the radix or hash implementation.
 */
static inline int map_kernel_page(unsigned long ea, unsigned long pa,
				  unsigned long flags)
{
	if (radix_enabled()) {
#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
		/*
		 * Radix path always maps at PAGE_SIZE; warn if the I/O
		 * page size differs.
		 * NOTE(review): `DEBUG_VM` vs `CONFIG_DEBUG_VM` — confirm
		 * the intended config symbol; as written this depends on
		 * DEBUG_VM being defined directly.
		 */
		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
#endif
		return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
	}
	return hash__map_kernel_page(ea, pa, flags);
}
895
/* Create the vmemmap mapping for a hot-added/boot memory range. */
static inline int __meminit vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys)
{
	if (radix_enabled())
		return radix__vmemmap_create_mapping(start, page_size, phys);
	return hash__vmemmap_create_mapping(start, page_size, phys);
}

#ifdef CONFIG_MEMORY_HOTPLUG
/* Tear down a vmemmap mapping on memory removal. */
static inline void vmemmap_remove_mapping(unsigned long start,
					  unsigned long page_size)
{
	if (radix_enabled())
		return radix__vmemmap_remove_mapping(start, page_size);
	return hash__vmemmap_remove_mapping(start, page_size);
}
#endif
struct page *realmode_pfn_to_page(unsigned long pfn);
915
/* Reinterpret a pmd as a pte (same raw layout) and back. */
static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte_raw(pmd_raw(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd_raw(pte_raw(pte));
}

/* View a pmd slot as a pte slot (huge pages share the pte format). */
static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

/* pmd accessors implemented on top of the pte helpers. */
#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mk_savedwrite(pmd)	pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
#define pmd_clear_savedwrite(pmd)	pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
947
#ifdef CONFIG_NUMA_BALANCING
/* Huge-pmd protnone test, delegated to the pte-level check. */
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
#define __pmd_write(pmd)	__pte_write(pmd_pte(pmd))
#define pmd_savedwrite(pmd)	pte_savedwrite(pmd_pte(pmd))
959
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
extern int hash__has_transparent_hugepage(void);
/* THP availability depends on the active MMU back-end. */
static inline int has_transparent_hugepage(void)
{
	if (radix_enabled())
		return radix__has_transparent_hugepage();
	return hash__has_transparent_hugepage();
}
#define has_transparent_hugepage has_transparent_hugepage

/*
 * Clear @clr / set @set bits of a huge pmd; returns the prior value
 * (callers inspect bits of it).
 */
static inline unsigned long
pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
		    unsigned long clr, unsigned long set)
{
	if (radix_enabled())
		return radix__pmd_hugepage_update(mm, addr, pmdp, clr, set);
	return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
}

/* A leaf (huge) pmd carries the _PAGE_PTE marker. */
static inline int pmd_large(pmd_t pmd)
{
	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}

/* Drop the present bit, e.g. for pmdp_invalidate(). */
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
}
995
996
997
998
/*
 * Huge-pmd variant of __ptep_test_and_clear_young(): fast-path out
 * when neither ACCESSED nor a hash HPTE is recorded, otherwise clear
 * ACCESSED and report whether it had been set.
 */
static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pmd_t *pmdp)
{
	unsigned long old;

	if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
	return ((old & _PAGE_ACCESSED) != 0);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
/*
 * Write-protect a huge pmd: clear WRITE, or destroy the savedwrite
 * encoding by setting PRIVILEGED (mirrors ptep_set_wrprotect()).
 */
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pmd_t *pmdp)
{
	if (__pmd_write((*pmdp)))
		pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
	else if (unlikely(pmd_savedwrite(*pmdp)))
		pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
}

/* Is this pmd a transparent huge page, per the active back-end? */
static inline int pmd_trans_huge(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_trans_huge(pmd);
	return hash__pmd_trans_huge(pmd);
}
1026
#define __HAVE_ARCH_PMD_SAME
/* Equality of two pmds, back-end specific. */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	if (radix_enabled())
		return radix__pmd_same(pmd_a, pmd_b);
	return hash__pmd_same(pmd_a, pmd_b);
}

/* Turn a pmd into a huge (leaf) entry, back-end specific. */
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_mkhuge(pmd);
	return hash__pmd_mkhuge(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
/* Atomically read and clear a huge pmd. */
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pmdp_huge_get_and_clear(mm, addr, pmdp);
	return hash__pmdp_huge_get_and_clear(mm, addr, pmdp);
}

/* Clear and flush a pmd during THP collapse. */
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pmdp_collapse_flush(vma, address, pmdp);
	return hash__pmdp_collapse_flush(vma, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush
1068
#define __HAVE_ARCH_PGTABLE_DEPOSIT
/* Stash a preallocated pte page under a THP pmd (for later split). */
static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
					      pmd_t *pmdp, pgtable_t pgtable)
{
	if (radix_enabled())
		return radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
	return hash__pgtable_trans_huge_deposit(mm, pmdp, pgtable);
}

#define __HAVE_ARCH_PGTABLE_WITHDRAW
/* Retrieve a previously deposited pte page. */
static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm,
						    pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pgtable_trans_huge_withdraw(mm, pmdp);
	return hash__pgtable_trans_huge_withdraw(mm, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
/* Back-end hook run before splitting a huge pmd. */
static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
					   unsigned long address, pmd_t *pmdp)
{
	if (radix_enabled())
		return radix__pmdp_huge_split_prepare(vma, address, pmdp);
	return hash__pmdp_huge_split_prepare(vma, address, pmdp);
}

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
/*
 * Must mremap() withdraw/redeposit the stashed pte page when moving
 * a THP pmd?  Radix: no; hash: always (the deposited table is tied
 * to the pmd — NOTE(review): exact hash-side reason lives outside
 * this file, confirm).
 */
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	if (radix_enabled())
		return false;

	return true;
}

#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
/* Hash needs a deposited pte page for THP; radix does not. */
static inline bool arch_needs_pgtable_deposit(void)
{
	if (radix_enabled())
		return false;
	return true;
}
1124
1125#endif
1126#endif
1127#endif
1128