// SPDX-License-Identifier: GPL-2.0
/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

11#include <linux/mm.h>
12#include <linux/hugetlb.h>
13#include <asm/cacheflush.h>
14#include <asm/machdep.h>
15
16unsigned int hpage_shift;
17EXPORT_SYMBOL(hpage_shift);
18
/*
 * Hash-fault handler for hugetlb pages: build or update the hash page
 * table entry (HPTE) backing the Linux PTE at @ptep for the faulting
 * effective address @ea.
 *
 * @ea:        faulting effective address
 * @access:    requested access bits (e.g. _PAGE_WRITE)
 * @vsid:      virtual segment id of the faulting segment
 * @ptep:      Linux PTE for the hugepage
 * @trap:      trap number, used for lazy icache flush decisions
 * @flags:     update flags passed through to the hash-table ops
 * @ssize:     segment size
 * @shift:     page-size shift; must match mmu_psize_defs[@mmu_psize]
 * @mmu_psize: MMU page-size index of the hugepage
 *
 * Returns 0 when the access was handled (or the PTE was busy and the
 * access should simply be retried), 1 when the PTE permissions do not
 * allow the access (caller takes a page fault), and -1 when HPTE
 * insertion failed (slot == -2 from hpte_insert_repeating).
 */
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize)
{
	real_pte_t rpte;
	unsigned long vpn;
	unsigned long old_pte, new_pte;
	unsigned long rflags, pa;
	long slot, offset;

	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

	/* Compute the virtual page number for the hash lookup */
	vpn = hpt_vpn(ea, vsid, ssize);

	/*
	 * At this point we have a PTE (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		/* If PTE busy, another CPU owns it: retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take a page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;

		/*
		 * Try to lock the PTE by setting H_PAGE_BUSY; also set
		 * ACCESSED, and DIRTY when this is a write access.
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/* Make sure this is really a hugetlb entry, not THP/devmap */
	if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
		return 0;

	rflags = htab_convert_pte_flags(new_pte, flags);
	if (unlikely(mmu_psize == MMU_PAGE_16G))
		offset = PTRS_PER_PUD;
	else
		offset = PTRS_PER_PMD;
	rpte = __real_pte(__pte(old_pte), ptep, offset);

	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/*
		 * Icache is not coherent with dcache: may need a lazy
		 * icache flush depending on the trap and PTE state.
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	/* Check if the PTE already has an HPTE (case 2 above) */
	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long gslot;

		gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, 0);
		if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, mmu_psize,
					       mmu_psize, ssize, flags) == -1)
			/* Stale HPTE: forget it and insert a fresh one below */
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & H_PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, shift, ssize);

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

		/* Clear HPTE slot information in the new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;

		slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
					     mmu_psize, ssize);

		/*
		 * Insertion failure (slot == -2): restore the old PTE
		 * (dropping H_PAGE_BUSY) and return -1, matching the
		 * other __hash_page_* handlers.
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   mmu_psize, mmu_psize, old_pte);
			return -1;
		}

		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
	}

	/*
	 * Plain store is enough to release H_PAGE_BUSY: we hold the
	 * busy bit, so no other CPU will update this PTE concurrently.
	 */
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}
125
126pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
127 unsigned long addr, pte_t *ptep)
128{
129 unsigned long pte_val;
130
131
132
133
134
135 pte_val = pte_update(vma->vm_mm, addr, ptep,
136 _PAGE_PRESENT, _PAGE_INVALID, 1);
137
138 return __pte(pte_val);
139}
140
141void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
142 pte_t *ptep, pte_t old_pte, pte_t pte)
143{
144
145 if (radix_enabled())
146 return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
147 old_pte, pte);
148 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
149}
150
151void hugetlbpage_init_default(void)
152{
153
154
155
156 if (mmu_psize_defs[MMU_PAGE_16M].shift)
157 hpage_shift = mmu_psize_defs[MMU_PAGE_16M].shift;
158 else if (mmu_psize_defs[MMU_PAGE_1M].shift)
159 hpage_shift = mmu_psize_defs[MMU_PAGE_1M].shift;
160 else if (mmu_psize_defs[MMU_PAGE_2M].shift)
161 hpage_shift = mmu_psize_defs[MMU_PAGE_2M].shift;
162}
163