1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/mm.h>
19#include <asm/machdep.h>
20
/*
 * Insert or update the hash-page-table entry (HPTE) backing a transparent
 * huge page mapped by *pmdp at effective address @ea.
 *
 * Return values (as established by the code below):
 *   0  - PMD was busy (caller simply retries the access) or the HPTE was
 *        installed/updated successfully.
 *   1  - @access is not permitted by the PMD protections; caller takes a
 *        normal page fault.
 *  -1  - low-level insert reported a hard failure (slot == -2); the old
 *        PMD is restored before returning.
 */
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
		    pmd_t *pmdp, unsigned long trap, unsigned long flags,
		    int ssize, unsigned int psize)
{
	unsigned int index, valid;
	unsigned char *hpte_slot_array;
	unsigned long rflags, pa, hidx;
	unsigned long old_pmd, new_pmd;
	/* The huge page itself is hashed with the 16M page size. */
	int ret, lpsize = MMU_PAGE_16M;
	unsigned long vpn, hash, shift, slot;

	/*
	 * Atomically mark the Linux huge-page PMD busy (H_PAGE_BUSY acts as
	 * a per-PMD lock) and accessed, adding DIRTY for write accesses.
	 * Loop until the compare-and-exchange on the PMD succeeds.
	 */
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		old_pmd = pmd_val(pmd);
		/* If the PMD is busy, bail out and let the access retry. */
		if (unlikely(old_pmd & H_PAGE_BUSY))
			return 0;
		/* If the PMD permissions don't match, take a page fault. */
		if (unlikely(!check_pte_access(access, old_pmd)))
			return 1;
		/*
		 * Try to lock the PMD: set BUSY and ACCESSED, plus DIRTY
		 * if this was a write access.
		 */
		new_pmd = old_pmd | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pmd |= _PAGE_DIRTY;
	} while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));

	/* Translate Linux PTE flags into hardware HPTE permission bits. */
	rflags = htab_convert_pte_flags(new_pmd);

#if 0
	/*
	 * Disabled lazy-icache handling. NOTE(review): this dead block
	 * references 'old_pte', which is not declared in this function,
	 * so it would not compile if enabled as-is.
	 */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {

		/*
		 * No CPU has hugepages but lacks no-execute, so we
		 * don't need to worry about that case.
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
	}
#endif
	/*
	 * Find the slot index for this ea within the huge page, using the
	 * base page size @psize.
	 */
	shift = mmu_psize_defs[psize].shift;
	index = (ea & ~HPAGE_PMD_MASK) >> shift;
	BUG_ON(index >= PTE_FRAG_SIZE);

	vpn = hpt_vpn(ea, vsid, ssize);
	hpte_slot_array = get_hpte_slot_array(pmdp);
	if (psize == MMU_PAGE_4K) {
		/*
		 * Invalidate the old HPTE if the page was previously hashed
		 * with a 64K base page size but is now being mapped with 4K
		 * (HASHPTE set without COMBO — presumably a segment demotion;
		 * the old 64K translations must be flushed by hand here).
		 */
		if ((old_pmd & H_PAGE_HASHPTE) && !(old_pmd & H_PAGE_COMBO)) {
			flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
					    ssize, flags);
			/*
			 * Also clear the whole slot array: every recorded
			 * 64K HPTE slot for this huge page is now invalid,
			 * so we must not find a stale slot "valid" when
			 * faulting with the 4K base page size.
			 */
			memset(hpte_slot_array, 0, PTE_FRAG_SIZE);
		}
	}

	valid = hpte_valid(hpte_slot_array, index);
	if (valid) {
		/* An HPTE exists for this index — try updating it in place. */
		hash = hpt_hash(vpn, shift, ssize);
		hidx = hpte_hash_index(hpte_slot_array, index);
		/* _PTEIDX_SECONDARY selects the secondary hash bucket. */
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
						 psize, lpsize, ssize, flags);
		/*
		 * Update failed (recorded slot no longer matches a live
		 * HPTE) — drop the stale slot record and fall through to
		 * inserting a fresh entry below.
		 */
		if (ret == -1) {
			valid = 0;
			hpte_slot_array[index] = 0;
		}
	}

	if (!valid) {
		unsigned long hpte_group;

		hash = hpt_hash(vpn, shift, ssize);
		/* Insert a new entry; physical address from the PMD's PFN. */
		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
		new_pmd |= H_PAGE_HASHPTE;

repeat:
		/* Group base; ~0x7UL aligns down to the group boundary. */
		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

		/* Try the primary hash bucket first. */
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
						psize, lpsize, ssize);
		/*
		 * Primary bucket full — try the secondary (complemented
		 * hash) bucket.
		 */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
							rflags,
							HPTE_V_SECONDARY,
							psize, lpsize, ssize);
			if (slot == -1) {
				/*
				 * Both buckets full: evict a victim entry
				 * from a pseudo-randomly chosen bucket (low
				 * timebase bit) and retry the insert.
				 */
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				mmu_hash_ops.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		/*
		 * Hard failure from the insert hook (e.g. hypervisor error).
		 * Restore the old PMD (also dropping H_PAGE_BUSY) and
		 * report -1, mirroring the other __hash_page_* paths.
		 */
		if (unlikely(slot == -2)) {
			*pmdp = __pmd(old_pmd);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   psize, lpsize, old_pmd);
			return -1;
		}

		/*
		 * The PMD is marked busy, so nobody else can be touching
		 * hpte_slot_array concurrently; record the new slot.
		 */
		mark_hpte_slot_valid(hpte_slot_array, index, slot);
	}

	/*
	 * With a 4K base page size the huge page is mapped as a combo of
	 * base-size HPTEs — remember that in the PMD.
	 */
	if (psize == MMU_PAGE_4K)
		new_pmd |= H_PAGE_COMBO;

	/*
	 * Publish the slot-array update before clearing H_PAGE_BUSY, so
	 * another fault that sees the unlocked PMD also sees the valid
	 * slot entry. A plain store suffices to release the "lock".
	 */
	smp_wmb();
	*pmdp = __pmd(new_pmd & ~H_PAGE_BUSY);
	return 0;
}
187