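/*
 * Huge TLB page support for s390: hardware hugepages via large
 * segment/region-third entries (EDAT facilities) plus a software
 * emulation path for machines without MACHINE_HAS_HPAGE.
 */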
#include <linux/mm.h>
#include <linux/hugetlb.h>

static inline unsigned long __pte_to_rste(pte_t pte)
{
	unsigned long rste;

	/*
	 * Translate the pte bits into their region/segment table entry
	 * (rste) positions: READ and WRITE are shifted right by 4 bits,
	 * INVALID by 5, DIRTY and YOUNG are shifted left by 10, while
	 * PROTECT and NOEXEC keep their positions.
	 */
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= (pte_val(pte) & _PAGE_READ) >> 4;
		rste |= (pte_val(pte) & _PAGE_WRITE) >> 4;
		rste |= (pte_val(pte) & _PAGE_INVALID) >> 5;
		rste |= (pte_val(pte) & _PAGE_PROTECT);
		rste |= (pte_val(pte) & _PAGE_DIRTY) << 10;
		rste |= (pte_val(pte) & _PAGE_YOUNG) << 10;
		rste |= (pte_val(pte) & _PAGE_NOEXEC);
	} else
		rste = _SEGMENT_ENTRY_INVALID;
	return rste;
}

static inline pte_t __rste_to_pte(unsigned long rste)
{
	int present;
	pte_t pte;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		present = pud_present(__pud(rste));
	else
		present = pmd_present(__pmd(rste));

	/*
	 * Reverse of __pte_to_rste(): move the READ, WRITE, INVALID,
	 * DIRTY and YOUNG bits back to their pte positions and mark
	 * the result as a large, present page.
	 */
	if (present) {
		pte_val(pte) = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
		pte_val(pte) |= (rste & _SEGMENT_ENTRY_READ) << 4;
		pte_val(pte) |= (rste & _SEGMENT_ENTRY_WRITE) << 4;
		pte_val(pte) |= (rste & _SEGMENT_ENTRY_INVALID) << 5;
		pte_val(pte) |= (rste & _SEGMENT_ENTRY_PROTECT);
		pte_val(pte) |= (rste & _SEGMENT_ENTRY_DIRTY) >> 10;
		pte_val(pte) |= (rste & _SEGMENT_ENTRY_YOUNG) >> 10;
		pte_val(pte) |= (rste & _SEGMENT_ENTRY_NOEXEC);
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}

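/*
 * Install a hugepage mapping: convert the pte to segment/region-third
 * entry format and store it through ptep, which actually points at a
 * pmd or pud entry.
 */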
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);
	if (!MACHINE_HAS_NX)
		rste &= ~_SEGMENT_ENTRY_NOEXEC;

	/* Set the correct table type for 2G (region-third) hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
	} else if (!MACHINE_HAS_HPAGE) {
		/*
		 * Emulated hugepage: redirect the entry origin to the
		 * pte table prepared by arch_prepare_hugepage().
		 */
		rste &= ~_SEGMENT_ENTRY_ORIGIN;
		rste |= pte_page(pte)[1].index;
	} else {
		rste |= _SEGMENT_ENTRY_LARGE;
	}
	pte_val(*ptep) = rste;
}

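/*
 * Read a hugepage pte.  For emulated hugepages the entry points at a
 * pte table instead of the page itself, so the first pte of that table
 * is folded back in and the result is reported young and dirty before
 * converting back to pte format.
 */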
pte_t huge_ptep_get(pte_t *ptep)
{
	unsigned long origin, rste;

	rste = pte_val(*ptep);
	if ((rste & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3 &&
	    !MACHINE_HAS_HPAGE && pmd_present(__pmd(rste))) {
		/* Fold the first pte of the emulation pte table back in */
		origin = rste & _SEGMENT_ENTRY_ORIGIN;
		rste &= ~_SEGMENT_ENTRY_ORIGIN;
		rste |= *(unsigned long *) origin;
		/* Emulated huge ptes are young and dirty by definition */
		rste |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
	}
	return __rste_to_pte(rste);
}

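/*
 * Fetch the old hugepage pte and clear the underlying pmd or pud
 * entry, flushing the TLB with idte (or csp where idte is not
 * available for segment entries).
 */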
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		__pudp_idte(addr, pudp);
		pud_val(*pudp) = _REGION3_ENTRY_EMPTY;
	} else {
		if (MACHINE_HAS_IDTE)
			__pmd_idte(addr, pmdp);
		else
			__pmd_csp(pmdp);
		pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
	}
	return pte;
}

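/*
 * Software hugepage emulation: when the machine lacks hardware large
 * pages, back each hugepage with a pte table mapping its PTRS_PER_PTE
 * small pages and remember that table in page[1].index for
 * set_huge_pte_at() and huge_ptep_get().
 */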
int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte_val(pte) = addr;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}

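/*
 * Undo arch_prepare_hugepage(): clear and free the emulation pte table
 * attached to the hugepage, if any.
 */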
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	clear_table((unsigned long *) ptep, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}

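/*
 * Allocate the page table level that matches the requested hugepage
 * size: a pud entry for PUD_SIZE pages, a pmd entry for PMD_SIZE
 * pages, returned to the generic hugetlb code as a pte_t pointer.
 */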
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp) {
		if (sz == PUD_SIZE)
			return (pte_t *) pudp;
		else if (sz == PMD_SIZE)
			pmdp = pmd_alloc(mm, pudp, addr);
	}
	return (pte_t *) pmdp;
}

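/*
 * Walk the page table and return the pud or pmd entry mapping addr,
 * again cast to pte_t * for the generic hugetlb code.
 */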
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp)) {
			if (pud_large(*pudp))
				return (pte_t *) pudp;
			pmdp = pmd_offset(pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}

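/* Hugetlb pmd sharing is not implemented on s390. */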
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

int pmd_huge(pmd_t pmd)
{
	if (!MACHINE_HAS_HPAGE)
		return 0;

	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

int pud_huge(pud_t pud)
{
	return pud_large(pud);
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

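/*
 * Parse the hugepagesz= kernel parameter: PMD-sized hugepages require
 * the EDAT1 facility, PUD-sized hugepages require EDAT2; anything else
 * is rejected with an error message.
 */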
static __init int setup_hugepagesz(char *opt)
{
	unsigned long size;
	char *string = opt;

	size = memparse(opt, &opt);
	if (MACHINE_HAS_EDAT1 && size == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		pr_err("hugepagesz= specifies an unsupported page size %s\n",
		       string);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);