1
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

/* The PMD and PUD page-table levels are folded away on 32-bit. */
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>		/* For sub-arch specific PPC_PIN_SIZE */

#ifdef CONFIG_44x
/*
 * Set by pte_update() below when a user-accessible, executable PTE is
 * changed. NOTE(review): consumed/cleared elsewhere in the 44x TLB code
 * — confirm at the users.
 */
extern int icache_44x_need_flush;
#endif

#endif	/* __ASSEMBLY__ */
17
/*
 * PMD and PUD are folded (see pgtable-nopmd.h above): only the PGD and
 * PTE levels carry index bits on 32-bit no-hash MMUs.
 */
#define PTE_INDEX_SIZE PTE_SHIFT
#define PMD_INDEX_SIZE 0
#define PUD_INDEX_SIZE 0
#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
/* Size in bytes of one table at each level (folded levels are 0). */
#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Low bits of a PMD entry that are not part of the PTE-table pointer. */
#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
37
38
39
40
41
42
43
44
45
46
47
48
/* Each PGD entry maps the range covered by one full PTE page. */
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/* PGD entries carry no extra status bits that need masking off. */
#define PGD_MASKED_BITS 0

/* Number of PGD slots covering user address space. */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)

/* PTEs may be 64-bit (CONFIG_PTE_64BIT), hence the %llx and the cast. */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
63
#ifndef __ASSEMBLY__

/*
 * Establish a kernel page mapping of virtual address @va to physical
 * @pa with protection @prot. Implemented in the powerpc mm code;
 * NOTE(review): presumably returns 0 on success — confirm at the
 * definition.
 */
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif
69
70
71
72
73
74
75
#include <asm/fixmap.h>

/*
 * The ioremap region sits at the top of the vmalloc space: below the
 * persistent kmap window when CONFIG_HIGHMEM provides one, otherwise
 * directly below the fixed-mapping area.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP PKMAP_BASE
#else
#define IOREMAP_TOP FIXADDR_START
#endif

/* ioremap allocations come from the vmalloc address range. */
#define IOREMAP_START VMALLOC_START
#define IOREMAP_END VMALLOC_END
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
/*
 * vmalloc space begins above high_memory, rounded up to a 16M
 * (VMALLOC_OFFSET) boundary so an unmapped gap separates it from
 * lowmem. When a pinned TLB entry covers the kernel (PPC_PIN_SIZE),
 * align up to the pinned size first so vmalloc never starts inside a
 * pinned mapping.
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

/*
 * vmalloc ends where ioremap allocations begin (ioremap_bot grows
 * downward). With KASAN_VMALLOC, round down so the boundary lands on a
 * whole KASAN shadow page.
 */
#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif
122
123
124
125
126
127
/* Pull in the PTE bit layout for the MMU flavour being built. */
#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif
139
140
141
142
143
144
/* Position of the PFN within a PTE, unless the platform header set it. */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT (PAGE_SHIFT)
#endif

/*
 * With 64-bit PTEs the real page number can extend beyond 32 bits of
 * physical address (36 bits on these parts); with 32-bit PTEs physical
 * addressing is limited to 32 bits.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif

/*
 * Bits kept when a PTE's protections change: the PFN plus the
 * dirty/accessed/special status bits.
 */
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
166
#ifndef __ASSEMBLY__

/* Clear a PTE completely: ask pte_update() to drop every bit. */
#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
171
172#ifndef pte_mkwrite
173static inline pte_t pte_mkwrite(pte_t pte)
174{
175 return __pte(pte_val(pte) | _PAGE_RW);
176}
177#endif
178
179static inline pte_t pte_mkdirty(pte_t pte)
180{
181 return __pte(pte_val(pte) | _PAGE_DIRTY);
182}
183
184static inline pte_t pte_mkyoung(pte_t pte)
185{
186 return __pte(pte_val(pte) | _PAGE_ACCESSED);
187}
188
189#ifndef pte_wrprotect
190static inline pte_t pte_wrprotect(pte_t pte)
191{
192 return __pte(pte_val(pte) & ~_PAGE_RW);
193}
194#endif
195
196static inline pte_t pte_mkexec(pte_t pte)
197{
198 return __pte(pte_val(pte) | _PAGE_EXEC);
199}
200
/* A PMD entry is simply a pointer to a PTE page; zero means empty. */
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
/* Empty the PMD entry (forget the PTE page it pointed to). */
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
#ifdef CONFIG_PPC_8xx
/* Defined later in the mm headers; forward-declared for pte_update(). */
static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
static int hugepd_ok(hugepd_t hpd);

/*
 * On the 8xx a logical PTE may be replicated over several consecutive
 * 4k cells: a normal page occupies PAGE_SIZE/4k cells, a mapping behind
 * a hugepd occupies a single entry, a 16k mapping on a 4k-page kernel
 * (no _PAGE_HUGE set) occupies 4 cells, and a 512k mapping occupies
 * 128. Return the cell count for the PTE whose (new) value is @val.
 */
static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
{
	if (!huge)
		return PAGE_SIZE / SZ_4K;
	else if (hugepd_ok(*((hugepd_t *)pmd)))
		return 1;
	else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
		return SZ_16K / SZ_4K;
	else
		return SZ_512K / SZ_4K;
}
244
/*
 * pte_update() - clear the @clr bits and set the @set bits of a PTE.
 *
 * Returns the previous PTE value. On the 8xx every cell backing the
 * logical PTE (see number_of_cells_per_pte()) is rewritten, with the
 * value advanced by 4k per cell so each cell maps the next 4k of the
 * page. Plain stores — not atomic.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t *entry = &p->pte;
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	int num, i;
	pmd_t *pmd = pmd_off(mm, addr);

	/* Cell count is derived from the *new* value's huge-page bits. */
	num = number_of_cells_per_pte(pmd, new, huge);

	for (i = 0; i < num; i++, entry++, new += SZ_4K)
		*entry = new;

	return old;
}
261
#ifdef CONFIG_PPC_16K_PAGES
#define __HAVE_ARCH_PTEP_GET
/*
 * With 16k pages a pte_t is four identical 4k cells. Read a single cell
 * exactly once (READ_ONCE) and replicate it, so the returned pte_t is
 * self-consistent even if the entry is being rewritten concurrently.
 */
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_basic_t val = READ_ONCE(ptep->pte);
	pte_t pte = {val, val, val, val};

	return pte;
}
#endif
272
#else
/*
 * pte_update() - clear the @clr bits and set the @set bits of *@p.
 *
 * Returns the previous PTE value. Plain (non-atomic) read-modify-write;
 * @huge is unused on these platforms. On 44x, rewriting a PTE that was
 * both user-accessible and executable records that the icache may need
 * flushing (the flag is consumed elsewhere).
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;

	*p = __pte(new);

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif
289
290#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
291static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
292 unsigned long addr, pte_t *ptep)
293{
294 unsigned long old;
295 old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
296 return (old & _PAGE_ACCESSED) != 0;
297}
298#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
299 __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
300
301#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
302static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
303 pte_t *ptep)
304{
305 return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
306}
307
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
/*
 * Make the PTE read-only. The clr/set masks are derived from
 * pte_wrprotect() itself: applying it to an all-ones PTE reveals which
 * bits it clears, applying it to an all-zeroes PTE reveals which bits
 * it sets, so any platform override of pte_wrprotect() is honoured
 * automatically.
 */
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(mm, addr, ptep, clr, set, 0);
}
317
/*
 * Fold the access/dirty/write/exec flags of @entry into the PTE.
 *
 * The masks are derived by probing the pte_mk*() helpers with all-zero
 * and all-ones PTEs, so exactly the bits those (possibly
 * platform-overridden) helpers set or clear are transferred from
 * @entry — no other PTE bits change. A @psize above the base page size
 * marks the update as huge for pte_update(). The TLB entry for
 * @address is then flushed so the new flags take effect.
 */
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, clr, set, huge);

	flush_tlb_page(vma, address);
}
333
334static inline int pte_young(pte_t pte)
335{
336 return pte_val(pte) & _PAGE_ACCESSED;
337}
338
#define __HAVE_ARCH_PTE_SAME
/* Two PTEs are the same iff every raw bit matches (XOR is zero). */
#define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0)
341
342
343
344
345
346
347
348
/*
 * Conversion from a PMD entry to the struct page of the PTE table it
 * points to. On non-BookE the PMD holds a physical address; on BookE it
 * holds the kernel virtual address of the PTE page, hence the __pa().
 */
#ifndef CONFIG_BOOKE
#define pmd_page(pmd) \
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif
358
359
360
361
362
363
364
/*
 * Swap entry encoding for non-present PTEs: the low 5 bits hold the
 * swap type, the remaining bits the offset. When stored in a PTE the
 * whole entry is shifted left by 3, leaving the bottom 3 PTE bits
 * clear (NOTE(review): presumably so the present bit stays 0 — confirm
 * against the per-platform pte-*.h bit layouts).
 */
#define __swp_type(entry) ((entry).val & 0x1f)
#define __swp_offset(entry) ((entry).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
370
371#endif
372
373#endif
374