1
2#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
3#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
4
5#include <asm-generic/pgtable-nopmd.h>
6
7#ifndef __ASSEMBLY__
8#include <linux/sched.h>
9#include <linux/threads.h>
10#include <asm/mmu.h>
11
/*
 * 44x: set by pte_update() below whenever a user-executable PTE is
 * rewritten, so the icache can be flushed later by the consumer of this
 * flag (defined elsewhere — outside this header's view).
 */
#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif
15
16#endif
17
/*
 * Page-table geometry: a 2-level table (PGD -> PTE).  The PMD and PUD
 * levels are folded away (index size 0), matching the
 * <asm-generic/pgtable-nopmd.h> include above.
 */
#define PTE_INDEX_SIZE PTE_SHIFT
#define PMD_INDEX_SIZE 0
#define PUD_INDEX_SIZE 0
#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
/* Size in bytes of each table level; the folded levels occupy no storage */
#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Low PMD bits that are not part of the PTE-table address */
#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif

/* Number of entries in each (non-folded) table level */
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
37
38
39
40
41
42
43
44
45
46
47
48
/*
 * PGDIR_SHIFT determines the address-space span of one top-level entry:
 * each PGD entry maps PAGE_SHIFT + PTE_INDEX_SIZE bits of virtual space.
 */
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/* No extra status bits are kept in PGD entries here */
#define PGD_MASKED_BITS 0

/* Number of PGD entries covering user address space */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
57
/* Diagnostics printed when a corrupt page-table entry is encountered */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
63
#ifndef __ASSEMBLY__

/* Create / remove a kernel mapping for a single page (defined in mm code) */
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif
70
71
72
73
74
75
76
77#include <asm/fixmap.h>
78
79
80
81
82
83
/*
 * Upper bound of the ioremap area: below the persistent-kmap area when
 * HIGHMEM is configured, otherwise directly below the fixmap.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP PKMAP_BASE
#else
#define IOREMAP_TOP FIXADDR_START
#endif

/* ioremap allocations share the vmalloc address range */
#define IOREMAP_START VMALLOC_START
#define IOREMAP_END VMALLOC_END
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
/*
 * The vmalloc area starts above high_memory, rounded up to a
 * VMALLOC_OFFSET boundary (leaving a gap of at least VMALLOC_OFFSET),
 * and extends up to ioremap_bot.
 */
#define VMALLOC_OFFSET (0x1000000)
#ifdef PPC_PIN_SIZE
/* Align up to PPC_PIN_SIZE first (presumably the pinned-TLB-entry size — confirm) */
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
/* Keep the end aligned so the KASAN shadow of the area stays page-aligned */
#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif
123
124
125
126
127
128
/* Pull in the PTE bit definitions for the configured 32-bit nohash family */
#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif
140
141
142
143
144
145
/*
 * First bit of the real page number (RPN) within a PTE; bits below it
 * hold the permission/status flags.  Platform headers above may override.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT (PAGE_SHIFT)
#endif

/*
 * With 64-bit PTEs on a 32-bit kernel the RPN can address 36 bits of
 * physical memory; with 32-bit PTEs the limit is 32 bits.  Note the
 * ULL vs UL mask width matching the PTE size.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif

/* PTE bits carried over when the entry's protections are changed */
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
167
#ifndef __ASSEMBLY__

/* Clear a PTE entirely: pte_update() with every bit in the clear mask */
#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
172
173#ifndef pte_mkwrite
174static inline pte_t pte_mkwrite(pte_t pte)
175{
176 return __pte(pte_val(pte) | _PAGE_RW);
177}
178#endif
179
180static inline pte_t pte_mkdirty(pte_t pte)
181{
182 return __pte(pte_val(pte) | _PAGE_DIRTY);
183}
184
185static inline pte_t pte_mkyoung(pte_t pte)
186{
187 return __pte(pte_val(pte) | _PAGE_ACCESSED);
188}
189
190#ifndef pte_wrprotect
191static inline pte_t pte_wrprotect(pte_t pte)
192{
193 return __pte(pte_val(pte) & ~_PAGE_RW);
194}
195#endif
196
197#ifndef pte_mkexec
198static inline pte_t pte_mkexec(pte_t pte)
199{
200 return __pte(pte_val(pte) | _PAGE_EXEC);
201}
202#endif
203
/* Helpers for the (folded) PMD level: presence / sanity / teardown */
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
/* Wipe a PMD entry, dropping its pointer to the PTE table */
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
#ifdef CONFIG_PPC_8xx
/* Forward declarations only; the definitions live outside this header's view */
static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
static int hugepd_ok(hugepd_t hpd);

/*
 * On 8xx a Linux PTE may be backed by several consecutive SZ_4K entries
 * ("cells").  Return how many cells the entry under @pmd needs for the
 * new value @val; @huge is non-zero for a huge mapping.
 */
static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
{
	if (!huge)
		return PAGE_SIZE / SZ_4K;	/* normal page: one cell per 4K */
	else if (hugepd_ok(*((hugepd_t *)pmd)))
		return 1;			/* hugepd-managed: a single cell */
	else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
		return SZ_16K / SZ_4K;		/* 16k huge page on a 4k-page kernel */
	else
		return SZ_512K / SZ_4K;		/* 512k huge page */
}
247
/*
 * Clear @clr and set @set in the PTE at @p and return the old value.
 * 8xx variant: every SZ_4K cell backing the Linux PTE is rewritten, with
 * the stored value advanced by SZ_4K per cell so each cell points at the
 * next 4K of the mapping.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t *entry = (pte_basic_t *)p;
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	int num, i;
	pmd_t *pmd = pmd_off(mm, addr);

	num = number_of_cells_per_pte(pmd, new, huge);

	/* Write each backing cell, bumping the value by 4K each time */
	for (i = 0; i < num; i++, entry++, new += SZ_4K)
		*entry = new;

	return old;
}
264
#ifdef CONFIG_PPC_16K_PAGES
#define __HAVE_ARCH_PTEP_GET
/*
 * With 16k pages a pte_t is four 4k cells holding the same value (see
 * pte_update() above).  Read the first cell once with READ_ONCE() and
 * replicate it, so the returned pte_t is internally consistent even if
 * the entry is being updated concurrently.
 */
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_basic_t val = READ_ONCE(ptep->pte);
	pte_t pte = {val, val, val, val};

	return pte;
}
#endif
275
#else
/*
 * Clear @clr and set @set in the PTE at @p and return the old value.
 * Generic (non-8xx) variant: a single PTE word updated in place.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;

	*p = __pte(new);

#ifdef CONFIG_44x
	/* Flag a deferred icache flush when a user-executable PTE is replaced */
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif
292
293#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
294static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
295 unsigned long addr, pte_t *ptep)
296{
297 unsigned long old;
298 old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
299 return (old & _PAGE_ACCESSED) != 0;
300}
301#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
302 __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
303
304#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
305static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
306 pte_t *ptep)
307{
308 return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
309}
310
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#ifndef ptep_set_wrprotect
/* Remove write permission from the PTE in place; platforms may override */
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
#endif
319
#ifndef __ptep_set_access_flags
/*
 * Fold the dirty/accessed/RW/exec bits from @entry into *@ptep (this
 * only ever adds permissions — the clear mask is 0), then flush the TLB
 * entry for @address so stale translations are dropped.  @psize larger
 * than mmu_virtual_psize marks the update as a huge mapping.
 */
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, 0, set, huge);

	/* Flush only after the PTE itself has been updated */
	flush_tlb_page(vma, address);
}
#endif
335
336static inline int pte_young(pte_t pte)
337{
338 return pte_val(pte) & _PAGE_ACCESSED;
339}
340
341
342
343
344
345
346
347
/*
 * Book E PMDs hold the kernel *virtual* address of the PTE table (hence
 * the __pa() below); the non-Book E variant treats pmd_val() as already
 * physical — NOTE(review): confirm per platform.
 */
#ifndef CONFIG_BOOKE
#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#endif

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
357
358
359
360
361
362
/*
 * Swap-entry encoding: the PTE value is shifted right by 3, skipping the
 * low 3 PTE bits (presumably so _PAGE_PRESENT stays clear — confirm);
 * within the shifted value, bits 0-4 are the swap type (5 bits) and the
 * remaining high bits are the swap offset.
 */
#define __swp_type(entry) ((entry).val & 0x1f)
#define __swp_offset(entry) ((entry).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
368
369#endif
370
371#endif
372