#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif
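/*
 * The PPC32 page table is a two-level tree; the pmd level is folded
 * away by <asm-generic/pgtable-nopmd.h> above.  Each pgdir entry
 * points to one page worth of PTEs, so it maps PAGE_SHIFT + PTE_SHIFT
 * bits of virtual address (PTE_SHIFT depends on the configured PTE
 * size).
 */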
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
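/*
 * KVIRT_TOP is the ceiling of the kernel virtual areas laid out below.
 * With CONFIG_HIGHMEM the persistent kmap window starts right at this
 * point (PKMAP_BASE); otherwise an arbitrary 0xfe000000 limit is used.
 */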
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)
#endif
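/*
 * Early ioremap() mappings are carved out top-down starting at
 * IOREMAP_TOP; ioremap_bot tracks how far down they have reached.  On
 * non-cache-coherent platforms a CONFIG_CONSISTENT_SIZE window just
 * below KVIRT_TOP is reserved for consistent (uncached) DMA mappings,
 * so IOREMAP_TOP sits below that window.
 */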
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif
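/*
 * VMALLOC_OFFSET leaves a 16MB guard gap between the top of lowmem
 * (high_memory) and the start of the vmalloc area, so that accesses
 * running off the end of the linear mapping are likely to fault.
 * When PPC_PIN_SIZE is defined, high_memory is first rounded up to
 * PPC_PIN_SIZE so the vmalloc area does not start inside a pinned
 * kernel TLB mapping.  The area grows upward from VMALLOC_START and
 * ends at ioremap_bot, which moves down as early ioremap()s are made.
 */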
#define VMALLOC_OFFSET (0x1000000)
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
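/*
 * The PTE bit layout is sub-architecture specific: pick up the
 * matching definitions, then the pieces common to all of them from
 * pte-common.h.
 */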
#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else
#include <asm/pte-hash32.h>
#endif

#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)
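/*
 * On hash-table CPUs the MMU hash table caches translations of the
 * Linux PTEs, and these helpers keep the two in sync.
 *
 * Flush the hash table entries for 'count' pages starting at 'va',
 * whose PTEs live in the PTE page identified by 'pmdval':
 */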
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

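/* Pre-load the hash table with an entry for 'va' in 'context': */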
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

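/* Flush the hash table entry (if any) backing a single Linux PTE: */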
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);
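/*
 * pte_update() clears and sets bits in an existing, valid PTE and
 * returns the old value.  When PTE_ATOMIC_UPDATES is defined the
 * read-modify-write is done with lwarx/stwcx. so it cannot race with
 * concurrent updates (e.g. the hash or TLB miss code setting
 * _PAGE_ACCESSED); with 64-bit PTEs only the low word, which holds
 * the flag bits, is updated atomically.  On 44x we also record that an
 * executable user mapping was changed so that its virtually-tagged
 * icache can be flushed later.
 */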
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif
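/*
 * Generic PTE accessors.  On hash-table CPUs a Linux PTE can be
 * shadowed by a hash table entry, so clearing _PAGE_ACCESSED also
 * flushes that entry; otherwise later accesses would keep hitting the
 * hash table and the page would never be seen to age.
 */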
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

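/*
 * Propagate access flags from a new PTE value into an existing PTE:
 * only the dirty, accessed, write and exec bits are ever added here,
 * and _PAGE_RO is cleared when the new value does not have it set
 * (on sub-architectures that use a read-only bit).
 */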
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
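/*
 * On Book E parts the pmd holds the kernel virtual (lowmem) address of
 * the PTE page, which is what the software TLB miss handlers want; the
 * other 32-bit parts store its physical address instead, hence the two
 * flavours of pmd_page_vaddr()/pmd_page().
 */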
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

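/* To find an entry in a kernel page-table directory: */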
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

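/* To find an entry in a process's page-table directory: */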
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

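/* To find an entry in the third-level (PTE) page table: */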
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)

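/*
 * Encoding of swap entries in a non-present PTE: the value is shifted
 * left by 3 when stored, so the low PTE bits (which include
 * _PAGE_PRESENT and, where used, _PAGE_HASHPTE) stay clear.  Of the
 * swap entry value itself, the low 5 bits are the swap type and the
 * remaining bits the swap offset.
 */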
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

#ifndef CONFIG_PPC_4K_PAGES
void pgtable_cache_init(void);
#else
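/*
 * With 4K base pages the page tables occupy whole pages, so there is
 * no kmem cache to set up here; other base page sizes provide a real
 * pgtable_cache_init().
 */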
#define pgtable_cache_init()	do { } while (0)
#endif

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */