/*
 * MicroBlaze page-table definitions.
 *
 * NOTE(review): the original top-of-file header (likely a copyright
 * notice) was lost in extraction — restore it from version control.
 */
#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

#ifndef __ASSEMBLY__
/* Non-zero once early memory init has completed (set by the mm setup
 * code — presumably mmu_init()/setup_memory(); confirm there). */
extern int mem_init_done;
#endif

/* This port uses a two-level table; fold away the pmd level. */
#include <asm-generic/pgtable-nopmd.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>

#define FIRST_USER_ADDRESS 0UL

/* Translate a kernel virtual address to a physical address / to the
 * pte mapping it (implemented in arch mm code). */
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
32
33
34
35
36
37
38
39
40
/*
 * The vmalloc area begins immediately above the statically mapped low
 * memory; its ceiling is the runtime variable ioremap_bot (ioremap
 * allocations grow downward toward it — see the extern near the end
 * of this header).
 */
#define VMALLOC_START (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END ioremap_bot

#endif /* __ASSEMBLY__ */

/* All PTE bits that control caching behavior, cleared as a group by
 * the pgprot_* helpers below before the desired policy is ORed in. */
#define _PAGE_CACHE_CTL (_PAGE_GUARDED | _PAGE_NO_CACHE | \
 _PAGE_WRITETHRU)

/* Strongly uncached + guarded mapping (device memory). */
#define pgprot_noncached(prot) \
 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
 _PAGE_NO_CACHE | _PAGE_GUARDED))

/* Uncached but not guarded ("write-combining"-style) mapping. */
#define pgprot_noncached_wc(prot) \
 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
 _PAGE_NO_CACHE))
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
/*
 * Two-level page table geometry: each PGD entry covers one page worth
 * of PTEs, so a PGD entry maps 2^(PAGE_SHIFT + PTE_SHIFT) bytes.
 */
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/* Entries per table at each level; PMD is folded (see nopmd include). */
#define PTRS_PER_PTE (1 << PTE_SHIFT)
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR 0

/* Split of the PGD between user and kernel halves of the 32-bit
 * address space, at PAGE_OFFSET. */
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

/* Diagnostics printed when a corrupted table entry is detected. */
#define pte_ERROR(e) \
 printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
 __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
 printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
 __FILE__, __LINE__, pgd_val(e))
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
/*
 * PTE attribute bits.  The exact hardware interpretation (which bits
 * the TLB miss handler consumes directly) is defined by the MicroBlaze
 * MMU — NOTE(review): confirm against the TLB-load code in the arch
 * hw_exception handler.
 */
#define _PAGE_GUARDED 0x001	/* no speculative access */
#define _PAGE_PRESENT 0x002	/* software: translation valid */
#define _PAGE_NO_CACHE 0x004	/* cache inhibit */
#define _PAGE_WRITETHRU 0x008	/* write-through caching */
#define _PAGE_USER 0x010	/* user-space accessible */
#define _PAGE_RW 0x040		/* software: write permitted */
#define _PAGE_DIRTY 0x080	/* software: page changed */
#define _PAGE_HWWRITE 0x100	/* hardware write enable */
#define _PAGE_HWEXEC 0x200	/* hardware execute enable */
#define _PAGE_ACCESSED 0x400	/* software: page referenced */
/* A pmd entry is "present" when it holds a page-aligned pointer. */
#define _PMD_PRESENT PAGE_MASK

/* Optional bits defaulted to 0 when the MMU variant doesn't use them. */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE 0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK 0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED 0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC 0
#endif

/* Bits preserved by pte_modify(): the pfn plus referenced/dirty state. */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
177
178
179
180
181
182
183
/* Common baseline for every valid mapping. */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
/* All bits involved in making a mapping writable. */
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

/* Kernel text/data: writable, executable, not user-accessible. */
#define _PAGE_KERNEL \
 (_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

/* Kernel mapping of device memory: uncached and guarded. */
#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
 __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
/* COW mappings start read-only; the write fault handler copies them. */
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI __pgprot(_PAGE_IO)

/*
 * mmap() protection map: __Pxwr are private (copy-on-write) mappings,
 * __Sxwr are shared ones, indexed by the PROT_EXEC/WRITE/READ bits.
 * NOTE(review): the layout mirrors the classic PowerPC table — the
 * R/X asymmetry (e.g. __P001 granting exec) is inherited from there.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY_X
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY_X
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY_X
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY_X

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY_X
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED_X
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY_X
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED_X
226
#ifndef __ASSEMBLY__

/*
 * A page of zeroes, used for anonymous read faults so every zero page
 * shares one physical frame.
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

/* A pte is "none" when no meaningful bits are set (outside the mask
 * of bits ignored for this purpose). */
#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
 do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

/* A pmd entry is a page-aligned kernel pointer to a PTE page. */
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)

/* struct page for a pte: offset of its physical address from the base
 * of RAM (memory_start), in page units, into mem_map. */
#define pte_page(x) (mem_map + (unsigned long) \
 ((pte_val(x) - memory_start) >> PAGE_SHIFT))
/* The pfn occupies the pte bits above PAGE_SHIFT. */
#define PFN_SHIFT_OFFSET (PAGE_SHIFT)

#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
 __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
255
256#ifndef __ASSEMBLY__
257
258
259
260
/* Predicates testing individual attribute bits of a pte value. */
static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

/* NOTE(review): these two modify a by-value copy of the pte and return
 * nothing, so they have no effect a caller can observe — they look
 * vestigial; confirm there are no users before relying on them. */
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
269
270static inline pte_t pte_rdprotect(pte_t pte) \
271 { pte_val(pte) &= ~_PAGE_USER; return pte; }
272static inline pte_t pte_wrprotect(pte_t pte) \
273 { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
274static inline pte_t pte_exprotect(pte_t pte) \
275 { pte_val(pte) &= ~_PAGE_EXEC; return pte; }
276static inline pte_t pte_mkclean(pte_t pte) \
277 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
278static inline pte_t pte_mkold(pte_t pte) \
279 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
280
281static inline pte_t pte_mkread(pte_t pte) \
282 { pte_val(pte) |= _PAGE_USER; return pte; }
283static inline pte_t pte_mkexec(pte_t pte) \
284 { pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
285static inline pte_t pte_mkwrite(pte_t pte) \
286 { pte_val(pte) |= _PAGE_RW; return pte; }
287static inline pte_t pte_mkdirty(pte_t pte) \
288 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
289static inline pte_t pte_mkyoung(pte_t pte) \
290 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
291
292
293
294
295
296
/* Build a pte mapping the given physical address with the given
 * protection bits.  Assumes physpage is page-aligned — the low bits
 * would otherwise collide with the protection field. */
static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

/* Build a pte for a struct page: recover its physical address from its
 * mem_map index relative to the base of RAM (memory_start). */
#define mk_pte(page, pgprot) \
({ \
	pte_t pte; \
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
	pgprot_val(pgprot); \
	pte; \
})

/* Change a pte's protection while keeping its pfn and the
 * accessed/dirty state (_PAGE_CHG_MASK). */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
317
318
319
320
321
322
323
324
325
/*
 * Atomically (with respect to local interrupts only) clear the bits in
 * `clr` and set the bits in `set` in the pte at *p, returning the old
 * pte value.  The load/modify/store is done in assembly under
 * raw_local_irq_save(), which is sufficient only on a uniprocessor —
 * NOTE(review): there is no SMP protection here; confirm this port is
 * UP-only.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 unsigned long set)
{
	unsigned long flags, old, tmp;

	raw_local_irq_save(flags);

	/* old = *addr; tmp = (old & ~clr) | set; *addr = tmp;
	 * The address operand `(p + 1) - 4` equals (char *)p when the
	 * pte word is 32 bits — presumably written this way to pick the
	 * low word of a wider pte_t; confirm against pte_basic_t. */
	__asm__ __volatile__( "lw %0, %2, r0 \n"
	 "andn %1, %0, %3 \n"
	 "or %1, %1, %4 \n"
	 "sw %1, %2, r0 \n"
	 : "=&r" (old), "=&r" (tmp)
	 : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
	 : "cc");

	raw_local_irq_restore(flags);

	return old;
}
345
346
347
348
/* Store a pte directly; mm and addr are unused on this port but kept
 * for the generic set_pte() signature. */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
 pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

/* Same as set_pte(); no extra bookkeeping is needed per-address here. */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}
360
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
/* Clear the accessed bit and report whether it was set. */
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 unsigned long address, pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

/* Clear the dirty state (software bit plus hardware write enable, so
 * the next write refaults) and report whether the page was dirty. */
static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
 unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep, \
	 (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
/* Read and zero the pte in one interrupt-safe step, preserving only
 * _PAGE_HASHPTE (which is 0 unless the MMU variant defines it). */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}
381
382
383
384
385
386
387
/* Set the software dirty bit on a pte in place (interrupt-safe via
 * pte_update()). */
static inline void ptep_mkdirty(struct mm_struct *mm,
 unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}
393
394
395
396
397
398
/* Kernel virtual address of the PTE page a pmd entry points to; a pmd
 * entry is that pointer with low status bits masked off. */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) (pmd_val(pmd) & PAGE_MASK));
}

/* struct page of the PTE page (pmd entries hold virtual addresses,
 * hence the __pa() before the pfn conversion). */
#define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
406
407
408
/* The kernel's own top-level page table. */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Swap entry encoding inside a non-present pte: the pte is shifted
 * right by 2 so the low hardware bits (including _PAGE_PRESENT) stay
 * clear; the swp_entry then holds a 6-bit swap type in its low bits
 * and the swap offset above them.
 */
#define __swp_type(entry) ((entry).val & 0x3f)
#define __swp_offset(entry) ((entry).val >> 6)
#define __swp_entry(type, offset) \
 ((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 })
423
/* Look up the physical address behind an ioremapped/kernel virtual
 * address (implemented in arch mm code). */
extern unsigned long iopa(unsigned long addr);

/*
 * Cache policies selectable for I/O mappings.  Hardware semantics are
 * defined by the MicroBlaze cache/TLB — NOTE(review): confirm against
 * the users of these constants before relying on the names.
 */
#define IOMAP_FULL_CACHING 0
#define IOMAP_NOCACHE_SER 1
#define IOMAP_NOCACHE_NONSER 2
#define IOMAP_NO_COPYBACK 3

/* All kernel addresses are considered valid on this port. */
#define kern_addr_valid(addr) (1)

void do_page_fault(struct pt_regs *regs, unsigned long address,
 unsigned long error_code);

/* Early MMU / memory-map setup entry points (arch mm code). */
void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

/* NOTE(review): duplicate of the declaration near the top of this
 * header; one of the two could be dropped. */
extern int mem_init_done;

asmlinkage void __init mmu_init(void);

/* Allocate a page before the page allocator is available. */
void __init *early_get_page(void);
449
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#ifndef __ASSEMBLY__
/* Current bounds of the ioremap region; ioremap_bot also serves as
 * VMALLOC_END (see the definition earlier in this header). */
extern unsigned long ioremap_bot, ioremap_base;

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */
460