#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

/*
 * I/O mappings need no special handling on MicroBlaze: an io_remap is
 * just an ordinary remap of the physical page frames.
 */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#ifndef __ASSEMBLY__
/* Non-zero once memory initialisation has completed (set in arch mm code). */
extern int mem_init_done;
#endif
22
#ifndef CONFIG_MMU

/*
 * Page-table stubs for MMU-less configurations.  Without an MMU the
 * kernel runs in a flat physical address space, so all of these
 * degenerate to constants or no-ops.
 */
#define pgd_present(pgd)	(1)	/* pages are always present */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)	(1)
#define pmd_offset(a, b)	((void *) 0)

/* Protection bits are meaningless without an MMU. */
#define PAGE_NONE		__pgprot(0)
#define PAGE_SHARED		__pgprot(0)
#define PAGE_COPY		__pgprot(0)
#define PAGE_READONLY		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)

#define pgprot_noncached(x)	(x)

/* Swap is unsupported here; these only satisfy generic code. */
#define __swp_type(x)		(0)
#define __swp_offset(x)		(0)
#define __swp_entry(typ, off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __ASSEMBLY__
/* No nonlinear file mappings without an MMU. */
static inline int pte_file(pte_t pte) { return 0; }
#endif /* __ASSEMBLY__ */

/* There is no zero page on non-MMU; reaching this is a bug. */
#define ZERO_PAGE(vaddr)	({ BUG(); NULL; })

#define swapper_pg_dir		((pgd_t *) NULL)

#define pgtable_cache_init()	do {} while (0)

#define arch_enter_lazy_cpu_mode()	do {} while (0)

#define pgprot_noncached_wc(prot)	prot

/* All of the 32-bit address space is nominally "vmalloc" here. */
#define VMALLOC_START	0
#define VMALLOC_END	0xffffffff
66
#else /* CONFIG_MMU */

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>

#define FIRST_USER_ADDRESS	0

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);

/* No "special" PTE bit is implemented on this architecture: no-ops. */
static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * The vmalloc area begins immediately after lowmem and ends at
 * ioremap_bot (declared near the end of this file).
 */
#define VMALLOC_START	(CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END	ioremap_bot

#endif /* __ASSEMBLY__ */
101
102
103
104
105
/* The PTE bits that together control caching behaviour. */
#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

/* Uncached and guarded: for device/IO mappings. */
#define pgprot_noncached(prot) \
	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			_PAGE_NO_CACHE | _PAGE_GUARDED))

/* Uncached but not guarded ("write-combine"-style variant). */
#define pgprot_noncached_wc(prot) \
	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			_PAGE_NO_CACHE))
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
/*
 * Two-level page-table geometry: the PMD level is folded into the PGD
 * (PGDIR_SHIFT == PMD_SHIFT, PTRS_PER_PMD == 1), so one PGD entry maps
 * a full page of PTEs.
 */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a top-level page-table entry can map. */
#define PGDIR_SHIFT	PMD_SHIFT
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Entries per level (32-bit virtual address space). */
#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

/* Split of the PGD between user space and the kernel. */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

/* Report a corrupt table entry together with its source location. */
#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
		__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
		__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
/*
 * PTE bit assignments.  The cache-control bits (G/I/W) feed
 * _PAGE_CACHE_CTL above; the remaining bits are software state
 * maintained by the pte_*() accessors later in this file.
 */
#define _PAGE_GUARDED	0x001	/* G: "guarded" - one of the cache-control bits */
#define _PAGE_FILE	0x001	/* when !present: nonlinear file mapping */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define _PAGE_NO_CACHE	0x004	/* I: caching inhibited (pgprot_noncached) */
#define _PAGE_WRITETHRU	0x008	/* W: write-through caching */
#define _PAGE_USER	0x010	/* user access allowed (see pte_read) */
#define _PAGE_RW	0x040	/* software: write permission */
#define _PAGE_DIRTY	0x080	/* software: page has been written to */
#define _PAGE_HWWRITE	0x100	/* hardware write enable; cleared together
				 * with RW/DIRTY by pte_wrprotect/pte_mkclean */
#define _PAGE_HWEXEC	0x200	/* hardware execute permission (kernel maps) */
#define _PAGE_ACCESSED	0x400	/* software: page referenced ("young") */
#define _PMD_PRESENT	PAGE_MASK

/*
 * Bit names used by shared code that have no meaning here default
 * to zero so the generic expressions still work.
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif

/* Bits preserved when the protection is changed by pte_modify(). */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
242
243
244
245
246
247
248
/*
 * Composite protection values.  _PAGE_BASE is the minimum for any
 * mapped page; _PAGE_WRENABLE groups all write-related bits.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

/* Kernel mapping for device memory: uncached and guarded. */
#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)

/*
 * mmap() protection-to-pgprot tables for private (__P) and shared
 * (__S) mappings, indexed by the xwr bits.
 * NOTE(review): the r-bit entries (__P001/__S001) grant exec while the
 * pure x-bit entries (__P100/__S100) do not - this looks like the
 * historical "read implies exec" convention; confirm it is intended.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__

/*
 * A single global page of zeroes backs every zero-fill mapping on this
 * architecture (no per-colour zero pages).
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */
301
/* Basic PTE/PMD predicates. */
#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)	((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * Conversions between PTE values, struct page and page frame numbers.
 * pte_page() indexes mem_map by the offset from memory_start.
 */
#define pte_page(x)		(mem_map + (unsigned long) \
				((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
320
#ifndef __ASSEMBLY__

/*
 * With the pmd level folded into the pgd, a pgd entry is never "none"
 * or "bad" and is always present, so these are trivial.
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)
/* Kernel virtual address of the PTE page a pgd entry points to. */
#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
333
334
335
336
337
338static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
339static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
340static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
341static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
342static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
343static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
344
345static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
346static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
347
348static inline pte_t pte_rdprotect(pte_t pte) \
349 { pte_val(pte) &= ~_PAGE_USER; return pte; }
350static inline pte_t pte_wrprotect(pte_t pte) \
351 { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
352static inline pte_t pte_exprotect(pte_t pte) \
353 { pte_val(pte) &= ~_PAGE_EXEC; return pte; }
354static inline pte_t pte_mkclean(pte_t pte) \
355 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
356static inline pte_t pte_mkold(pte_t pte) \
357 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
358
359static inline pte_t pte_mkread(pte_t pte) \
360 { pte_val(pte) |= _PAGE_USER; return pte; }
361static inline pte_t pte_mkexec(pte_t pte) \
362 { pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
363static inline pte_t pte_mkwrite(pte_t pte) \
364 { pte_val(pte) |= _PAGE_RW; return pte; }
365static inline pte_t pte_mkdirty(pte_t pte) \
366 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
367static inline pte_t pte_mkyoung(pte_t pte) \
368 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
369
370
371
372
373
374
/* Build a PTE from a physical address and protection bits. */
static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

/*
 * Build a PTE for a struct page: its mem_map index gives the offset
 * from memory_start, which is the physical address.
 */
#define mk_pte(page, pgprot) \
({									\
	pte_t pte;							\
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) |	\
			pgprot_val(pgprot);				\
	pte;								\
})

/*
 * Change the protection of a PTE while keeping the page frame number
 * and the ACCESSED/DIRTY bits (see _PAGE_CHG_MASK).
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
395
396
397
398
399
400
401
402
403
/*
 * Atomically (with respect to interrupts on this CPU) clear the bits
 * in 'clr' and set the bits in 'set' in a PTE, returning the previous
 * value.  The read-modify-write is done with interrupts disabled since
 * there is no atomic RMW instruction available here.
 *
 * NOTE(review): the memory operand is (p + 1) - 4 bytes, i.e. the last
 * 32-bit word of *p; this differs from p itself only if pte_basic_t is
 * wider than 32 bits - confirm against the pte layout.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				unsigned long set)
{
	unsigned long flags, old, tmp;

	raw_local_irq_save(flags);

	/* old = *p; tmp = (old & ~clr) | set; *p = tmp; */
	__asm__ __volatile__( "lw %0, %2, r0 \n"
			"andn %1, %0, %3 \n"
			"or %1, %1, %4 \n"
			"sw %1, %2, r0 \n"
			: "=&r" (old), "=&r" (tmp)
			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
			: "cc");

	raw_local_irq_restore(flags);

	return old;
}
423
424
425
426
/*
 * Store a PTE.  The mm and addr arguments are unused; they exist only
 * to match the generic set_pte()/set_pte_at() interface.
 */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}
438
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
/* Clear the ACCESSED bit and report whether it was previously set. */
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

/*
 * Clear the DIRTY bit (together with HWWRITE, the hardware write
 * enable) and report whether DIRTY was previously set.
 */
static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep, \
		(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
/* Atomically clear a PTE (all bits except _PAGE_HASHPTE), return old value. */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}
459
460
461
462
463
464
465
/* Set the DIRTY bit in a PTE in place (atomic w.r.t. local interrupts). */
static inline void ptep_mkdirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}
471
472
473
474
475
476
/* Kernel virtual address of the PTE page a pmd entry points to. */
#define pmd_page_kernel(pmd)	((unsigned long) (pmd_val(pmd) & PAGE_MASK))

/* struct page of the PTE page a pmd entry points to. */
#define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* To find an entry in the kernel page-table directory. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* To find an entry in a page-table directory. */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/*
 * Find an entry in the second-level page table.  The pmd level is
 * folded, so the pgd entry itself acts as the pmd.
 */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* To find an entry in the third-level (PTE) page table. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte)

/*
 * Nonlinear file mappings: the file offset is stored in the PTE
 * shifted left by 3, leaving room for the _PAGE_FILE marker in the
 * low bits; hence at most 29 offset bits.
 */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
511
512
513
514
515
516
517
/*
 * Swap entries are stored in non-present PTEs shifted right by 2
 * (dropping the _PAGE_PRESENT/_PAGE_FILE bit positions): the swap
 * type occupies the low 6 bits and the swap offset the bits above.
 */
#define __swp_type(entry)	((entry).val & 0x3f)
#define __swp_offset(entry)	((entry).val >> 6)
#define __swp_entry(type, offset) \
		((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })

/* Translate an ioremapped/kernel virtual address back to physical. */
extern unsigned long iopa(unsigned long addr);

/* Cache-mode selectors for I/O mappings. */
#define IOMAP_FULL_CACHING	0
#define IOMAP_NOCACHE_SER	1
#define IOMAP_NOCACHE_NONSER	2
#define IOMAP_NO_COPYBACK	3

/* All kernel addresses are treated as valid. */
#define kern_addr_valid(addr)	(1)

/* No per-arch page-table caches to initialise. */
#define pgtable_cache_init()	do { } while (0)

void do_page_fault(struct pt_regs *regs, unsigned long address,
		unsigned long error_code);

/* Boot-time mapping helpers, implemented in arch mm code. */
void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

/* Non-zero once memory initialisation has completed. */
extern int mem_init_done;

asmlinkage void __init mmu_init(void);

/* Early page allocator used while the normal allocators are not up yet. */
void __init *early_get_page(void);
555
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* CONFIG_MMU */

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>

/* Bounds of the ioremap allocation area (see VMALLOC_END above). */
extern unsigned long ioremap_bot, ioremap_base;

/* DMA-coherent ("consistent") memory helpers, implemented in arch code. */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(size_t size, void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction);

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */
576