1
2
3
4
5
6
7
8
9
10
11#ifndef _ASM_MICROBLAZE_PGTABLE_H
12#define _ASM_MICROBLAZE_PGTABLE_H
13
14#include <asm/setup.h>
15
16#ifndef __ASSEMBLY__
17extern int mem_init_done;
18#endif
19
20#ifndef CONFIG_MMU
21
22#define pgd_present(pgd) (1)
23#define pgd_none(pgd) (0)
24#define pgd_bad(pgd) (0)
25#define pgd_clear(pgdp)
26#define kern_addr_valid(addr) (1)
27#define pmd_offset(a, b) ((void *) 0)
28
29#define PAGE_NONE __pgprot(0)
30#define PAGE_SHARED __pgprot(0)
31#define PAGE_COPY __pgprot(0)
32#define PAGE_READONLY __pgprot(0)
33#define PAGE_KERNEL __pgprot(0)
34
35#define pgprot_noncached(x) (x)
36
37#define __swp_type(x) (0)
38#define __swp_offset(x) (0)
39#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
40#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
41#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
42
43#ifndef __ASSEMBLY__
44static inline int pte_file(pte_t pte) { return 0; }
45#endif
46
47#define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
48
49#define swapper_pg_dir ((pgd_t *) NULL)
50
51#define pgtable_cache_init() do {} while (0)
52
53#define arch_enter_lazy_cpu_mode() do {} while (0)
54
55#define pgprot_noncached_wc(prot) prot
56
57
58
59
60
61#define VMALLOC_START 0
62#define VMALLOC_END 0xffffffff
63
#else /* CONFIG_MMU */

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#define FIRST_USER_ADDRESS	0

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);

/* No "special" pte support on this architecture: always false / no-op. */
static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * The vmalloc area starts immediately after the lowmem direct mapping
 * and runs up to ioremap_bot (NOTE(review): presumably lowered as
 * ioremap carves mappings off the top — confirm against the ioremap
 * implementation).
 */
#define VMALLOC_START	(CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END	ioremap_bot

#endif /* __ASSEMBLY__ */
98
99
100
101
102
/*
 * Bits in a pte that control caching of the mapping; masked out and
 * re-applied by the pgprot_noncached*() helpers below.
 */
#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
			_PAGE_WRITETHRU)

/* Caching inhibited and guarded: suitable for device registers. */
#define pgprot_noncached(prot) \
			(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					_PAGE_NO_CACHE | _PAGE_GUARDED))

/* Caching inhibited but not guarded (write-combining-style mapping). */
#define pgprot_noncached_wc(prot) \
			 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					_PAGE_NO_CACHE))
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
/*
 * PMD_SHIFT determines the size of the area mapped by one second-level
 * page-table page (one page of ptes).
 */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/*
 * PGDIR_SHIFT determines what a top-level entry can map.  The pmd level
 * is folded, so pgd and pmd cover the same range.
 */
#define PGDIR_SHIFT	PMD_SHIFT
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per level of the (effectively two-level) page table:
 * PTRS_PER_PMD == 1 because the pmd is folded into the pgd.
 */
#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

/* Split of the pgd between user entries (below PAGE_OFFSET) and kernel. */
#define USER_PGD_PTRS	(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS	(PTRS_PER_PGD-USER_PGD_PTRS)

/* Report a corrupted table entry with its file/line and raw value. */
#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
		__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
		__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
/*
 * PTE flag bits.  NOTE(review): the assignment of these bits presumably
 * has to match what the TLB-miss exception handler expects — confirm
 * against the low-level exception code before renumbering anything.
 * _PAGE_FILE deliberately shares bit 0 with _PAGE_GUARDED: it is only
 * meaningful when _PAGE_PRESENT is clear (nonlinear file mapping).
 */
#define _PAGE_GUARDED	0x001	/* guarded mapping (see pgprot_noncached) */
#define _PAGE_FILE	0x001	/* when !present: nonlinear file mapping */
#define _PAGE_PRESENT	0x002	/* pte contains a valid translation */
#define _PAGE_NO_CACHE	0x004	/* caching is inhibited */
#define _PAGE_WRITETHRU	0x008	/* write-through caching */
#define _PAGE_USER	0x010	/* user-accessible page */
#define _PAGE_RW	0x040	/* software: write permission */
#define _PAGE_DIRTY	0x080	/* software: page was modified */
#define _PAGE_HWWRITE	0x100	/* hardware write-enable bit */
#define _PAGE_HWEXEC	0x200	/* hardware execute-enable bit */
#define _PAGE_ACCESSED	0x400	/* software: page was referenced */
#define _PMD_PRESENT	PAGE_MASK

/* Flags that may not exist on every variant default to zero. */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif

/* Bits preserved by pte_modify(): frame number, accessed and dirty. */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
239
240
241
242
243
244
245
/* Every valid pte is at least present and accessed. */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
/* All bits that together grant write access. */
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

/* Kernel mapping for device I/O: uncached and guarded. */
#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

/*
 * User protection flavours.  PAGE_COPY equals PAGE_READONLY here: there
 * is no hardware copy-on-write bit, so COW pages are simply mapped
 * without _PAGE_RW and resolved on the write fault.
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)

/*
 * Generic-mm protection tables, indexed by the xwr permission bits:
 * __PXXX for private (copy-on-write) mappings, __SXXX for shared ones.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero, used for
 * zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */
298
/* A pte is "none" when no bits outside _PTE_NONE_MASK are set. */
#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)	((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * Conversions between a pte and a struct page / pfn.  The pte holds a
 * physical address; RAM begins at memory_start, hence the offset in
 * pte_page().
 */
#define pte_page(x)		(mem_map + (unsigned long) \
				((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
317
#ifndef __ASSEMBLY__
/*
 * The pmd level is folded into the pgd in this two-level layout, so a
 * pgd entry is never "none" or "bad" by itself: validity is checked at
 * the pmd/pte level instead.
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)
/* Kernel virtual address of the page-table page this pgd entry holds. */
#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
330
331
332
333
334
335static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
336static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
337static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
338static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
339static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
340static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
341
342static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
343static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
344
345static inline pte_t pte_rdprotect(pte_t pte) \
346 { pte_val(pte) &= ~_PAGE_USER; return pte; }
347static inline pte_t pte_wrprotect(pte_t pte) \
348 { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
349static inline pte_t pte_exprotect(pte_t pte) \
350 { pte_val(pte) &= ~_PAGE_EXEC; return pte; }
351static inline pte_t pte_mkclean(pte_t pte) \
352 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
353static inline pte_t pte_mkold(pte_t pte) \
354 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
355
356static inline pte_t pte_mkread(pte_t pte) \
357 { pte_val(pte) |= _PAGE_USER; return pte; }
358static inline pte_t pte_mkexec(pte_t pte) \
359 { pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
360static inline pte_t pte_mkwrite(pte_t pte) \
361 { pte_val(pte) |= _PAGE_RW; return pte; }
362static inline pte_t pte_mkdirty(pte_t pte) \
363 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
364static inline pte_t pte_mkyoung(pte_t pte) \
365 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
366
367
368
369
370
371
/* Build a pte from a physical address plus protection bits. */
static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

/*
 * Build a pte for a struct page: convert its mem_map index back to a
 * physical address (RAM starts at memory_start) and or in the
 * protection bits.
 */
#define mk_pte(page, pgprot) \
({									\
	pte_t pte;							\
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
			pgprot_val(pgprot);				\
	pte;								\
})

/* Replace the protection bits of a pte, keeping pfn/accessed/dirty. */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
392
393
394
395
396
397
398
399
400
/*
 * Atomically (with respect to this CPU — interrupts are disabled, there
 * is no SMP locking here) read a pte, clear the bits in 'clr', set the
 * bits in 'set', store it back, and return the old value.
 *
 * NOTE(review): the address operand "(p + 1) - 4" equals plain 'p' for a
 * 32-bit pte; presumably written this way to address the low word of a
 * wider pte layout — confirm before simplifying.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				unsigned long set)
{
	unsigned long flags, old, tmp;

	raw_local_irq_save(flags);

	/* old = *p; tmp = (old & ~clr) | set; *p = tmp; */
	__asm__ __volatile__(	"lw	%0, %2, r0	\n"
				"andn	%1, %0, %3	\n"
				"or	%1, %1, %4	\n"
				"sw	%1, %2, r0	\n"
			: "=&r" (old), "=&r" (tmp)
			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
			: "cc");

	raw_local_irq_restore(flags);

	return old;
}
420
421
422
423
/*
 * Install a pte with a plain store.  The mm/addr arguments are unused
 * but keep the signature expected by generic code.
 */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
/* Clear the accessed bit and report whether it had been set. */
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

/* Clear both dirty bits (soft and hardware) and report old dirty state. */
static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep, \
		(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
/* Atomically fetch a pte and clear it (all bits except _PAGE_HASHPTE). */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}
456
457
458
459
460
461
462
/* Set the software dirty bit in a live pte via the atomic pte_update(). */
static inline void ptep_mkdirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}
468
469
470
471
472
473
/* Kernel virtual address of the pte page a pmd entry points at. */
#define pmd_page_kernel(pmd)	((unsigned long) (pmd_val(pmd) & PAGE_MASK))

/* struct page of the pte page a pmd entry points at. */
#define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* To find an entry in a kernel page-table-directory. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* To find an entry in a page-table-directory. */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/*
 * Find an entry in the second-level table: the pmd is folded into the
 * pgd, so the pgd pointer doubles as the pmd pointer.
 */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level (pte) table for the given address. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte)

/*
 * Encode/decode a nonlinear file-mapping entry: the file offset lives
 * above the low three flag bits and the pte is tagged with _PAGE_FILE.
 */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
506
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.  Pte bits 0-1 stay reserved for the
 * flag bits (present/file) — the swap entry occupies bits 2..31, with a
 * 6-bit type and the offset in the remaining bits.
 */
#define __swp_type(entry)		((entry).val & 0x3f)
#define __swp_offset(entry)	((entry).val >> 6)
#define __swp_entry(type, offset) \
		((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })

extern unsigned long iopa(unsigned long addr);

/* Values for nocacheflag and cmode. */
#define IOMAP_FULL_CACHING	0
#define IOMAP_NOCACHE_SER	1
#define IOMAP_NOCACHE_NONSER	2
#define IOMAP_NO_COPYBACK	3

/* Needs to be defined here (arch-dependent), not in linux/mm.h. */
#define kern_addr_valid(addr)	(1)

#define pgtable_cache_init()	do { } while (0)

void do_page_fault(struct pt_regs *regs, unsigned long address,
		unsigned long error_code);

void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

extern int mem_init_done;

asmlinkage void __init mmu_init(void);

void __init *early_get_page(void);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* CONFIG_MMU */
557
#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>

/*
 * Bounds of the ioremap region.  NOTE(review): presumably initialized
 * during MMU setup — confirm against mmu_init()/ioremap code.
 */
extern unsigned long ioremap_bot, ioremap_base;

/* Coherent ("consistent") DMA memory helpers. */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(size_t size, void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction);

void setup_memory(void);
#endif /* __ASSEMBLY__ */
571
572#endif
573