1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#ifndef _ASM_PGTABLE_H
17#define _ASM_PGTABLE_H
18
19#include <asm-generic/5level-fixup.h>
20#include <asm/mem-layout.h>
21#include <asm/setup.h>
22#include <asm/processor.h>
23
24#ifndef __ASSEMBLY__
25#include <linux/threads.h>
26#include <linux/slab.h>
27#include <linux/list.h>
28#include <linux/spinlock.h>
29#include <linux/sched.h>
30struct vm_area_struct;
31#endif
32
#ifndef __ASSEMBLY__
/*
 * How the location of a PTE is recorded.  With CONFIG_HIGHPTE the page
 * table pages may live in highmem and cannot be kept as direct kernel
 * pointers, so an unsigned long token is used instead.
 */
#if defined(CONFIG_HIGHPTE)
typedef unsigned long pte_addr_t;
#else
typedef pte_t *pte_addr_t;
#endif
#endif
40
41
42
43
44
#ifndef CONFIG_MMU

/*
 * No-MMU configuration: there are no hardware page tables, so every
 * page-table query collapses to a constant and all protection values
 * are empty.  These stubs exist only so generic mm code compiles.
 */
#define pgd_present(pgd) (1) /* pages are always "present" */
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr) (1)
#define pmd_offset(a, b) ((void *) 0)

/* protections are meaningless without an MMU */
#define PAGE_NONE __pgprot(0)
#define PAGE_SHARED __pgprot(0)
#define PAGE_COPY __pgprot(0)
#define PAGE_READONLY __pgprot(0)
#define PAGE_KERNEL __pgprot(0)

/* minimal swap-entry encoding so generic swap code still compiles */
#define __swp_type(x) (0)
#define __swp_offset(x) (0)
#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

/* there is no zero page on no-MMU; any use of it is a bug */
#define ZERO_PAGE(vaddr) ({ BUG(); NULL; })

#define swapper_pg_dir ((pgd_t *) NULL)

#define pgtable_cache_init() do {} while (0)

#include <asm-generic/pgtable.h>
73
74#else
75
76
77
78
79
80
81
82
83
#ifndef __ASSEMBLY__
/* A page of zeroes, handed out wherever a read-only zero page is needed */
extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
#endif
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
/*
 * Page-table geometry.  The hardware uses a two-level table (see
 * TWOLEVEL_PGDIR_SHIFT below) presented to generic code as a folded
 * four-level hierarchy: the PUD and PMD levels each have a single entry
 * and share the PGD's 26-bit (64MB) span.
 */
#define PGDIR_SHIFT 26
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define PTRS_PER_PGD 64

/* PUD level is folded into the PGD */
#define __PAGETABLE_PUD_FOLDED
#define PUD_SHIFT 26
#define PTRS_PER_PUD 1
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE - 1))
#define PUE_SIZE 256 /* bytes per PUD entry */

/* PMD level is folded into the PUD (and hence the PGD) */
#define __PAGETABLE_PMD_FOLDED
#define PMD_SHIFT 26
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PTRS_PER_PMD 1
#define PME_SIZE 256 /* bytes per PMD entry */

#define __frv_PT_SIZE 256 /* bytes per page-table descriptor block */

#define PTRS_PER_PTE 4096

#define USER_PGDS_IN_LAST_PML4 (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL

/* split of the PGD between user and kernel halves */
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)

/* the real hardware layout is two-level; same 26-bit top-level shift */
#define TWOLEVEL_PGDIR_SHIFT 26
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (PTRS_PER_PGD - BOOT_USER_PGD_PTRS)
154
155#ifndef __ASSEMBLY__
156
/* The kernel's master page directory */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Diagnostics for corrupt page-table entries.  The nested pmd_val/pud_val
 * unwrapping reflects the folded types: a pgd_t contains a pud_t, which
 * contains a pmd_t.
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(e)))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(pgd_val(e))))

/*
 * Store a PTE and then "dcf" the containing cacheline — presumably a
 * data-cache flush so the MMU sees the new value (same pattern is used
 * by every PTE/PGD updater in this file).
 */
#define set_pte(pteptr, pteval) \
do { \
	*(pteptr) = (pteval); \
	asm volatile("dcf %M0" :: "U"(*pteptr)); \
} while(0)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
179
180
181
182
183
/* Locate the PGD entry covering an address within a given mm */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* Same, for the kernel's own page tables */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * With the PUD level folded into the PGD, a PGD entry always exists and
 * is always valid; clearing it is a no-op.
 */
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline void pgd_clear(pgd_t *pgd) { }

/* folded level: nothing to wire into the PGD */
#define pgd_populate(mm, pgd, pud) do { } while (0)
203
204
205
206
/*
 * Update a PGD entry, then flush its cacheline ("dcf") so the change is
 * visible to the MMU walker.
 */
#define set_pgd(pgdptr, pgdval) \
do { \
	memcpy((pgdptr), &(pgdval), sizeof(pgd_t)); \
	asm volatile("dcf %M0" :: "U"(*(pgdptr))); \
} while(0)

/* PUD folded into PGD: the "PUD table" is just the PGD entry itself */
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *) pgd;
}

/* delegate to the folded PUD level */
#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
220
221
222
223
224
/* Folded PUD level: nothing is ever allocated or freed for it */
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x, address) do { } while (0)

/*
 * A folded PUD entry always exists and is valid; clearing it is a no-op.
 */
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
static inline void pud_clear(pud_t *pud) { }

#define pud_populate(mm, pmd, pte) do { } while (0)

/* setting a PUD really sets the PMD it is folded onto */
#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })

/* delegate to the folded PMD level */
#define pud_page(pud) (pmd_page((pmd_t){ pud }))
#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
249
250
251
252
253
/* Arch helper (defined in mm code) that writes a PMD's sub-entries */
extern void __set_pmd(pmd_t *pmdptr, unsigned long __pmd);

/* only the first sub-entry value is passed down */
#define set_pmd(pmdptr, pmdval) \
do { \
	__set_pmd((pmdptr), (pmdval).ste[0]); \
} while(0)

/* PMD folded: exactly one PMD per PUD, so the index is always 0 */
#define __pmd_index(address) 0

static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
{
	return (pmd_t *) dir + __pmd_index(address);
}

/* PTE value helpers: the page-frame number sits above PAGE_SHIFT */
#define pte_same(a, b) ((a).pte == (b).pte)
#define pte_page(x) (mem_map + ((unsigned long)(((x).pte >> PAGE_SHIFT))))
#define pte_none(x) (!(x).pte)
#define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT)))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define VMALLOC_VMADDR(x) ((unsigned long) (x))
276
277#endif
278
279
280
281
/*
 * Software PTE bits, aliased directly onto the hardware xAMPRx/DAMPRx
 * protection-register bits (a PTE value is loaded into a DAMPR register
 * by update_mmu_cache() below).
 */
#define _PAGE_BIT_PRESENT xAMPRx_V_BIT /* page is valid/present */
#define _PAGE_BIT_WP DAMPRx_WP_BIT /* write-protected */
#define _PAGE_BIT_NOCACHE xAMPRx_C_BIT /* uncacheable */
#define _PAGE_BIT_SUPER xAMPRx_S_BIT /* supervisor-only */
#define _PAGE_BIT_ACCESSED xAMPRx_RESERVED8_BIT /* software "young" bit */
#define _PAGE_BIT_DIRTY xAMPRx_M_BIT /* modified */
#define _PAGE_BIT_NOTGLOBAL xAMPRx_NG_BIT /* per-context, not global */

#define _PAGE_PRESENT xAMPRx_V
#define _PAGE_WP DAMPRx_WP
#define _PAGE_NOCACHE xAMPRx_C
#define _PAGE_SUPER xAMPRx_S
#define _PAGE_ACCESSED xAMPRx_RESERVED8
#define _PAGE_DIRTY xAMPRx_M
#define _PAGE_NOTGLOBAL xAMPRx_NG

#define _PAGE_RESERVED_MASK (xAMPRx_RESERVED8 | xAMPRx_RESERVED13)

/* PROT_NONE mappings: not present, identified by _PAGE_ACCESSED alone */
#define _PAGE_PROTNONE 0x000

/* bits preserved across pte_modify() */
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/* common base: present, 16Kb superpage size field, data, per-context */
#define __PGPROT_BASE \
	(_PAGE_PRESENT | xAMPRx_SS_16Kb | xAMPRx_D | _PAGE_NOTGLOBAL | _PAGE_ACCESSED)

#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(__PGPROT_BASE)
#define PAGE_COPY __pgprot(__PGPROT_BASE | _PAGE_WP)
#define PAGE_READONLY __pgprot(__PGPROT_BASE | _PAGE_WP)

#define __PAGE_KERNEL (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY)
#define __PAGE_KERNEL_NOCACHE (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_NOCACHE)
#define __PAGE_KERNEL_RO (__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_WP)

/* kernel mappings are shared by all contexts */
#define MAKE_GLOBAL(x) __pgprot((x) & ~_PAGE_NOTGLOBAL)

#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)

#define _PAGE_TABLE (_PAGE_PRESENT | xAMPRx_SS_16Kb)
323
324#ifndef __ASSEMBLY__
325
326
327
328
329
330
/*
 * mmap() protection maps, indexed by the read/write/execute permission
 * bits: __Pxxx are private (copy-on-write) mappings, __Sxxx shared ones.
 * Note the execute bit makes no difference here — this port controls
 * access solely through the WP bit.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
348
349
350
351
352
#undef TEST_ACCESS_OK

/* PTE predicates */
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

/* PMD predicates; a set superpage-size field marks a bad (non-table) PMD */
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_bad(x) (pmd_val(x) & xAMPRx_SS)
#define pmd_clear(xp) do { __set_pmd(xp, 0); } while(0)

/* virtual address of the page table a PMD points to */
#define pmd_page_vaddr(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#endif

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
371
372
373
374
375
376static inline int pte_dirty(pte_t pte) { return (pte).pte & _PAGE_DIRTY; }
377static inline int pte_young(pte_t pte) { return (pte).pte & _PAGE_ACCESSED; }
378static inline int pte_write(pte_t pte) { return !((pte).pte & _PAGE_WP); }
379static inline int pte_special(pte_t pte) { return 0; }
380
381static inline pte_t pte_mkclean(pte_t pte) { (pte).pte &= ~_PAGE_DIRTY; return pte; }
382static inline pte_t pte_mkold(pte_t pte) { (pte).pte &= ~_PAGE_ACCESSED; return pte; }
383static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte |= _PAGE_WP; return pte; }
384static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pte; }
385static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; }
386static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; }
387static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
388
/*
 * Atomically clear the accessed ("young") bit and report whether it was
 * set.  The "dcf" (presumably data-cache flush — matches the pattern in
 * set_pte above) pushes the modified PTE out so the MMU sees it.
 */
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
	asm volatile("dcf %M0" :: "U"(*ptep));
	return i;
}

/*
 * Atomically swap a zero (cleared) value into a PTE and return the old
 * contents, flushing the cacheline afterwards.
 */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long x = xchg(&ptep->pte, 0);
	asm volatile("dcf %M0" :: "U"(*ptep));
	return __pte(x);
}

/*
 * Atomically set the write-protect bit in a PTE, flushing the cacheline
 * afterwards.
 */
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_bit(_PAGE_BIT_WP, ptep);
	asm volatile("dcf %M0" :: "U"(*ptep));
}
408
409
410
411
/* Mark a protection value uncacheable */
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE))

/* Build a PTE from a struct page plus a protection value */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
/*
 * NOTE(review): pte_low and _PAGE_PSE are not defined anywhere in this
 * port — mk_pte_huge looks like a vestigial copy from another arch and
 * would not compile if used; confirm it has no callers before removing.
 */
#define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)

/* Build a PTE from a physical address plus a protection value */
#define mk_pte_phys(physpage, pgprot) pfn_pte((physpage) >> PAGE_SHIFT, pgprot)
424
425static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
426{
427 pte.pte &= _PAGE_CHG_MASK;
428 pte.pte |= pgprot_val(newprot);
429 return pte;
430}
431
432
/* Index of the PGD entry covering an address */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_index_k(addr) pgd_index(addr)

/* Index of a PTE within its page table */
#define __pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
/* PTE pointer for kernel-only walks (table guaranteed to be lowmem) */
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))

/*
 * With CONFIG_HIGHPTE the page table may live in highmem and must be
 * temporarily kmapped; pte_unmap() releases the mapping.
 */
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do { } while (0)
#endif
459
460
461
462
463
464
465
466
/*
 * Encoding of a swap entry inside a non-present PTE:
 *   bits 1-5 : swap type (32 types)
 *   bits 7.. : swap offset
 * Bit 0 (the present bit) stays clear so the entry never looks mapped.
 */
#define __swp_type(x) (((x).val >> 1) & 0x1f)
#define __swp_offset(x) ((x).val >> 7)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 7) })
#define __pte_to_swp_entry(_pte) ((swp_entry_t) { (_pte).pte })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

#define PageSkip(page) (0)
#define kern_addr_valid(addr) (1)

/* this port supplies its own versions of these pgtable operations */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>
482
483
484
485
/*
 * Called after a page fault is resolved.  Walks the current mm's page
 * tables for the faulting address and loads the SCR0/SCR1 scratch
 * registers and DAMPR4/DAMPR5 protection registers with the table entry
 * — presumably caching it so the TLB-miss handler can resolve the
 * address without a full walk (TODO: confirm against the FRV manual).
 */
static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm;
	unsigned long ampr;

	mm = current->mm;
	if (mm) {
		/* walk down to the folded PMD entry for this address */
		pgd_t *pge = pgd_offset(mm, address);
		pud_t *pue = pud_offset(pge, address);
		pmd_t *pme = pmd_offset(pue, address);

		/* keep the entry's address bits, force our own attributes:
		 * locked, 16Kb size, supervisor, cached, valid */
		ampr = pme->ste[0] & 0xffffff00;
		ampr |= xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C |
			xAMPRx_V;
	} else {
		/* no user mm (kernel thread): load values that match nothing */
		address = ULONG_MAX;
		ampr = 0;
	}

	asm volatile("movgs %0,scr0\n"
		     "movgs %0,scr1\n"
		     "movgs %1,dampr4\n"
		     "movgs %1,dampr5\n"
		     :
		     : "r"(address), "r"(ampr)
		     );
}
513
#ifdef CONFIG_PROC_FS
/* /proc/<pid>/status helper: reports this mm's FRV context number */
extern char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer);
#endif

/* set up the kmem caches used for page-table pages (defined in mm code) */
extern void __init pgtable_cache_init(void);

#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */

#ifndef __ASSEMBLY__
extern void __init paging_init(void);
#endif
/* this port provides its own arch_get_unmapped_area() */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ASM_PGTABLE_H */
529