1
2
3
4
5
6
7
8
9
10
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

/* Two-level page tables: the pmd level is folded into the pgd. */
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>

/*
 * Xtensa protection rings as stored in the PTE: the kernel runs in
 * ring 0, user space in ring 1.
 */
#define USER_RING 1
#define KERNEL_RING 0
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/*
 * Two-level layout: each pgd entry maps 4 MB (1 << 22) of virtual
 * address space.
 */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/*
 * 1024 pte entries per page table and 1024 pgd entries; the pgd
 * occupies a single page (order 0).
 */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PGD 1024
#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)

/*
 * Kernel virtual address layout: the vmalloc area is followed by two
 * temporary TLB-mapping windows (TLBTEMP_BASE_*).
 * NOTE(review): window sizes inferred from the addresses below — the
 * users of TLBTEMP_BASE_1/2 are not visible in this file.
 */
#define VMALLOC_START 0xC0000000
#define VMALLOC_END 0xC7FEFFFF
#define TLBTEMP_BASE_1 0xC7FF0000
#define TLBTEMP_BASE_2 0xC7FF8000
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
/* Hardware-interpreted PTE attribute bits occupy the low nibble. */
#define _PAGE_ATTRIB_MASK 0xf

#define _PAGE_HW_EXEC (1<<0)	/* hardware: page is executable */
#define _PAGE_HW_WRITE (1<<1)	/* hardware: page is writable */

/* Cache attribute encodings, bits 2..3. */
#define _PAGE_CA_BYPASS (0<<2)	/* cache bypass */
#define _PAGE_CA_WB (1<<2)	/* write-back */
#define _PAGE_CA_WT (2<<2)	/* write-through */
#define _PAGE_CA_MASK (3<<2)
#define _PAGE_CA_INVALID (3<<2)	/* invalid attribute: access faults */

/*
 * The encoding of "valid" and "none" PTEs differs between MMU
 * hardware generations (XCHAL_HW_VERSION_MAJOR).
 */
#if XCHAL_HW_VERSION_MAJOR < 2000
#define _PAGE_HW_VALID 0x01
#define _PAGE_NONE 0x04
#else
#define _PAGE_HW_VALID 0x00
#define _PAGE_NONE 0x0f
#endif
/*
 * NOTE(review): _PAGE_FILE shares bit 1 with _PAGE_HW_WRITE.  It is only
 * combined with _PAGE_CA_INVALID (see pgoff_to_pte below), i.e. in PTEs
 * the hardware never honors, so the overlap appears intentional — confirm
 * against the fault paths.
 */
#define _PAGE_FILE (1<<1)

#define _PAGE_USER (1<<4)	/* ring bit: 1 = user, 0 = kernel */

/* Software-maintained bits, ignored by the hardware walker. */
#define _PAGE_WRITABLE_BIT 6
#define _PAGE_WRITABLE (1<<6)	/* software: page writable */
#define _PAGE_DIRTY (1<<7)	/* software: page dirty */
#define _PAGE_ACCESSED (1<<8)	/* software: page accessed */

#ifdef CONFIG_MMU

/* Bits preserved across pte_modify(). */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)

/* Protection encodings for the __P/__S tables below. */
#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)

/*
 * Page-directory entries bypass the cache when the data cache can alias
 * (way size larger than a page); otherwise write-back is used.
 */
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
#else
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif

#else /* !CONFIG_MMU: no protection bits to encode */

# define PAGE_NONE __pgprot(0)
# define PAGE_SHARED __pgprot(0)
# define PAGE_COPY __pgprot(0)
# define PAGE_READONLY __pgprot(0)
# define PAGE_KERNEL __pgprot(0)

#endif
183
184
185
186
187
188
189
190
/*
 * Generic protection maps indexed by (xwr) permission bits:
 * __Pxxx for private (copy-on-write) mappings, __Sxxx for shared ones.
 * Private writable mappings resolve to PAGE_COPY*.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
208
209#ifndef __ASSEMBLY__
210
/* Diagnostics for corrupted page-table entries. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Page of zeroes used to back read faults on anonymous mappings. */
extern unsigned long empty_zero_page[1024];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#ifdef CONFIG_MMU
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
extern void paging_init(void);
extern void pgtable_cache_init(void);
#else
# define swapper_pg_dir NULL
static inline void paging_init(void) { }
static inline void pgtable_cache_init(void) { }
#endif

/* The pmd holds the kernel virtual address of its pte page. */
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))

/*
 * An empty PTE is encoded as invalid cache attribute + user ring;
 * pte_clear() below stores exactly this value.  Whether a PTE counts as
 * present depends on the hardware generation's _PAGE_NONE encoding.
 */
# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
#if XCHAL_HW_VERSION_MAJOR < 2000
# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
#else
# define pte_present(pte) \
	(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \
	 || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
#endif
#define pte_clear(mm,addr,ptep) \
	do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)

/* A valid pmd must have a page-aligned, non-zero table address. */
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
254
/* PTE predicates; all test the software-maintained bits. */
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }	/* no special PTEs */
260
261static inline pte_t pte_wrprotect(pte_t pte)
262 { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
263static inline pte_t pte_mkclean(pte_t pte)
264 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
265static inline pte_t pte_mkold(pte_t pte)
266 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
267static inline pte_t pte_mkdirty(pte_t pte)
268 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
269static inline pte_t pte_mkyoung(pte_t pte)
270 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
271static inline pte_t pte_mkwrite(pte_t pte)
272 { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
273static inline pte_t pte_mkspecial(pte_t pte)
274 { return pte; }
275
276
277
278
279
280
/* Conversions between PTEs, page-frame numbers, and struct pages. */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b) (pte_val(a) == pte_val(b))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
286
287static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
288{
289 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
290}
291
292
293
294
295
296
/*
 * Store a PTE and, when the data cache is write-back and can alias
 * (way size larger than a page), write the cache line holding the PTE
 * back to memory so the hardware table walker sees the new value.
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
#endif

}
305
306struct mm_struct;
307
308static inline void
309set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
310{
311 update_pte(ptep, pteval);
312}
313
314
315static inline void
316set_pmd(pmd_t *pmdp, pmd_t pmdval)
317{
318 *pmdp = pmdval;
319}
320
321struct vm_area_struct;
322
323static inline int
324ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
325 pte_t *ptep)
326{
327 pte_t pte = *ptep;
328 if (!pte_young(pte))
329 return 0;
330 update_pte(ptep, pte_mkold(pte));
331 return 1;
332}
333
334static inline pte_t
335ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
336{
337 pte_t pte = *ptep;
338 pte_clear(mm, addr, ptep);
339 return pte;
340}
341
342static inline void
343ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
344{
345 pte_t pte = *ptep;
346 update_pte(ptep, pte_wrprotect(pte));
347}
348
349
/* Kernel page-table lookup via init_mm. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in a process's page directory. */
#define pgd_offset(mm,address) ((mm)->pgd + pgd_index(address))

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* The pmd level is folded into the pgd: the pgd entry IS the pmd. */
#define pmd_offset(dir,address) ((pmd_t*)(dir))

/* Find a pte within the page table addressed by a pmd entry. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
#define pte_unmap(pte) do { } while (0)	/* no highmem mapping to undo */
366
367
368
369
370
/*
 * Swap entries are encoded in (invalid) PTEs:
 *   bits 0..4  _PAGE_CA_INVALID | _PAGE_USER (keeps pte_present() false)
 *   bits 6..10 swap type (5 bits)
 *   bits 11..31 swap offset
 */
#define SWP_TYPE_BITS 5
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
#define __swp_offset(entry) ((entry).val >> 11)
#define __swp_entry(type,offs) \
	((swp_entry_t){((type) << 6) | ((offs) << 11) | \
	 _PAGE_CA_INVALID | _PAGE_USER})
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

/* Nonlinear file mappings: file offset stored in bits 6..31 (26 bits). */
#define PTE_FILE_MAX_BITS 26
#define pte_to_pgoff(pte) (pte_val(pte) >> 6)
#define pgoff_to_pte(off) \
	((pte_t) { ((off) << 6) | _PAGE_CA_INVALID | _PAGE_FILE | _PAGE_USER })
386
387#endif
388
389
390#ifdef __ASSEMBLY__
391
392
393
394
395
396
397
398
399
400
401
402
/*
 * Assembler helpers for the exception/TLB-miss paths.  Each expands to a
 * short instruction sequence; rt/tmp are scratch registers.
 */

/* rt = pgd index of the virtual address in rs */
#define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
/* rt = pte index of the virtual address in rs */
#define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

/* mm = &pgd[pgd_index(adr)]; loads mm->pgd then indexes by 4-byte entries */
#define _PGD_OFFSET(mm,adr,tmp) l32i mm, mm, MM_PGD; \
	_PGD_INDEX(tmp, adr); \
	addx4 mm, tmp, mm

/* pmd = &pte_table[pte_index(adr)]; masks the pmd to its page frame first */
#define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \
	srli pmd, pmd, PAGE_SHIFT; \
	slli pmd, pmd, PAGE_SHIFT; \
	addx4 pmd, tmp, pmd
414
415#else
416
/* All kernel addresses are considered valid on this architecture. */
#define kern_addr_valid(addr) (1)

/* Called after a PTE update to synchronize MMU/cache state (defined
 * elsewhere in the architecture code). */
extern void update_mmu_cache(struct vm_area_struct * vma,
		unsigned long address, pte_t *ptep);

typedef pte_t *pte_addr_t;
423
424#endif
425
/* This file supplies its own versions of these generic pte operations. */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
/* NOTE(review): __HAVE_ARCH_PTEP_MKDIRTY is claimed but no ptep_mkdirty()
 * is visible in this file — confirm it is provided elsewhere. */
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME

/* Arch-specific get_unmapped_area — presumably for cache-alias-aware
 * mapping placement; the implementation is not in this file. */
#define HAVE_ARCH_UNMAPPED_AREA
435
436#include <asm-generic/pgtable.h>
437
438#endif
439