1
2
3
4
5
6
7
8
9
10
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm/page.h>
#include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>

/*
 * Protection-ring levels used in the ring field of a PTE.  With an MMU,
 * user mappings get ring 1 so that kernel (ring 0) pages are protected
 * from user access; without an MMU there is no protection and both use
 * ring 0.
 */
#ifdef CONFIG_MMU
#define USER_RING 1	/* user ring level */
#else
#define USER_RING 0	/* no distinct user ring without an MMU */
#endif
#define KERNEL_RING 0	/* kernel ring level */
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
/*
 * Two-level page-table geometry: each pgd entry maps 4 MiB
 * (22-bit shift), and both levels hold 1024 entries, so one pgd page
 * covers the full 32-bit address space.
 */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#define PTRS_PER_PTE 1024		/* entries per second-level table */
#define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PGD 1024		/* entries per top-level table */
#define PGD_ORDER 0			/* pgd fits in a single page */
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
68
#ifdef CONFIG_MMU

/*
 * Virtual memory area for vmalloc: 256 MiB below the cached KSEG
 * mapping, about 128 MiB in size.  The two TLBTEMP regions directly
 * after VMALLOC_END are scratch mappings, each spaced one D-cache way
 * apart (used by cache-alias handling code elsewhere — the consumers
 * are not visible in this file).
 */
#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000)
#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF)
#define TLBTEMP_BASE_1 (VMALLOC_END + 1)
#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
/* Reserve enough space for whichever cache needs more temporary room. */
#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
#else
#define TLBTEMP_SIZE ICACHE_WAY_SIZE
#endif

#else

/* Without an MMU the whole address space counts as "vmalloc". */
#define VMALLOC_START __XTENSA_UL_CONST(0)
#define VMALLOC_END __XTENSA_UL_CONST(0xffffffff)

#endif
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
/*
 * PTE bit layout (as established by the definitions below):
 *   bits 0..3  hardware attributes: bit 0 executable, bit 1 writable,
 *              bits 2..3 cache-attribute (CA) field
 *   bit  4     ring: user (1) / kernel (0)
 *   bit  6     software writable
 *   bit  7     software dirty
 *   bit  8     software accessed (young)
 */
#define _PAGE_ATTRIB_MASK 0xf	/* the hardware attribute bits */

#define _PAGE_HW_EXEC (1<<0)	/* hardware: page is executable */
#define _PAGE_HW_WRITE (1<<1)	/* hardware: page is writable */

/* Cache-attribute encodings for bits 2..3. */
#define _PAGE_CA_BYPASS (0<<2)	/* bypass, i.e. uncached */
#define _PAGE_CA_WB (1<<2)	/* write-back */
#define _PAGE_CA_WT (2<<2)	/* write-through */
#define _PAGE_CA_MASK (3<<2)
#define _PAGE_CA_INVALID (3<<2)	/* CA value marking an invalid page */

/*
 * The encoding of "hardware valid" and "no access" depends on the
 * hardware generation: pre-2000 cores use a dedicated valid bit, newer
 * cores express both through the attribute field.
 */
#if XCHAL_HW_VERSION_MAJOR < 2000
#define _PAGE_HW_VALID 0x01
#define _PAGE_NONE 0x04
#else
#define _PAGE_HW_VALID 0x00
#define _PAGE_NONE 0x0f
#endif

#define _PAGE_USER (1<<4)	/* ring bit: user-accessible page */

/* Software-only bits, not interpreted by hardware. */
#define _PAGE_WRITABLE_BIT 6
#define _PAGE_WRITABLE (1<<6)	/* software: page writable */
#define _PAGE_DIRTY (1<<7)	/* software: page dirty */
#define _PAGE_ACCESSED (1<<8)	/* software: page accessed */

#ifdef CONFIG_MMU

/* Bits that pte_modify() must preserve (pfn + accessed/dirty state). */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
/* A present page: hardware-valid, cached write-back, accessed. */
#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)

#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)

/*
 * Page-directory entries: if the D-cache can alias (way size larger
 * than a page) the directory is accessed uncached, otherwise
 * write-back cached.
 */
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
#else
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif

#else

/* No-MMU: protections carry no information. */
# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
# define PAGE_NONE __pgprot(0)
# define PAGE_SHARED __pgprot(0)
# define PAGE_COPY __pgprot(0)
# define PAGE_READONLY __pgprot(0)
# define PAGE_KERNEL __pgprot(0)

#endif
200
201
202
203
204
205
206
207
/*
 * Protection maps indexed by the xwr bits of a vma: __Pxxx for private
 * mappings, __Sxxx for shared ones.  Writable private mappings use
 * PAGE_COPY (copy-on-write); write implies read, since write-only is
 * not expressible here.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
225
#ifndef __ASSEMBLY__

/* Diagnostics for corrupted page-table entries. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* One zero-filled page shared by all zero mappings (defined elsewhere). */
extern unsigned long empty_zero_page[1024];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#ifdef CONFIG_MMU
/* The kernel's initial page directory. */
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
extern void paging_init(void);
#else
# define swapper_pg_dir NULL
static inline void paging_init(void) { }	/* nothing to set up */
#endif
static inline void pgtable_cache_init(void) { }	/* no pgtable caches */
245
246
247
248
/* Virtual address / struct page of the pte table held in a pmd entry. */
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))

/*
 * An empty pte slot is encoded as CA-invalid plus the user bit.  On
 * pre-2000 hardware, "present" simply means the CA field is not the
 * invalid value; newer hardware must additionally treat the _PAGE_NONE
 * attribute encoding as present (for PROT_NONE pages).
 */
# define pte_none(pte)	 (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
#if XCHAL_HW_VERSION_MAJOR < 2000
# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
#else
# define pte_present(pte)						\
	(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)		\
	 || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
#endif
/* Clearing writes back the "none" encoding through update_pte(). */
#define pte_clear(mm,addr,ptep)						\
	do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)

#define pmd_none(pmd)	 (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_bad(pmd)	 (pmd_val(pmd) & ~PAGE_MASK)
#define pmd_clear(pmdp)	 do { set_pmd(pmdp, __pmd(0)); } while (0)
270
271static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
272static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
273static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
274static inline int pte_special(pte_t pte) { return 0; }
275
276static inline pte_t pte_wrprotect(pte_t pte)
277 { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
278static inline pte_t pte_mkclean(pte_t pte)
279 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
280static inline pte_t pte_mkold(pte_t pte)
281 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
282static inline pte_t pte_mkdirty(pte_t pte)
283 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
284static inline pte_t pte_mkyoung(pte_t pte)
285 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
286static inline pte_t pte_mkwrite(pte_t pte)
287 { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
288static inline pte_t pte_mkspecial(pte_t pte)
289 { return pte; }
290
/* Uncached variant of a protection: clear the CA field (bypass cache). */
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))

/*
 * Conversions between pte values, page-frame numbers and struct page.
 * The pfn lives in the high bits of the pte, above PAGE_SHIFT.
 */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b) (pte_val(a) == pte_val(b))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
303
304static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
305{
306 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
307}
308
309
310
311
312
313
/*
 * Store a pte and make it visible to the hardware table walk.  When the
 * D-cache can alias (way size > page size) and is write-back, the cache
 * line holding the entry is written back with "dhwb" — presumably
 * because the TLB refill reads the table through a non-coherent
 * (bypass) mapping, cf. _PAGE_DIRECTORY above; confirm against the
 * refill handler.
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
#endif

}
322
struct mm_struct;

/* Install a pte; all variants funnel through update_pte() so the
 * cache-writeback handling there is applied uniformly. */
static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

/* Install a pmd entry; a plain store suffices at this level. */
static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
}
341
342struct vm_area_struct;
343
344static inline int
345ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
346 pte_t *ptep)
347{
348 pte_t pte = *ptep;
349 if (!pte_young(pte))
350 return 0;
351 update_pte(ptep, pte_mkold(pte));
352 return 1;
353}
354
355static inline pte_t
356ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
357{
358 pte_t pte = *ptep;
359 pte_clear(mm, addr, ptep);
360 return pte;
361}
362
363static inline void
364ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
365{
366 pte_t pte = *ptep;
367 update_pte(ptep, pte_wrprotect(pte));
368}
369
370
/* Walk helpers: locate the table entry covering a virtual address. */

/* Kernel addresses are looked up in init_mm's page directory. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* pgd entry of @address within @mm. */
#define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* Folded pmd level: the pmd "table" is the pgd entry itself. */
#define pmd_offset(dir,address) ((pmd_t*)(dir))

/* pte entry of @addr within the table referenced by pmd *@dir. */
#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)		do { } while (0)
387
388
389
390
391
/*
 * Swap-entry encoding inside a (non-present) pte:
 *   bits 6..10  swap type (5 bits, matching SWP_TYPE_BITS)
 *   bits 11..31 swap offset
 * The low bits carry _PAGE_CA_INVALID | _PAGE_USER so the entry never
 * tests as present.
 */
#define SWP_TYPE_BITS		5
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(entry)	(((entry).val >> 6) & 0x1f)
#define __swp_offset(entry)	((entry).val >> 11)
#define __swp_entry(type,offs)	\
	((swp_entry_t){((type) << 6) | ((offs) << 11) | \
	 _PAGE_CA_INVALID | _PAGE_USER})
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
402
403#endif
404
405
#ifdef __ASSEMBLY__

/*
 * Assembly counterparts of the table walk above, for use in low-level
 * exception/TLB-miss code.  "extui rt, rs, shift, width" extracts a
 * bit field from rs into rt.
 */

/* rt := pgd index of virtual address rs. */
#define _PGD_INDEX(rt,rs)	extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
/* rt := pte index of virtual address rs. */
#define _PTE_INDEX(rt,rs)	extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

/* mm := &mm->pgd[pgd_index(adr)]; tmp is clobbered. */
#define _PGD_OFFSET(mm,adr,tmp)		l32i mm, mm, MM_PGD; \
					_PGD_INDEX(tmp, adr); \
					addx4 mm, tmp, mm

/* pmd := &pte_table(pmd)[pte_index(adr)]; the srli/slli pair masks the
 * low bits of the pmd value (equivalent of & PAGE_MASK). */
#define _PTE_OFFSET(pmd,adr,tmp)	_PTE_INDEX(tmp, adr); \
					srli pmd, pmd, PAGE_SHIFT; \
					slli pmd, pmd, PAGE_SHIFT; \
					addx4 pmd, tmp, pmd
430
#else

/* All kernel virtual addresses are considered valid. */
#define kern_addr_valid(addr)	(1)

/* Arch hook called after a pte is installed (defined elsewhere). */
extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t *ptep);

typedef pte_t *pte_addr_t;

#endif /* !__ASSEMBLY__ */

/* Tell asm-generic/pgtable.h which helpers this arch provides itself. */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME

/* Arch supplies its own arch_get_unmapped_area (cache aliasing). */
#define HAVE_ARCH_UNMAPPED_AREA

#include <asm-generic/pgtable.h>

#endif /* _XTENSA_PGTABLE_H */
455