#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

#include <asm/page.h>
#include <asm/kmem_layout.h>
#include <asm-generic/pgtable-nopmd.h>

/*
 * We only use two ring levels, user and kernel space.
 */

#ifdef CONFIG_MMU
#define USER_RING 1 /* user ring level */
#else
#define USER_RING 0
#endif
#define KERNEL_RING 0 /* kernel ring level */
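
/* The ring index lives in PTE bits 5..4; _PAGE_USER below encodes ring 1. */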

/*
 * The Xtensa Linux port uses a two-level page table: the PMD level of
 * the generic layout is folded away (see pgtable-nopmd.h above). A PGD
 * is one 4 kB page of 1024 (PTRS_PER_PGD) pointers to PTE tables; each
 * PTE table is one 4 kB page of 1024 (PTRS_PER_PTE) entries, so a
 * single PGD entry covers PGDIR_SIZE = 4 MB of virtual address space.
 */

#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
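
/*
 * Example: a virtual address splits into bits 31..22 (PGD index),
 * bits 21..12 (PTE index) and bits 11..0 (page offset). For the address
 * 0x23456789 the PGD index is 0x23456789 >> PGDIR_SHIFT = 0x8d, the
 * PTE index is (0x23456789 >> PAGE_SHIFT) & (PTRS_PER_PTE - 1) = 0x56,
 * and the page offset is 0x789.
 */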

/*
 * Entries per page directory level: with the folded two-level layout
 * there is no physical PMD directory.
 */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PGD 1024
#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
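
/*
 * With the usual Xtensa MMU layout (TASK_SIZE of 0x40000000) only the
 * first 256 of the 1024 PGD entries map user space; the remaining
 * entries cover the kernel regions above TASK_SIZE.
 */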

#ifdef CONFIG_MMU
/*
 * Virtual memory area. We keep a distance to other memory regions to
 * be on the safe side. We also use this area for cache aliasing.
 */
#define VMALLOC_START (XCHAL_KSEG_CACHED_VADDR - 0x10000000)
#define VMALLOC_END (VMALLOC_START + 0x07FEFFFF)
#define TLBTEMP_BASE_1 (VMALLOC_START + 0x08000000)
#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
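
/*
 * TLBTEMP_BASE_1/2 are scratch virtual ranges above the vmalloc window;
 * the cache-alias handling code in arch/xtensa/mm uses them for
 * short-lived mappings placed at a controlled cache color. TLBTEMP_SIZE
 * below is sized to cover the larger of the I/D cache ways.
 */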
#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
#else
#define TLBTEMP_SIZE ICACHE_WAY_SIZE
#endif

#else /* !CONFIG_MMU */

#define VMALLOC_START __XTENSA_UL_CONST(0)
#define VMALLOC_END __XTENSA_UL_CONST(0xffffffff)

#endif /* CONFIG_MMU */

/*
 * PTE layout (see the bit definitions below):
 *
 *   bits 31..12  PPN, the physical page number
 *   bit  8       software: page accessed (young)
 *   bit  7       software: page dirty
 *   bit  6       software: page writable
 *   bits 5..4    ring (RI): 0 is kernel, 1 is user
 *   bits 3..2    cache attribute (CA): bypass, write-back, write-through;
 *                the fourth encoding is invalid and marks special ptes
 *   bit  1       hardware: page writable
 *   bit  0       hardware: page executable
 *
 * A cleared pte is _PAGE_CA_INVALID | _PAGE_USER; swap entries reuse the
 * invalid cache attribute and store the swap type in bits 10..6 and the
 * swap offset in bits 31..11 (see the __swp_* macros at the end of this
 * file).
 */
#define _PAGE_ATTRIB_MASK 0xf

#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */

/* Cache attributes; none of these modes includes MP coherency. */
#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
#define _PAGE_CA_WB (1<<2) /* write-back */
#define _PAGE_CA_WT (2<<2) /* write-through */
#define _PAGE_CA_MASK (3<<2)
#define _PAGE_CA_INVALID (3<<2)

/* We use invalid attribute values to distinguish special pte entries. */
#if XCHAL_HW_VERSION_MAJOR < 2000
#define _PAGE_HW_VALID 0x01 /* older hardware needed this bit set */
#define _PAGE_NONE 0x04
#else
#define _PAGE_HW_VALID 0x00
#define _PAGE_NONE 0x0f
#endif

#define _PAGE_USER (1<<4) /* user access (ring=1) */

/* Software-managed bits */
#define _PAGE_WRITABLE_BIT 6
#define _PAGE_WRITABLE (1<<6) /* software: page writable */
#define _PAGE_DIRTY (1<<7) /* software: page dirty */
#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
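
/*
 * Note the hardware/software permission split: the PAGE_SHARED/PAGE_COPY
 * protections below carry only the software _PAGE_WRITABLE bit, so the
 * first store to a page still faults; the fault path can then mark the
 * pte dirty and grant _PAGE_HW_WRITE, while pte_wrprotect() and
 * pte_mkclean() below revoke it again.
 */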

#ifdef CONFIG_MMU

#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)

#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
#else
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif

#else /* !CONFIG_MMU */

# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
# define PAGE_NONE __pgprot(0)
# define PAGE_SHARED __pgprot(0)
# define PAGE_COPY __pgprot(0)
# define PAGE_READONLY __pgprot(0)
# define PAGE_KERNEL __pgprot(0)

#endif /* CONFIG_MMU */
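
/*
 * Without an MMU there is no hardware page protection, so all of the
 * protection macros above collapse to an empty pgprot.
 */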

/*
 * On certain Xtensa MMU configurations the hardware cannot enforce an
 * execute-only or write-implies-read policy, so the protection map below
 * is the closest approximation. See the protection_map[] array in
 * linux/mm/mmap.c, which consumes these definitions.
 */
	/* xwr */
#define __P000 PAGE_NONE /* private --- */
#define __P001 PAGE_READONLY /* private r-- */
#define __P010 PAGE_COPY /* private -w- */
#define __P011 PAGE_COPY /* private rw- */
#define __P100 PAGE_READONLY_EXEC /* private --x */
#define __P101 PAGE_READONLY_EXEC /* private r-x */
#define __P110 PAGE_COPY_EXEC /* private -wx */
#define __P111 PAGE_COPY_EXEC /* private rwx */

#define __S000 PAGE_NONE /* shared --- */
#define __S001 PAGE_READONLY /* shared r-- */
#define __S010 PAGE_SHARED /* shared -w- */
#define __S011 PAGE_SHARED /* shared rw- */
#define __S100 PAGE_READONLY_EXEC /* shared --x */
#define __S101 PAGE_READONLY_EXEC /* shared r-x */
#define __S110 PAGE_SHARED_EXEC /* shared -wx */
#define __S111 PAGE_SHARED_EXEC /* shared rwx */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern unsigned long empty_zero_page[1024];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#ifdef CONFIG_MMU
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
extern void paging_init(void);
#else
# define swapper_pg_dir NULL
static inline void paging_init(void) { }
#endif
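
/*
 * swapper_pg_dir is the kernel's page directory: a single page holding
 * PTRS_PER_PGD (1024) pgd_t entries.
 */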

/*
 * The pmd contains the kernel virtual address of the pte page.
 */
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))

/*
 * pte status.
 */
# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
#if XCHAL_HW_VERSION_MAJOR < 2000
# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
#else
# define pte_present(pte) \
	(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \
	 || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
#endif
#define pte_clear(mm,addr,ptep) \
	do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)
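
/*
 * Three special encodings follow from the above: a cleared pte is exactly
 * _PAGE_CA_INVALID | _PAGE_USER ("none"); swap entries keep those bits
 * but add a type and offset in the higher bits (see the __swp_* macros
 * below); PROT_NONE mappings use the _PAGE_NONE attribute so that
 * pte_present() still reports them as present.
 */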

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)

static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
	{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }

#define pgprot_noncached(prot) \
	((__pgprot((pgprot_val(prot) & ~_PAGE_CA_MASK) | \
		   _PAGE_CA_BYPASS)))
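
/*
 * pgprot_noncached() above swaps the cache attribute for bypass; it is
 * used when mapping memory that must not be cached, e.g. device memory.
 */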

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b) (pte_val(a) == pte_val(b))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
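
/*
 * pte_modify() keeps the page frame number and the accessed/dirty bits
 * (_PAGE_CHG_MASK) and replaces all other bits with the new protection.
 */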

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	/* Write back the dcache line so the hardware refill sees the pte. */
	__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
#endif
}

struct mm_struct;

static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
}

struct vm_area_struct;

static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	update_pte(ptep, pte_mkold(pte));
	return 1;
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	update_pte(ptep, pte_wrprotect(pte));
}

/*
 * Encode and decode a swap entry. The format follows from the macros
 * below: bits 2..4 carry _PAGE_CA_INVALID | _PAGE_USER, so a swap pte
 * is neither none nor present; bits 6..10 hold the swap type
 * (SWP_TYPE_BITS) and bits 11..31 hold the swap offset.
 */
#define SWP_TYPE_BITS 5
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
#define __swp_offset(entry) ((entry).val >> 11)
#define __swp_entry(type,offs) \
	((swp_entry_t){((type) << 6) | ((offs) << 11) | \
	 _PAGE_CA_INVALID | _PAGE_USER})
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
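
/*
 * Example: __swp_entry(2, 0x1000) is (2 << 6) | (0x1000 << 11) |
 * _PAGE_CA_INVALID | _PAGE_USER = 0x0080009c; __swp_type() and
 * __swp_offset() recover 2 and 0x1000 from it.
 */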

#endif /* !defined (__ASSEMBLY__) */

#ifdef __ASSEMBLY__

/*
 * Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
 *                _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
 *                _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long).
 *
 * Note: these require an additional temporary register, which can be the
 * same register that holds the address.
 */
#define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
#define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

#define _PGD_OFFSET(mm,adr,tmp) l32i mm, mm, MM_PGD; \
				_PGD_INDEX(tmp, adr); \
				addx4 mm, tmp, mm

#define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \
				 srli pmd, pmd, PAGE_SHIFT; \
				 slli pmd, pmd, PAGE_SHIFT; \
				 addx4 pmd, tmp, pmd

#else

#define kern_addr_valid(addr) (1)

extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t *ptep);

typedef pte_t *pte_addr_t;

#endif /* __ASSEMBLY__ */

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME

/*
 * We provide our own get_unmapped_area to cope with cache aliasing of
 * SHM areas for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _XTENSA_PGTABLE_H */