/*
 * Xtensa page-table definitions (Linux, arch/xtensa).
 */
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>

/*
 * Xtensa MMU ring (privilege level) used in PTE entries.  With a full
 * MMU, user mappings live in ring 1 and the kernel in ring 0; without
 * an MMU there is no protection, so everything uses ring 0.
 */
#ifdef CONFIG_MMU
#define USER_RING 1		/* user ring level */
#else
#define USER_RING 0		/* no protection without an MMU */
#endif
#define KERNEL_RING 0		/* kernel ring level */
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
/*
 * Two-level page tables: the page directory has 1024 entries, each
 * covering a 4 MiB region (PGDIR_SHIFT 22 = 12-bit page offset +
 * 10-bit pte index), and each pte page holds 1024 entries.
 */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/* Entries per level of the page table. */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PGD 1024
#define PGD_ORDER 0		/* the pgd occupies a single page */
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
66
67
68
69
70
/*
 * Kernel virtual layout: the vmalloc() area, followed by two fixed
 * TLBTEMP windows used for temporary mappings when dealing with
 * cache aliasing (a second window starts one data-cache way above
 * the first).
 */
#define VMALLOC_START 0xC0000000
#define VMALLOC_END 0xC7FEFFFF
#define TLBTEMP_BASE_1 0xC7FF0000
#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
/* The TLBTEMP region must cover the larger of 2 D-cache ways or 1 I-cache way */
#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
#else
#define TLBTEMP_SIZE ICACHE_WAY_SIZE
#endif
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
/*
 * PTE bit layout.  The low four bits form the hardware "attribute"
 * field interpreted by the TLB; higher bits are software-defined.
 */
#define _PAGE_ATTRIB_MASK 0xf

#define _PAGE_HW_EXEC (1<<0)	/* hardware: page is executable */
#define _PAGE_HW_WRITE (1<<1)	/* hardware: page is writable */

/* Cache-attribute encodings occupy attribute bits 2..3. */
#define _PAGE_CA_BYPASS (0<<2)	/* bypass the cache */
#define _PAGE_CA_WB (1<<2)	/* write-back cached */
#define _PAGE_CA_WT (2<<2)	/* write-through cached */
#define _PAGE_CA_MASK (3<<2)
#define _PAGE_CA_INVALID (3<<2)	/* invalid mapping */

/*
 * Encoding of a valid pte and of PROT_NONE differs between hardware
 * generations: older cores (HW version < 2000) have a dedicated
 * valid bit, newer ones fold validity into the cache attribute.
 */
#if XCHAL_HW_VERSION_MAJOR < 2000
#define _PAGE_HW_VALID 0x01
#define _PAGE_NONE 0x04
#else
#define _PAGE_HW_VALID 0x00
#define _PAGE_NONE 0x0f
#endif

#define _PAGE_USER (1<<4)	/* accessible from USER_RING */

/* Software-managed bits (ignored by hardware). */
#define _PAGE_WRITABLE_BIT 6
#define _PAGE_WRITABLE (1<<6)	/* software: page writable */
#define _PAGE_DIRTY (1<<7)	/* software: page dirty */
#define _PAGE_ACCESSED (1<<8)	/* software: page accessed */
#ifdef CONFIG_MMU

/* Bits preserved by pte_modify() when changing protections. */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)

/*
 * Protection combinations handed to the generic mm code.  Note that
 * user-writable pages carry the software _PAGE_WRITABLE bit only;
 * the hardware _PAGE_HW_WRITE bit is absent here (NOTE(review):
 * presumably set later at fault time together with _PAGE_DIRTY —
 * confirm against the fault handler).
 */
#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)

/*
 * Attributes for page-directory entries: bypass the cache when the
 * data cache can alias (way size > page size), write-back otherwise
 * (NOTE(review): presumably to keep pte updates visible without
 * aliasing hazards — confirm).
 */
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
#else
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif

#else /* !CONFIG_MMU */

/* No MMU: protection values are meaningless, provide empty stubs. */
# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
# define PAGE_NONE __pgprot(0)
# define PAGE_SHARED __pgprot(0)
# define PAGE_COPY __pgprot(0)
# define PAGE_READONLY __pgprot(0)
# define PAGE_KERNEL __pgprot(0)

#endif
189
190
191
192
193
194
195
196
/*
 * Legacy protection maps, indexed by the low read/write/exec bits of
 * vm_flags: __P* for private (copy-on-write) mappings, __S* for
 * shared mappings.  Private writable mappings use PAGE_COPY so a
 * write triggers COW instead of writing the shared page.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
214
#ifndef __ASSEMBLY__

/* Diagnostics printed when a corrupted page-table entry is found. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* The architecture's shared all-zero page (one 4 KB page of longs). */
extern unsigned long empty_zero_page[1024];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#ifdef CONFIG_MMU
/* The kernel's (initial) page directory. */
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
extern void paging_init(void);
#else
# define swapper_pg_dir NULL
static inline void paging_init(void) { }
#endif
/* No architecture-specific page-table caches to initialize. */
static inline void pgtable_cache_init(void) { }
234
235
236
237
/*
 * The pmd level is folded into the pgd (asm-generic/pgtable-nopmd.h);
 * a pmd entry holds the kernel virtual address of a pte page.
 */
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))

/*
 * An empty pte slot is encoded as _PAGE_CA_INVALID | _PAGE_USER (the
 * value written by pte_clear() below).  "Present" depends on the
 * hardware generation's attribute encoding: newer cores must also
 * treat the PROT_NONE attribute pattern as present.
 */
# define pte_none(pte)	 (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
#if XCHAL_HW_VERSION_MAJOR < 2000
# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
#else
# define pte_present(pte)						\
	(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)		\
	 || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
#endif
#define pte_clear(mm,addr,ptep)						\
	do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)

#define pmd_none(pmd)	 (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_bad(pmd)	 (pmd_val(pmd) & ~PAGE_MASK)
#define pmd_clear(pmdp)	 do { set_pmd(pmdp, __pmd(0)); } while (0)
259
/*
 * PTE attribute predicates.  These return the raw flag bits, so the
 * result is "non-zero if set", not a normalized 0/1.
 */
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
/* Special ptes are not supported on this architecture: always false. */
static inline int pte_special(pte_t pte) { return 0; }
264
265static inline pte_t pte_wrprotect(pte_t pte)
266 { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
267static inline pte_t pte_mkclean(pte_t pte)
268 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
269static inline pte_t pte_mkold(pte_t pte)
270 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
271static inline pte_t pte_mkdirty(pte_t pte)
272 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
273static inline pte_t pte_mkyoung(pte_t pte)
274 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
275static inline pte_t pte_mkwrite(pte_t pte)
276 { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
277static inline pte_t pte_mkspecial(pte_t pte)
278 { return pte; }
279
/*
 * Clearing the cache-attribute field yields _PAGE_CA_BYPASS (0),
 * i.e. an uncached mapping.
 */
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))

/* Conversions between pte values, page frame numbers, and pages. */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b) (pte_val(a) == pte_val(b))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
292
293static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
294{
295 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
296}
297
298
299
300
301
302
/*
 * Store a pte value.  When the data cache is write-back and can alias
 * (way size > page size), write the affected cache line back to memory
 * with "dhwb" (NOTE(review): presumably so the hardware page-table
 * access, which may go through a different mapping, observes the new
 * entry — confirm).
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
#endif

}
311
struct mm_struct;

/*
 * pte setters: both variants funnel through update_pte() so any
 * required cache writeback happens in one place.
 */
static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

/*
 * Set a page-directory entry.  NOTE(review): unlike update_pte()
 * there is no writeback here — presumably pgd pages are mapped with
 * _PAGE_DIRECTORY (cache bypass when aliasing is possible); confirm.
 */
static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
}
330
331struct vm_area_struct;
332
333static inline int
334ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
335 pte_t *ptep)
336{
337 pte_t pte = *ptep;
338 if (!pte_young(pte))
339 return 0;
340 update_pte(ptep, pte_mkold(pte));
341 return 1;
342}
343
344static inline pte_t
345ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
346{
347 pte_t pte = *ptep;
348 pte_clear(mm, addr, ptep);
349 return pte;
350}
351
352static inline void
353ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
354{
355 pte_t pte = *ptep;
356 update_pte(ptep, pte_wrprotect(pte));
357}
358
359
/* Find an entry in the kernel's page directory. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* Find an entry in a process's page directory. */
#define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* The pmd is folded into the pgd, so this is essentially a no-op. */
#define pmd_offset(dir,address) ((pmd_t*)(dir))

/* Find a pte within the pte page referenced by a pmd entry. */
#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
/* No highmem pte pages: mapping a pte is the same as the kernel case. */
#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)		do { } while (0)
376
377
378
379
380
/*
 * Swap entry encoding inside a non-present pte:
 *   bits 0..4   _PAGE_CA_INVALID | _PAGE_USER (marks the pte invalid)
 *   bits 6..10  swap type (5 bits)
 *   bits 11..   swap offset
 */
#define SWP_TYPE_BITS 5
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(entry)	(((entry).val >> 6) & 0x1f)
#define __swp_offset(entry)	((entry).val >> 11)
#define __swp_entry(type,offs)	\
	((swp_entry_t){((type) << 6) | ((offs) << 11) | \
	 _PAGE_CA_INVALID | _PAGE_USER})
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* !__ASSEMBLY__ */
393
394
#ifdef __ASSEMBLY__

/*
 * Assembly helpers to walk the two-level page table (NOTE(review):
 * presumably used by the low-level TLB-miss/exception handlers —
 * confirm at the call sites):
 *
 *   _PGD_INDEX(rt,rs)	rt := pgd index of virtual address rs
 *   _PTE_INDEX(rt,rs)	rt := pte index of virtual address rs
 *   _PGD_OFFSET(mm,adr,tmp)	mm := &mm->pgd[pgd_index(adr)]
 *				(MM_PGD is the asm-offsets byte offset
 *				 of the pgd pointer in mm_struct)
 *   _PTE_OFFSET(pmd,adr,tmp)	pmd := address of the pte for adr in
 *				the (page-aligned) pte page held in pmd
 */
#define _PGD_INDEX(rt,rs)	extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
#define _PTE_INDEX(rt,rs)	extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

#define _PGD_OFFSET(mm,adr,tmp)		l32i mm, mm, MM_PGD; \
					_PGD_INDEX(tmp, adr); \
					addx4 mm, tmp, mm

#define _PTE_OFFSET(pmd,adr,tmp)	_PTE_INDEX(tmp, adr); \
					srli pmd, pmd, PAGE_SHIFT; \
					slli pmd, pmd, PAGE_SHIFT; \
					addx4 pmd, tmp, pmd
419
#else

/* All kernel virtual addresses are considered valid. */
#define kern_addr_valid(addr)	(1)

/* Called after a pte changes; keeps caches/TLB consistent. */
extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t *ptep);

typedef pte_t *pte_addr_t;

#endif /* !defined (__ASSEMBLY__) */

/* This architecture supplies its own versions of these pte helpers. */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME

/* Architecture provides its own arch_get_unmapped_area(). */
#define HAVE_ARCH_UNMAPPED_AREA

#include <asm-generic/pgtable.h>

#endif /* _XTENSA_PGTABLE_H */
444