/*
 * Xtensa page-table definitions.
 *
 * NOTE(review): the original top-of-file header/license comment appears to
 * have been lost to extraction garbling (stray line numbers); restore it
 * from the upstream source.
 */
11#ifndef _XTENSA_PGTABLE_H
12#define _XTENSA_PGTABLE_H
13
14#include <asm-generic/pgtable-nopmd.h>
15#include <asm/page.h>
16
17
18
19
20
/*
 * Xtensa MMU protection ring numbers.  KERNEL_RING is ring 0; user
 * mappings are tagged with ring 1 (encoded via _PAGE_USER below).
 */
#define USER_RING 1
#define KERNEL_RING 0
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/*
 * Two-level page-table layout: each of the 1024 PGD entries maps
 * 4 MB (PGDIR_SHIFT = 22), covered by a second-level table of 1024 PTEs.
 */
#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/* Entries per page-table level; PGD fits in a single page (order 0). */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PGD 1024
#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
62
63
64
65
66
/*
 * vmalloc() area: 0xC0000000..0xC7FEFFFF (just under 128 MB).
 * Immediately above it, TLBTEMP_BASE_1/2 provide temporary mapping
 * slots; TLBTEMP_SIZE is sized to the larger of two D-cache ways or
 * one I-cache way so that cache-alias handling has room to work in.
 */
#define VMALLOC_START 0xC0000000
#define VMALLOC_END 0xC7FEFFFF
#define TLBTEMP_BASE_1 0xC7FF0000
#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE)
#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE
#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE)
#else
#define TLBTEMP_SIZE ICACHE_WAY_SIZE
#endif
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
/*
 * PTE bit layout (low bits are the hardware attribute field):
 *   bits 0..3  hardware attributes (_PAGE_ATTRIB_MASK):
 *              bit 0 = executable, bit 1 = hw-writable,
 *              bits 2..3 = cache attribute (CA)
 *   bit  4     user ring (_PAGE_USER)
 *   bits 6..8  software bits: writable / dirty / accessed
 */
#define _PAGE_ATTRIB_MASK 0xf

#define _PAGE_HW_EXEC (1<<0)	/* hardware: page is executable */
#define _PAGE_HW_WRITE (1<<1)	/* hardware: page is writable */

/* Cache attribute encodings (bits 2..3). */
#define _PAGE_CA_BYPASS (0<<2)	/* bypass cache (uncached) */
#define _PAGE_CA_WB (1<<2)	/* write-back */
#define _PAGE_CA_WT (2<<2)	/* write-through */
#define _PAGE_CA_MASK (3<<2)
/* CA value 3 is not a usable mode, so it doubles as the "invalid" mark. */
#define _PAGE_CA_INVALID (3<<2)

/* The valid/none encodings differ between pre- and post-2000 MMU cores. */
#if XCHAL_HW_VERSION_MAJOR < 2000
#define _PAGE_HW_VALID 0x01
#define _PAGE_NONE 0x04
#else
#define _PAGE_HW_VALID 0x00
#define _PAGE_NONE 0x0f
#endif

#define _PAGE_USER (1<<4)	/* accessible from USER_RING */

/* Software-managed bits, ignored by the MMU hardware. */
#define _PAGE_WRITABLE_BIT 6
#define _PAGE_WRITABLE (1<<6)	/* software: page writable */
#define _PAGE_DIRTY (1<<7)	/* software: page dirty */
#define _PAGE_ACCESSED (1<<8)	/* software: page accessed (read) */
152
#ifdef CONFIG_MMU

/* Bits preserved across pte_modify(): the pfn plus accessed/dirty. */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)

/*
 * Protection combinations handed out by mmap().  Note that the COPY and
 * READONLY variants carry no _PAGE_WRITABLE / _PAGE_HW_WRITE, so any
 * write faults and can be intercepted by the kernel.
 */
#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)

/*
 * Page-directory entries bypass the cache when D-cache aliasing is
 * possible (way size > page size); see also update_pte() below.
 */
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
#else
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif

#else /* !CONFIG_MMU */

/* No-MMU build: protections are meaningless, provide empty stubs. */
# define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
# define PAGE_NONE __pgprot(0)
# define PAGE_SHARED __pgprot(0)
# define PAGE_COPY __pgprot(0)
# define PAGE_READONLY __pgprot(0)
# define PAGE_KERNEL __pgprot(0)

#endif
185
186
187
188
189
190
191
192
/*
 * Protection map: __Pxwr for private (MAP_PRIVATE) and __Sxwr for shared
 * (MAP_SHARED) mappings, indexed by the exec/write/read permission bits.
 * Private writable mappings use PAGE_COPY (no hw/sw write bit), so the
 * first write faults and a private copy can be made.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
210
211#ifndef __ASSEMBLY__
212
/* Diagnostics for corrupted page-table entries. */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* One page of zeros (1024 32-bit words), shared by all ZERO_PAGE users. */
extern unsigned long empty_zero_page[1024];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#ifdef CONFIG_MMU
/* The kernel's initial page directory; one pgd_t per PGD slot. */
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
extern void paging_init(void);
#else
/* No-MMU build: no page directory, paging setup is a no-op. */
# define swapper_pg_dir NULL
static inline void paging_init(void) { }
#endif
/* No per-arch page-table caches to set up. */
static inline void pgtable_cache_init(void) { }
230
231
232
233
/*
 * pmd_val() holds the kernel virtual address of the PTE page, with the
 * low _PAGE_DIRECTORY attribute bits set; mask with PAGE_MASK to get the
 * plain address.  virt_to_page() ignores the low bits by construction.
 */
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))

/*
 * An empty PTE is encoded as (_PAGE_CA_INVALID | _PAGE_USER); see
 * pte_clear().  On post-2000 cores a PROT_NONE page (all attribute bits
 * set, _PAGE_NONE) also has an invalid CA field yet must still count as
 * present, hence the extra clause in pte_present().
 */
# define pte_none(pte)	 (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
#if XCHAL_HW_VERSION_MAJOR < 2000
# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
#else
# define pte_present(pte)						\
	(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)		\
	 || ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
#endif
#define pte_clear(mm,addr,ptep)						\
	do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)

/* A pmd is present iff its address portion is non-zero. */
#define pmd_none(pmd)	 (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_bad(pmd)	 (pmd_val(pmd) & ~PAGE_MASK)
#define pmd_clear(pmdp)	 do { set_pmd(pmdp, __pmd(0)); } while (0)
255
256static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
257static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
258static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
259static inline int pte_special(pte_t pte) { return 0; }
260
261static inline pte_t pte_wrprotect(pte_t pte)
262 { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
263static inline pte_t pte_mkclean(pte_t pte)
264 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
265static inline pte_t pte_mkold(pte_t pte)
266 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
267static inline pte_t pte_mkdirty(pte_t pte)
268 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
269static inline pte_t pte_mkyoung(pte_t pte)
270 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
271static inline pte_t pte_mkwrite(pte_t pte)
272 { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
273static inline pte_t pte_mkspecial(pte_t pte)
274 { return pte; }
275
/* Clearing the CA field yields _PAGE_CA_BYPASS (0), i.e. uncached. */
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))

/* Conversions between PTEs, page frame numbers, and struct pages. */
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b)		(pte_val(a) == pte_val(b))
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

/*
 * Replace a PTE's protection bits with newprot, keeping only the page
 * frame number and the accessed/dirty bits (_PAGE_CHG_MASK).
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
293
294
295
296
297
298
/*
 * Store a PTE value into the page table.  When the write-back D-cache
 * can alias (way size > page size), page-table pages are mapped with
 * cache bypass (see _PAGE_DIRECTORY above), so the cached alias of the
 * just-written PTE must be flushed to memory with "dhwb" (data-cache
 * hit writeback) or the hardware walker could see stale data.
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
#endif

}
307
struct mm_struct;

/* Both PTE setters funnel through update_pte() for the cache flush. */
static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}

/*
 * Plain store for pmd entries.  NOTE(review): no dhwb here, unlike
 * update_pte() — presumably the pgd is handled elsewhere; confirm.
 */
static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
}
326
327struct vm_area_struct;
328
329static inline int
330ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
331 pte_t *ptep)
332{
333 pte_t pte = *ptep;
334 if (!pte_young(pte))
335 return 0;
336 update_pte(ptep, pte_mkold(pte));
337 return 1;
338}
339
/* Atomically-in-effect read the old PTE and reset it to the empty
 * encoding (pte_clear); returns the previous value. */
static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
}

/* Drop both software and hardware write permission from *ptep. */
static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	update_pte(ptep, pte_wrprotect(pte));
}
354
355
/* Page-table walk helpers. */

/* Kernel-space walk starts from init_mm's page directory. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* First-level: index the mm's pgd by the top address bits. */
#define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* Folded pmd level (pgtable-nopmd): the pmd is the pgd entry itself. */
#define pmd_offset(dir,address) ((pmd_t*)(dir))

/* Second-level: index the PTE page by the middle address bits. */
#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
/* No highmem PTE pages: mapping a PTE is just the kernel-address form. */
#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)		do { } while (0)
372
373
374
375
376
/*
 * Swap entries are stored in non-present PTEs:
 *   bits  0..4   _PAGE_CA_INVALID | _PAGE_USER (marks the PTE invalid)
 *   bits  6..10  swap type  (SWP_TYPE_BITS = 5)
 *   bits 11..31  swap offset
 */
#define SWP_TYPE_BITS	5
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(entry)	(((entry).val >> 6) & 0x1f)
#define __swp_offset(entry)	((entry).val >> 11)
#define __swp_entry(type,offs)	\
	((swp_entry_t){((type) << 6) | ((offs) << 11) | \
	 _PAGE_CA_INVALID | _PAGE_USER})
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
387
388#endif
389
390
391#ifdef __ASSEMBLY__
392
393
394
395
396
397
398
399
400
401
402
403
/*
 * Assembly-level table-walk helpers mirroring the C macros above
 * (extui extracts a bit field; addx4 adds index * 4 for 4-byte entries):
 *   _PGD_INDEX:  rt  = rs >> PGDIR_SHIFT
 *   _PTE_INDEX:  rt  = (rs >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)
 *   _PGD_OFFSET: mm  = mm->pgd + pgd_index(adr) * 4   (clobbers mm, tmp)
 *   _PTE_OFFSET: pmd = (pmd & PAGE_MASK) + pte_index(adr) * 4
 *                (the srli/slli pair masks off the low attribute bits)
 */
#define _PGD_INDEX(rt,rs)	extui	rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
#define _PTE_INDEX(rt,rs)	extui	rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

#define _PGD_OFFSET(mm,adr,tmp)		l32i	mm, mm, MM_PGD;		\
					_PGD_INDEX(tmp, adr);		\
					addx4	mm, tmp, mm

#define _PTE_OFFSET(pmd,adr,tmp)	_PTE_INDEX(tmp, adr);		\
					srli	pmd, pmd, PAGE_SHIFT;	\
					slli	pmd, pmd, PAGE_SHIFT;	\
					addx4	pmd, tmp, pmd
415
416#else
417
/* All kernel virtual addresses are considered valid. */
#define kern_addr_valid(addr)	(1)

/* Arch hook invoked after a PTE is installed; implemented in arch mm
 * code (not visible in this header). */
extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t *ptep);

typedef pte_t *pte_addr_t;
424
425#endif
426
/*
 * Tell asm-generic/pgtable.h which helpers we implement ourselves.
 * NOTE(review): __HAVE_ARCH_PTEP_MKDIRTY is claimed but no ptep_mkdirty
 * is defined in this header — verify it exists elsewhere or is unused.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME

/* This arch provides its own arch_get_unmapped_area(). */
#define HAVE_ARCH_UNMAPPED_AREA

#include <asm-generic/pgtable.h>

#endif /* _XTENSA_PGTABLE_H */
440