1
2
3
4
5
6
7
8#ifndef _ASM_PGTABLE_H
9#define _ASM_PGTABLE_H
10
11#include <linux/mmzone.h>
12#ifdef CONFIG_32BIT
13#include <asm/pgtable-32.h>
14#endif
15#ifdef CONFIG_64BIT
16#include <asm/pgtable-64.h>
17#endif
18
19#include <asm/io.h>
20#include <asm/pgtable-bits.h>
21
22struct mm_struct;
23struct vm_area_struct;
24
/*
 * Page protection encodings for user and kernel mappings.
 *
 * On CPUs with RIXI (read/execute-inhibit) support the _PAGE_READ bit is
 * omitted and readability is expressed through the RI/XI bits instead,
 * hence the cpu_has_rixi conditionals.  Cachability of normal pages comes
 * from the runtime-selected _page_cachable_default.
 */
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
			_page_cachable_default)
/* COW pages: no write permission; exec-inhibited on RIXI CPUs. */
#define PAGE_COPY __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
			(cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
			_page_cachable_default)
/* Kernel mappings are global so they survive ASID switches. */
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
			_PAGE_GLOBAL | _page_cachable_default)
#define PAGE_USERIO __pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
			_page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
38
39
40
41
42
43
44
45
46
47
48
49
/*
 * Protection map placeholders for the eight private (__P) and eight
 * shared (__S) read/write/execute combinations.  All entries are zero
 * here; NOTE(review): presumably the real values are installed at runtime
 * because cachability and RIXI support are CPU dependent -- confirm
 * against the arch setup code that fills protection_map[].
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
67
/* Default cache attribute for regular pages, chosen at runtime. */
extern unsigned long _page_cachable_default;

/*
 * Colored zero-page support: empty_zero_page is the base of a zeroed
 * region and zero_page_mask selects an offset within it based on the
 * faulting virtual address, so the returned zero page shares the cache
 * color of the mapping address.
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

extern void paging_init(void);
83
84
85
86
87
/* Physical address of the page table a PMD entry points to. */
#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
/* With THP enabled a pmd_page() that also handles huge PMDs is defined later. */
#define pmd_page(pmd) __pmd_page(pmd)
#endif

/* A PMD entry stores the kernel virtual address of its page table. */
#define pmd_page_vaddr(pmd) pmd_val(pmd)
96
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)

/*
 * 64-bit physical addresses on a 32-bit CPU: each pte is a pte_low /
 * pte_high pair.  A pte is "none" when neither half has any bit other
 * than _PAGE_GLOBAL set; presence is tracked in the low half only.
 */
#define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)

/*
 * Install a pte.  pte_high is written first and a write barrier is
 * issued before pte_low, so an observer that sees the low half (which
 * carries _PAGE_PRESENT) also sees a consistent high half.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);

		/*
		 * Propagate _PAGE_GLOBAL to a still-empty buddy pte.
		 * NOTE(review): this pairing looks like it exists because
		 * the MIPS TLB maps an even/odd page pair per entry and
		 * the global bits must match -- confirm.  A non-none buddy
		 * is left untouched.
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
121
122static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
123{
124 pte_t null = __pte(0);
125
126
127 if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
128 null.pte_low = null.pte_high = _PAGE_GLOBAL;
129
130 set_pte_at(mm, addr, ptep, null);
131}
132#else
133
/* A pte is "none" when no bit other than _PAGE_GLOBAL is set. */
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)

/*
 * Install a pte.  Except on R3000/TX39-class CPUs, the global bit of a
 * pte pair is kept in sync by propagating _PAGE_GLOBAL to an empty
 * buddy entry.  NOTE(review): presumably required because the TLB maps
 * an even/odd page pair per entry -- confirm.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);

		/* Only a still-none buddy is marked global here. */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
158
/*
 * Clear a pte.  On CPUs that pair ptes in the TLB, a global buddy means
 * the cleared entry must keep _PAGE_GLOBAL; otherwise it becomes zero.
 * Note the if/else deliberately straddles the #endif so the non-paired
 * configurations always take the plain-zero path.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve the global bit if the buddy pte carries it. */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
}
169#endif
170
171
172
173
174
/* Write a PMD entry; a plain store, no extra synchronisation. */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/* Write a PUD entry (only exists when the PMD level is not folded). */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

/*
 * log2 of the page-table entry sizes.  NOTE(review): presumably consumed
 * by generated TLB-handler code for index scaling -- confirm.
 */
#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
188
189
190
191
192
193extern pgd_t swapper_pg_dir[];
194
195
196
197
198
199#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
200static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
201static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
202static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
203static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; }
204
205static inline pte_t pte_wrprotect(pte_t pte)
206{
207 pte.pte_low &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
208 pte.pte_high &= ~_PAGE_SILENT_WRITE;
209 return pte;
210}
211
212static inline pte_t pte_mkclean(pte_t pte)
213{
214 pte.pte_low &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
215 pte.pte_high &= ~_PAGE_SILENT_WRITE;
216 return pte;
217}
218
219static inline pte_t pte_mkold(pte_t pte)
220{
221 pte.pte_low &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
222 pte.pte_high &= ~_PAGE_SILENT_READ;
223 return pte;
224}
225
226static inline pte_t pte_mkwrite(pte_t pte)
227{
228 pte.pte_low |= _PAGE_WRITE;
229 if (pte.pte_low & _PAGE_MODIFIED) {
230 pte.pte_low |= _PAGE_SILENT_WRITE;
231 pte.pte_high |= _PAGE_SILENT_WRITE;
232 }
233 return pte;
234}
235
236static inline pte_t pte_mkdirty(pte_t pte)
237{
238 pte.pte_low |= _PAGE_MODIFIED;
239 if (pte.pte_low & _PAGE_WRITE) {
240 pte.pte_low |= _PAGE_SILENT_WRITE;
241 pte.pte_high |= _PAGE_SILENT_WRITE;
242 }
243 return pte;
244}
245
246static inline pte_t pte_mkyoung(pte_t pte)
247{
248 pte.pte_low |= _PAGE_ACCESSED;
249 if (pte.pte_low & _PAGE_READ) {
250 pte.pte_low |= _PAGE_SILENT_READ;
251 pte.pte_high |= _PAGE_SILENT_READ;
252 }
253 return pte;
254}
255#else
256static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
257static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
258static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
259static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
260
261static inline pte_t pte_wrprotect(pte_t pte)
262{
263 pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
264 return pte;
265}
266
267static inline pte_t pte_mkclean(pte_t pte)
268{
269 pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
270 return pte;
271}
272
273static inline pte_t pte_mkold(pte_t pte)
274{
275 pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
276 return pte;
277}
278
279static inline pte_t pte_mkwrite(pte_t pte)
280{
281 pte_val(pte) |= _PAGE_WRITE;
282 if (pte_val(pte) & _PAGE_MODIFIED)
283 pte_val(pte) |= _PAGE_SILENT_WRITE;
284 return pte;
285}
286
287static inline pte_t pte_mkdirty(pte_t pte)
288{
289 pte_val(pte) |= _PAGE_MODIFIED;
290 if (pte_val(pte) & _PAGE_WRITE)
291 pte_val(pte) |= _PAGE_SILENT_WRITE;
292 return pte;
293}
294
295static inline pte_t pte_mkyoung(pte_t pte)
296{
297 pte_val(pte) |= _PAGE_ACCESSED;
298 if (cpu_has_rixi) {
299 if (!(pte_val(pte) & _PAGE_NO_READ))
300 pte_val(pte) |= _PAGE_SILENT_READ;
301 } else {
302 if (pte_val(pte) & _PAGE_READ)
303 pte_val(pte) |= _PAGE_SILENT_READ;
304 }
305 return pte;
306}
307
#ifdef _PAGE_HUGE
/* Huge-page pte helpers, available only when the platform defines
 * _PAGE_HUGE. */
static inline int pte_huge(pte_t pte)
{
	return pte_val(pte) & _PAGE_HUGE;
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif
317#endif
318static inline int pte_special(pte_t pte) { return 0; }
319static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
320
321
322
323
324
325
326
327#define pgprot_noncached pgprot_noncached
328
329static inline pgprot_t pgprot_noncached(pgprot_t _prot)
330{
331 unsigned long prot = pgprot_val(_prot);
332
333 prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
334
335 return __pgprot(prot);
336}
337
338
339
340
341
/* Build a pte mapping the given page with the given protection bits. */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
343
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/*
 * Change a pte's protection, keeping the bits covered by _PAGE_CHG_MASK.
 * NOTE(review): the 0x3f mask assumes the hardware protection flags
 * occupy the low six bits of pte_high in this split layout -- confirm
 * against the pgtable-bits definitions for this configuration.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;
	pte.pte_low |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;
	return pte;
}
#else
/* Change protection: keep _PAGE_CHG_MASK bits, replace the rest. */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif
359
360
361extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
362 pte_t pte);
363extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
364 pte_t pte);
365
366static inline void update_mmu_cache(struct vm_area_struct *vma,
367 unsigned long address, pte_t *ptep)
368{
369 pte_t pte = *ptep;
370 __update_tlb(vma, address, pte);
371 __update_cache(vma, address, pte);
372}
373
374static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
375 unsigned long address, pmd_t *pmdp)
376{
377 pte_t pte = *(pte_t *)pmdp;
378
379 __update_tlb(vma, address, pte);
380}
381
382#define kern_addr_valid(addr) (1)
383
#ifdef CONFIG_64BIT_PHYS_ADDR
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

/*
 * With 64-bit physical addresses the target physical address may need
 * adjusting via fixup_bigphys_addr() before the generic remap is used;
 * otherwise io_remap_pfn_range() is just remap_pfn_range().
 */
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
	unsigned long vaddr,
	unsigned long pfn,
	unsigned long size,
	pgprot_t prot)
{
	phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#else
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
400
401#ifdef CONFIG_TRANSPARENT_HUGEPAGE
402
403extern int has_transparent_hugepage(void);
404
405static inline int pmd_trans_huge(pmd_t pmd)
406{
407 return !!(pmd_val(pmd) & _PAGE_HUGE);
408}
409
410static inline pmd_t pmd_mkhuge(pmd_t pmd)
411{
412 pmd_val(pmd) |= _PAGE_HUGE;
413
414 return pmd;
415}
416
417static inline int pmd_trans_splitting(pmd_t pmd)
418{
419 return !!(pmd_val(pmd) & _PAGE_SPLITTING);
420}
421
422static inline pmd_t pmd_mksplitting(pmd_t pmd)
423{
424 pmd_val(pmd) |= _PAGE_SPLITTING;
425
426 return pmd;
427}
428
429extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
430 pmd_t *pmdp, pmd_t pmd);
431
432#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
433
434extern void pmdp_splitting_flush(struct vm_area_struct *vma,
435 unsigned long address,
436 pmd_t *pmdp);
437
438#define __HAVE_ARCH_PMD_WRITE
439static inline int pmd_write(pmd_t pmd)
440{
441 return !!(pmd_val(pmd) & _PAGE_WRITE);
442}
443
444static inline pmd_t pmd_wrprotect(pmd_t pmd)
445{
446 pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
447 return pmd;
448}
449
450static inline pmd_t pmd_mkwrite(pmd_t pmd)
451{
452 pmd_val(pmd) |= _PAGE_WRITE;
453 if (pmd_val(pmd) & _PAGE_MODIFIED)
454 pmd_val(pmd) |= _PAGE_SILENT_WRITE;
455
456 return pmd;
457}
458
459static inline int pmd_dirty(pmd_t pmd)
460{
461 return !!(pmd_val(pmd) & _PAGE_MODIFIED);
462}
463
464static inline pmd_t pmd_mkclean(pmd_t pmd)
465{
466 pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
467 return pmd;
468}
469
470static inline pmd_t pmd_mkdirty(pmd_t pmd)
471{
472 pmd_val(pmd) |= _PAGE_MODIFIED;
473 if (pmd_val(pmd) & _PAGE_WRITE)
474 pmd_val(pmd) |= _PAGE_SILENT_WRITE;
475
476 return pmd;
477}
478
479static inline int pmd_young(pmd_t pmd)
480{
481 return !!(pmd_val(pmd) & _PAGE_ACCESSED);
482}
483
484static inline pmd_t pmd_mkold(pmd_t pmd)
485{
486 pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
487
488 return pmd;
489}
490
491static inline pmd_t pmd_mkyoung(pmd_t pmd)
492{
493 pmd_val(pmd) |= _PAGE_ACCESSED;
494
495 if (cpu_has_rixi) {
496 if (!(pmd_val(pmd) & _PAGE_NO_READ))
497 pmd_val(pmd) |= _PAGE_SILENT_READ;
498 } else {
499 if (pmd_val(pmd) & _PAGE_READ)
500 pmd_val(pmd) |= _PAGE_SILENT_READ;
501 }
502
503 return pmd;
504}
505
506
507extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
508
509static inline unsigned long pmd_pfn(pmd_t pmd)
510{
511 return pmd_val(pmd) >> _PFN_SHIFT;
512}
513
514static inline struct page *pmd_page(pmd_t pmd)
515{
516 if (pmd_trans_huge(pmd))
517 return pfn_to_page(pmd_pfn(pmd));
518
519 return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
520}
521
522static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
523{
524 pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
525 return pmd;
526}
527
528static inline pmd_t pmd_mknotpresent(pmd_t pmd)
529{
530 pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
531
532 return pmd;
533}
534
535
536
537
538
539#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
540static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
541 unsigned long address, pmd_t *pmdp)
542{
543 pmd_t old = *pmdp;
544
545 pmd_clear(pmdp);
546
547 return old;
548}
549
550#endif
551
552#include <asm-generic/pgtable.h>
553
554
555
556
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
/*
 * Arch hooks to choose/veto protection bits for physical-memory
 * mappings.  NOTE(review): presumably consumed by /dev/mem style
 * mappers -- confirm against the generic callers.
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
	unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
	unsigned long size, pgprot_t *vma_prot);
#endif

/* This architecture supplies its own mmap address-space layout helpers. */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* No arch-specific page-table cache needs initialising. */
#define pgtable_cache_init() do { } while (0)
578
579#endif
580