1#ifndef _ASM_X86_PGTABLE_32_H
2#define _ASM_X86_PGTABLE_32_H
3
4#include <asm/pgtable_32_types.h>
5
6
7
8
9
10
11
12
13
14
15#ifndef __ASSEMBLY__
16#include <asm/processor.h>
17#include <asm/fixmap.h>
18#include <linux/threads.h>
19#include <asm/paravirt.h>
20
21#include <linux/bitops.h>
22#include <linux/slab.h>
23#include <linux/list.h>
24#include <linux/spinlock.h>
25
struct mm_struct;
struct vm_area_struct;

/*
 * The kernel's initial top-level page directory: 1024 pgd_t entries,
 * which covers the full 4GB address space with 4MB per entry on
 * non-PAE 32-bit x86.  Defined in arch setup code.
 */
extern pgd_t swapper_pg_dir[1024];

/* No page-table caches exist on 32-bit x86, so both hooks are no-ops. */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

/* Set up the kernel page tables and memory zones; defined in mm init code. */
void paging_init(void);

/*
 * Install a pmd-level (large page) mapping for the given virtual address.
 * NOTE(review): arguments appear to be (vaddr, pfn, flags) — confirm at the
 * definition site before relying on the parameter order.
 */
extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
36
37
38
39
40
41
42
/* Make sure TEST_ACCESS_OK stays undefined regardless of what was seen above. */
#undef TEST_ACCESS_OK

/*
 * Pull in the pte/pmd accessors matching the configured paging mode:
 * three-level page tables when PAE is enabled (36-bit physical addresses),
 * the classic two-level layout otherwise.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
50
#if defined(CONFIG_HIGHPTE)
/*
 * With CONFIG_HIGHPTE, page-table pages may live in highmem, so a pte page
 * must be temporarily mapped with kmap_atomic_pte() before its entries can
 * be dereferenced.  The kmap slot is chosen by execution context (NMI vs.
 * hard IRQ vs. normal) so that a pte map taken in an interrupt cannot
 * clobber one already in progress; the "_nested" variants use a separate
 * slot (KM_PTE1) so two maps can be held at once.  Every pte_offset_map*()
 * must be paired with the matching pte_unmap*().
 */
#define __KM_PTE			\
	(in_nmi() ? KM_NMI_PTE : 	\
	 in_irq() ? KM_IRQ_PTE :	\
	 KM_PTE0)
#define pte_offset_map(dir, address)					\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) +		\
	 pte_index((address)))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +		\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
#define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
#else
/*
 * Without HIGHPTE, pte pages are always in lowmem and permanently mapped:
 * just compute the address via page_address(); unmapping is a no-op.
 */
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_offset_map_nested(dir, address) pte_offset_map((dir), (address))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
71
72
/*
 * Clear a kernel pte and flush the now-stale TLB entry for that address
 * on the local CPU.  do/while(0) keeps the multi-statement macro safe in
 * unbraced if/else bodies.
 */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)
78
79
80
81
82
/*
 * No-op on x86: the hardware walks the in-memory page tables directly,
 * so there is no external MMU state to refresh after a pte update.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)
84
85#endif
86
87
88
89
90
/*
 * kern_addr_valid() is (1) for FLATMEM — every direct-mapped kernel
 * address is assumed valid — and (0) otherwise, where holes in the
 * memory map mean callers cannot trust an arbitrary kernel address.
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr) (1)
#else
#define kern_addr_valid(kaddr) (0)
#endif
96
97#endif
98