/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

/* Allocate the top level pgd (page directory)
 *
 * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB) so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory) */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;

	if (likely(pgd != NULL)) {
		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
		actual_pgd += PTRS_PER_PGD;
		/* Populate first pmd with allocated memory.  We mark it
		 * with PxD_FLAG_ATTACHED as a signal to the system that this
		 * pmd entry may not be cleared. */
		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
					PxD_FLAG_VALID |
					PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
		 * a signal that this pmd may not be freed */
		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
	}
	return actual_pgd;
}
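
/*
 * Layout of the hybrid allocation above (CONFIG_PGTABLE_LEVELS == 3):
 *
 *	pgd (allocation start)           actual_pgd = pgd + PTRS_PER_PGD
 *	|<---- attached first pmd ---->|<-------- pgd entries -------->|
 *
 * which is why pgd_free() below subtracts the same PTRS_PER_PGD offset
 * before handing the pages back.
 */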

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
	pgd -= PTRS_PER_PGD;	/* step back over the attached first pmd */
#endif
	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three Level Page Table Support for pmd's */

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
		(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}
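
/*
 * PxD entries store the physical address of the next level shifted
 * right by PxD_VALUE_SHIFT, with the PxD_FLAG_* bits occupying the
 * freed-up low bits; pgd_populate() above and pmd_populate_kernel()
 * below both use this encoding.
 */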

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
	if (pmd)
		memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
		/*
		 * This is the permanent pmd attached to the pgd;
		 * cannot free it.
		 * Increment the counter to compensate for the decrement
		 * done by generic mm code.
		 */
		mm_inc_nr_pmds(mm);
		return;
	}
	free_pages((unsigned long)pmd, PMD_ORDER);
}

#else

/* Two Level Page Table Support for pmd's */

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

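/* BUG() never returns; the (pmd_t *)2 constant only gives the statement
 * expression below a non-NULL value of the expected type. */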
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
	/* preserve the gateway marker if this is the beginning of
	 * the permanent pmd */
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
				PxD_FLAG_VALID |
				PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
	else
#endif
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}
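
/*
 * pgtable_page_ctor() prepares the struct page for use as a page-table
 * page (ptlock setup and accounting) and can fail; it is paired with
 * the pgtable_page_dtor() call in pte_free() below.
 */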

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	pte_free_kernel(mm, page_address(pte));
}

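/* No per-arch page-table cache is kept here, so the generic code's
 * periodic trim hook is a no-op. */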
#define check_pgt_cache()	do { } while (0)

#endif