#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>
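/* Allocate the top level pgd (page directory).
 *
 * With CONFIG_PGTABLE_LEVELS == 3 the first pmd is allocated in the
 * same block of pages as the pgd, immediately below it, so it can
 * always be found again by subtracting a constant offset
 * (PTRS_PER_PGD) from the pgd pointer; pgd_free() relies on this. */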
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;

	if (likely(pgd != NULL)) {
		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
		actual_pgd += PTRS_PER_PGD;
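		/* Point the first entry of the real pgd at the attached
		 * pmd.  PxD_FLAG_ATTACHED marks the entry as permanent:
		 * it may not be cleared or freed later. */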
		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
					    PxD_FLAG_VALID |
					    PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
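		/* The first entry of the attached pmd is marked
		 * PxD_FLAG_ATTACHED as well, telling pmd_free() that
		 * this pmd may not be freed. */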
		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
	}
	return actual_pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
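	/* step back over the pmd attached by pgd_alloc() to reach the
	 * true start of the allocation */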
	pgd -= PTRS_PER_PGD;
#endif
	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3
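/* Three-level page tables: pmds are real, separately allocated pages. */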
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);

	if (pmd)
		memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
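		/*
		 * This is the permanent pmd attached to the pgd by
		 * pgd_alloc(); it cannot be freed here.  Increment the
		 * pmd counter to compensate for the decrement the
		 * generic mm code performs on what it thinks is a free.
		 */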
		mm_inc_nr_pmds(mm);
		return;
	}
	free_pages((unsigned long)pmd, PMD_ORDER);
}

#else
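/* Two-level page tables: the pmd level is folded into the pgd, so no
 * separate pmd pages exist; allocating or populating one is a bug. */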
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
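	/* preserve the PxD_FLAG_ATTACHED marker if this entry starts
	 * the permanent pmd set up by pgd_alloc() */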
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
				     PxD_FLAG_VALID |
				     PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
	else
#endif
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

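/* Allocate a user pte page.  pgtable_page_ctor() sets the page up
 * for use as a page table (e.g. initialising the split ptlock) and
 * can fail, in which case the page must be freed again. */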
static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	pte_free_kernel(mm, page_address(pte));
}

#define check_pgt_cache()	do { } while (0)

#endif