#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

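/*
 * Each vmemmap_backing entry tracks one block of memory allocated to
 * back part of the virtual memmap; vmemmap_list chains the entries so
 * the backing blocks can be found again (and freed or reused) later.
 */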
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * address their pgtable/pmd/pte pages.
 *
 * pgtable_free_tlb() packs that index size into the low bits of the
 * table pointer before queueing the table on the mmu_gather, so it
 * must never exceed MAX_PGTABLE_INDEX_SIZE (four bits' worth).
 * PGT_CACHE(shift) maps an index size to the kmem cache the table
 * was allocated from.
 */
#define MAX_PGTABLE_INDEX_SIZE 0xf

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})

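/*
 * pgtable_free_tlb() defers the actual free until the mmu_gather is
 * flushed; under SMP the table is batched through __tlb_remove_table()
 * so that lockless page-table walkers never see a table page recycled
 * underneath them.
 */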
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif

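/*
 * With 64K pages the radix PGD fits in a single page; with 4K pages it
 * needs an order-4 (64K) allocation, where __GFP_RETRY_MAYFAIL asks the
 * allocator to try hard but still allows the multi-page allocation to
 * fail, since the caller handles NULL.
 */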
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;

	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *)page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__pgd_alloc(mm);
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled()) {
		radix__pgd_free(mm, pgd);
		return;
	}
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

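/*
 * The *_populate() helpers store the lower-level table's address, as
 * transformed by __pgtable_ptr_val(), together with the per-level
 * valid bits in the upper-level entry.
 */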
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	/*
	 * By now all the pud entries should be none entries. So go
	 * ahead and flush the page walk cache
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	/*
	 * By now all the pmd entries should be none entries. So go
	 * ahead and flush the page walk cache
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

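/*
 * With pte fragments, pgtable_t is a pte_t pointer rather than a
 * struct page pointer, so the PTE table linked from a pmd entry is
 * returned as its virtual address.
 */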
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

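/*
 * PTE pages come from the pte-fragment allocator, which can hand out
 * sub-page fragments so that several PTE tables may share one page.
 * Both page size configurations go through it, keeping the alloc side
 * symmetric with the fragment-based free routines below.  The last
 * argument distinguishes kernel tables from user tables; user tables
 * additionally get the pgtable page ctor (split ptlock) run on them.
 */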
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * By now all the pte entries should be none entries. So go
	 * ahead and flush the page walk cache
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, 0);
}

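/* No cached page tables need periodic pruning here, so this is a no-op. */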
#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */