#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>

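/*
 * Track the physical pages backing the virtual memmap, linked in a
 * list so they can be found again (and freed) on memory unplug.
 */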
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Page tables at any level are allocated from kmem caches keyed by
 * their "index_size": an allocation is (2^index_size * sizeof(pointer))
 * bytes, drawn from PGT_CACHE(index_size).
 *
 * MAX_PGTABLE_INDEX_SIZE must be large enough for any page table we
 * need, but it is also stored as a mask in the low bits of page table
 * pointers, so it must be one less than a power of two, and every page
 * table must be aligned enough to keep that many low bits zero.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})

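/*
 * Fragment allocators: several PTE/PMD tables are packed into a single
 * page to cut memory usage. The int argument to the pte helpers selects
 * a kernel (1) or user (0) table.
 */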
extern pte_t *pte_fragment_alloc(struct mm_struct *, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pte_fragment_free(unsigned long *, int);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);

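/*
 * The radix PGD is 64K: a single page under CONFIG_PPC_64K_PAGES, an
 * order-4 allocation with 4K pages.
 */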
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;
	/*
	 * __GFP_RETRY_MAYFAIL: try hard for the order-4 allocation, but
	 * return NULL rather than invoking the OOM killer.
	 */
	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *)page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	if (unlikely(!pgd))
		return pgd;

	/*
	 * Don't scan the PGD for pointers: it contains references to
	 * PUDs, but those references are not full pointers and so
	 * can't be recognised by kmemleak.
	 */
	kmemleak_no_scan(pgd);

	/*
	 * With hugetlb, the second half of the page table is not
	 * cleared, so if this level shares a slab cache with the pmd
	 * or pud level table, the full table must be zeroed on alloc.
	 * With 4K pages nothing is stored in the second half, so this
	 * isn't needed there.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
#endif
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

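/*
 * The populate helpers store the next-level table's address, encoded
 * by __pgtable_ptr_val(), together with the per-level valid bits.
 */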
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;

	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Tell kmemleak to ignore the PUD: don't scan it for pointers
	 * and don't consider it a leak. PUDs are referenced only by
	 * their PGD, and those references are not recognisable as
	 * pointers, which would otherwise cause false leak reports.
	 */
	kmemleak_ignore(pud);

	return pud;
}

static inline void __pud_free(pud_t *pud)
{
	struct page *page = virt_to_page(pud);

	/*
	 * Early pud pages allocated via the memblock allocator can't
	 * be freed back to slab directly.
	 */
	if (PageReserved(page))
		free_reserved_page(page);
	else
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	return __pud_free(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	/*
	 * By now all the pud entries should be none entries, so go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	/*
	 * By now all the pmd entries should be none entries, so go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

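/*
 * The 1/0 argument below selects a kernel or a user PTE table; user
 * tables also carry the page-table-lock state that kernel tables
 * don't need.
 */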
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	return (pte_t *)pte_fragment_alloc(mm, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return (pgtable_t)pte_fragment_alloc(mm, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * By now all the pte entries should be none entries, so go
	 * ahead and flush the page walk cache.
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}

/* There is no arch-level page table cache to shrink. */
#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */