/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;

/*
 * page table size: filled in during early boot by the hash or radix
 * MMU init code.
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}
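
/*
 * Example (hypothetical caller, not part of this file): platform code
 * that needs PCI I/O space bolted at a fixed kernel virtual address can
 * pair __ioremap_at()/__iounmap_at() directly. "phb_io_phys" and
 * "phb_io_virt" are made-up names standing in for a host bridge's I/O
 * window; a real caller would use its own layout constants.
 *
 *	void __iomem *va;
 *
 *	va = __ioremap_at(phb_io_phys, phb_io_virt, SZ_64K,
 *			  pgprot_val(pgprot_noncached(__pgprot(0))));
 *	if (!va)
 *		panic("cannot map PCI I/O space");
 *	...
 *	__iounmap_at(phb_io_virt, SZ_64K);
 */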

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running we use it; before that we carve addresses upward from
	 * ioremap_bot.
	 *
	 * The physical address is aligned down to a page boundary and the
	 * size rounded up so the mapping covers the whole requested range.
	 * For example, with 4K pages, addr = 0x10000004 and size = 0x10
	 * yield paligned = 0x10000000 and size = 0x1000; the sub-page
	 * offset (0x4) is added back to the returned cookie below.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
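
/*
 * Example (hypothetical driver code, not part of this file): the usual
 * pattern is to map a device's register window, access it through the
 * MMIO accessors, and unmap it when done. "REGS_PHYS" and "REG_STATUS"
 * are made-up constants used purely for illustration.
 *
 *	u32 status;
 *	void __iomem *regs;
 *
 *	regs = ioremap(REGS_PHYS, PAGE_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = in_be32(regs + REG_STATUS);
 *	iounmap(regs);
 */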

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
	flags &= ~_PAGE_USER;
	flags |= _PAGE_PRIVILEGED;

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
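
/*
 * Example (hypothetical, for illustration only): ioremap_prot() lets the
 * caller choose the cache attributes, though the sanitising above means
 * a user-accessible or executable mapping can never be produced this
 * way. "fb_phys" and "fb_size" are made-up names for, say, a framebuffer
 * aperture mapped write-combined.
 *
 *	void __iomem *fb;
 *
 *	fb = ioremap_prot(fb_phys, fb_size,
 *			  pgprot_val(pgprot_noncached_wc(__pgprot(0))));
 */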

/*
 * Unmap an IO region and remove it from vmalloc'd list.
 * Access to IO memory should be serialized by driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For hugepages we have the pfn in the pmd itself and use PTE_RPN_SHIFT
 * bits for flags; for a PTE page we have a PTE_FRAG_SIZE aligned virtual
 * address stored instead.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif