/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so it is accessible.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various
 * cleanups by Russell King <rmk@arm.linux.org.uk>
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas: memset/memcpy are *not* valid to use
 * with such areas, since they may be mapped uncached.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

LIST_HEAD(static_vmlist);
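
/*
 * Find a static mapping that covers the physical range
 * [paddr, paddr + size) and was created with memory type @mtype.
 */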
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}
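
/*
 * Find the static mapping, if any, that contains the virtual
 * address @vaddr.
 */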
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is in ascending address order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}
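
/*
 * Register a static mapping at early boot, keeping static_vmlist
 * sorted by ascending virtual address so the lookups above can
 * terminate early.
 */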
void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}
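
/* Establish a single-page I/O mapping at a fixed virtual address. */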
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
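
/*
 * Bring @mm's view of the kernel vmalloc page tables up to date with
 * init_mm.  The copy is retried until it observes a stable
 * vmalloc_seq, since unmap_area_sections() may bump the sequence
 * while the copy is in progress.
 */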
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * while it gets converted for the second mapping, the physical page mapping
 * is also changed, and this could be one of the CPUs that is doing the
 * unmapping...
 *
 * Section mappings are also incompatible with the LPAE page table
 * format, so all of this is compiled out for SMP and LPAE kernels.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pmd_t *pmdp = pmd_off_k(addr);

	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc_seq on this mm, so
			 * that anyone who finds an out of date
			 * vmalloc_seq will update their page tables.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
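
/*
 * Map a region using 1MiB section entries.  Each iteration writes a
 * pair of section descriptors, since a single ARM first-level "pmd"
 * covers 2MiB (two 1MiB sections).
 */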
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
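
/*
 * Map a region using 16MiB supersection entries.  A supersection
 * descriptor must be replicated into all 16 consecutive section
 * slots it covers; the extended physical address bits are carried
 * in descriptor bits 23:20.
 */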
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pmd_t *pmd = pmd_off_k(addr);

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
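
/*
 * Core of the ioremap family: map @size bytes at @pfn (plus a
 * sub-page @offset) with memory type @mtype.  A matching static
 * mapping is reused when one covers the range; otherwise a
 * vmalloc-space area is allocated and mapped with pages, or with
 * sections/supersections where the configuration and alignment
 * allow it.
 */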
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
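
/*
 * Map @size bytes of physical address space: split @phys_addr into a
 * page frame number plus a sub-page offset and defer to
 * __arm_ioremap_pfn_caller().
 */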
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
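
/*
 * Platform code may repoint this hook to install a custom ioremap
 * implementation.
 */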
void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
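
/*
 * Back memremap(..., MEMREMAP_WB) with a cacheable normal-memory
 * mapping.
 */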
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}
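
/*
 * Tear down a mapping created by the ioremap family.  Static mappings
 * installed at boot are permanent and are left in place.
 */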
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
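
/* Memory type used when mapping PCI I/O space; overridable below. */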
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}
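
/*
 * Map a 64KiB window of PCI I/O space at PCI_IO_VIRT_BASE + @offset,
 * so that port accessors (inb()/outb() and friends) reach the bus
 * region at @phys_addr.
 */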
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}