/*
 *  linux/arch/arm/mm/ioremap.c
 *
 *  Re-map IO memory to kernel address space so that we can access it.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() to mark (super)section-mapped
 * I/O regions in the vm_struct flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

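/*
 * Fill the PTEs covering [addr, end) with mappings of 'phys_addr' using
 * the protection bits from the given memory type.  Finding a PTE that is
 * already populated is a fatal error.
 */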
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
                          unsigned long phys_addr, const struct mem_type *type)
{
        pgprot_t prot = __pgprot(type->prot_pte);
        pte_t *pte;

        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;

        do {
                if (!pte_none(*pte))
                        goto bad;

                set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot),
                            type->prot_pte_ext);
                phys_addr += PAGE_SIZE;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;

 bad:
        printk(KERN_CRIT "remap_area_pte: page already exists\n");
        BUG();
}

static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
                                 unsigned long end, unsigned long phys_addr,
                                 const struct mem_type *type)
{
        unsigned long next;
        pmd_t *pmd;
        int ret = 0;

        pmd = pmd_alloc(&init_mm, pgd, addr);
        if (!pmd)
                return -ENOMEM;

        do {
                next = pmd_addr_end(addr, end);
                ret = remap_area_pte(pmd, addr, next, phys_addr, type);
                if (ret)
                        return ret;
                phys_addr += next - addr;
        } while (pmd++, addr = next, addr != end);
        return ret;
}

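/*
 * Build a page (4K) based mapping of 'size' bytes of physical memory,
 * starting at 'pfn', at kernel virtual address 'start', allocating the
 * intermediate page tables as needed.
 */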
static int remap_area_pages(unsigned long start, unsigned long pfn,
                            size_t size, const struct mem_type *type)
{
        unsigned long addr = start;
        unsigned long next, end = start + size;
        unsigned long phys_addr = __pfn_to_phys(pfn);
        pgd_t *pgd;
        int err = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = remap_area_pmd(pgd, addr, next, phys_addr, type);
                if (err)
                        break;
                phys_addr += next - addr;
        } while (pgd++, addr = next, addr != end);

        return err;
}

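/*
 * Bring this mm's copy of the first-level entries covering the vmalloc
 * region back in sync with init_mm.  init_mm.context.kvm_seq is bumped
 * whenever those entries change (see unmap_area_sections below); re-copy
 * and retry until the sequence number is stable.
 */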
void __check_kvm_seq(struct mm_struct *mm)
{
        unsigned int seq;

        do {
                seq = init_mm.context.kvm_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
                mm->context.kvm_seq = seq;
        } while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * while it has our newly freed iounmap'd section mappings in its TLB, it
 * could cause a fault.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
        unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
        pgd_t *pgd;

        flush_cache_vunmap(addr, end);
        pgd = pgd_offset_k(addr);
        do {
                pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

                pmd = *pmdp;
                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
                         * increment the kvm sequence so that other
                         * tasks notice the change and re-sync their
                         * copy of the kernel mappings (__check_kvm_seq).
                         */
                        pmd_clear(pmdp);
                        init_mm.context.kvm_seq++;

                        /*
                         * Free the page table, if there was one.
                         */
                        if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
                                pte_free_kernel(pmd_page_vaddr(pmd));
                }

                addr += PGDIR_SIZE;
                pgd++;
        } while (addr < end);

        /*
         * Ensure that the active_mm is up to date - we want to catch
         * any use-after-iounmap due to a stale copy of the page tables.
         */
        if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
                __check_kvm_seq(current->active_mm);

        flush_tlb_kernel_range(virt, end);
}

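/*
 * Map the region with 1MB section entries.  Each pgd slot holds a pair
 * of section entries, so both halves are written before the entry is
 * flushed out to memory.
 */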
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
                    size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;

        /*
         * Remove and free any PTE-based mapping left at these
         * addresses, and sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(addr);
        do {
                pmd_t *pmd = pmd_offset(pgd, addr);

                pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                pfn += SZ_1M >> PAGE_SHIFT;
                flush_pmd_entry(pmd);

                addr += PGDIR_SIZE;
                pgd++;
        } while (addr < end);

        return 0;
}

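/*
 * Map the region with 16MB supersections.  A supersection descriptor
 * carries physical address bits [35:32] in bits [23:20], which is what
 * allows pfns above 4GB (pfn >= 0x100000) to be mapped, and it must be
 * repeated in all 16 consecutive first-level entries that it covers.
 */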
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
                         size_t size, const struct mem_type *type)
{
        unsigned long addr = virt, end = virt + size;
        pgd_t *pgd;

        /*
         * Remove and free any PTE-based mapping left at these
         * addresses, and sync the current kernel mapping.
         */
        unmap_area_sections(virt, size);

        pgd = pgd_offset_k(virt);
        do {
                unsigned long super_pmd_val, i;

                super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
                                PMD_SECT_SUPER;
                super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

                for (i = 0; i < 8; i++) {
                        pmd_t *pmd = pmd_offset(pgd, addr);

                        pmd[0] = __pmd(super_pmd_val);
                        pmd[1] = __pmd(super_pmd_val);
                        flush_pmd_entry(pmd);

                        addr += PGDIR_SIZE;
                        pgd++;
                }

                pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
        } while (addr < end);

        return 0;
}
#endif

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'mtype' selects the memory type (see get_mem_type()) and hence the
 * cacheability and access permissions used for the mapping.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
                  unsigned int mtype)
{
        const struct mem_type *type;
        int err;
        unsigned long addr;
        struct vm_struct *area;

        /*
         * High mappings must be supersection aligned
         */
        if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                return NULL;

        type = get_mem_type(mtype);
        if (!type)
                return NULL;

        /*
         * Page align the mapping size, taking the offset into account
         */
        size = PAGE_ALIGN(offset + size);

        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;

        /*
         * Use a (super)section mapping if everything is suitably aligned;
         * otherwise fall back to a page based mapping.
         */
#ifndef CONFIG_SMP
        if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
              cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_supersections(addr, pfn, size, type);
        } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
                area->flags |= VM_ARM_SECTION_MAPPING;
                err = remap_area_sections(addr, pfn, size, type);
        } else
#endif
                err = remap_area_pages(addr, pfn, size, type);

        if (err) {
                vunmap((void *)addr);
                return NULL;
        }

        flush_cache_vmap(addr, addr + size);
        return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

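/*
 * Convenience wrapper taking a physical address rather than a pfn: the
 * sub-page offset is carried through into the returned cookie, so callers
 * need not worry about page alignment.
 */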
void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
EXPORT_SYMBOL(__arm_ioremap);

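/*
 * Unmap a region previously mapped with __arm_ioremap*().  Section-mapped
 * regions need their vm_struct torn down by hand, as vunmap() only knows
 * about page based mappings.
 */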
void __iounmap(volatile void __iomem *addr)
{
#ifndef CONFIG_SMP
        struct vm_struct **p, *tmp;
#endif
        unsigned int section_mapping = 0;

        addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
        /*
         * Section based mappings have to be handled specially here: the
         * VM subsystem does not know how to tear them down, so walk the
         * vmlist by hand, unlink the vm_struct, clear the section
         * mappings and free it ourselves.  The lock is needed because we
         * are searching the list without any other protection.
         */
        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
                        if (tmp->flags & VM_ARM_SECTION_MAPPING) {
                                *p = tmp->next;
                                unmap_area_sections((unsigned long)tmp->addr,
                                                    tmp->size);
                                kfree(tmp);
                                section_mapping = 1;
                        }
                        break;
                }
        }
        write_unlock(&vmlist_lock);
#endif

        if (!section_mapping)
                vunmap((void __force *)addr);
}
EXPORT_SYMBOL(__iounmap);