/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 32-bit compatibility code. This is mostly a wrapper around
 * ioremap_prot().
 *
 *  (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

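/*
 * walk_system_ram_range() callback: return 1 if any page in the pfn range
 * is usable (non-reserved) RAM, so that __ioremap_caller() can refuse to
 * remap memory the kernel is already using.
 */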
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mapping when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Reserve a kernel virtual area for the mapping and wire up the
	 * page tables.
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
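
/*
 * Illustrative use only (not part of this file): a typical PCI driver maps
 * a register BAR with ioremap_nocache() and accesses it through the mmio
 * helpers.  "pdev" and "MY_REG_STATUS" below are placeholder names:
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + MY_REG_STATUS);
 *	...
 *	iounmap(regs);
 */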

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC.  This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
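
/*
 * Illustrative use only (not part of this file): write-combining mappings
 * are typically used for frame buffers or similar streaming-write apertures.
 * "fb_phys" and "fb_len" below are placeholder names:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 *	if (fb) {
 *		memset_io(fb, 0, fb_len);
 *		iounmap(fb);
 *	}
 */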

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);
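
/*
 * ioremap_cache() maps the range with normal write-back caching.  This is
 * only appropriate for memory-like resources without read side effects
 * (e.g. ACPI tables); xlate_dev_mem_ptr() below relies on it for non-RAM
 * /dev/mem accesses.
 */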
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.  So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel.  Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it.  cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
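
/*
 * Report whether ioremap() may use huge mappings: 1GB (pud level) pages
 * need the gbpages CPU feature, 2MB (pmd level) pages need PSE.
 */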
int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* If the page is RAM, it is already in the direct mapping; use __va() */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
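
/*
 * Illustrative sketch only (simplified from the /dev/mem read path, with
 * "buf", "count" and "p" as placeholder names) of how the pair above is
 * meant to be used:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, count))
 *		ret = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */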

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}