/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"
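
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts: an ioremap alias of a range must not use a memory type that
 * conflicts with the kernel direct mapping of the same physical pages.
 */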
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

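/*
 * walk_system_ram_range() callback: returns 1 as soon as the range
 * contains a page that is usable RAM (valid pfn and not marked reserved).
 */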
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	return 0;
}
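
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with 4KB granularity, so the
 * mapping code falls back to smaller pages when a range is covered by a
 * non-WB MTRR.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */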
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

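	/* Don't allow wraparound or zero size */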
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

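	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */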
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

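	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */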
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

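	/*
	 * Mappings have to be page-aligned
	 */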
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

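	/*
	 * Ok, go for it..
	 */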
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

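	/*
	 * Check if the request spans more than any BAR in the iomem
	 * resource tree.
	 */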
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
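
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */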
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
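	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */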
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
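
/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * Must be freed with iounmap.
 */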
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);
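
/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */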
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
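
/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache updated.
 *
 * Must be freed with iounmap.
 */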
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);

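/*
 * ioremap_cache - map memory into CPU space with write-back caching.
 * Must be freed with iounmap.
 */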
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

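/*
 * ioremap_prot - map memory into CPU space using the cache mode encoded
 * in the given page protection value.
 * Must be freed with iounmap.
 */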
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

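/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */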
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

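	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */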
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

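	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */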
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

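	/* Finally remove it */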
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

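/*
 * Report whether huge-page ioremap mappings are supported at the PUD
 * level (1GB pages, x86-64 with gbpages) and the PMD level (2MB pages,
 * requires PSE).
 */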
int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

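/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */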
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

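	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */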
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = ioremap_cache(start, PAGE_SIZE);

	/* Only add the offset on success and return NULL if ioremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}

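/*
 * The pte page backing the boot-time fixmap (FIX_BTMAP) slots used by
 * early_ioremap() before the normal ioremap machinery is available.
 */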
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

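	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */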
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}