/*
 * Re-map I/O memory to kernel address space so that we can access it.
 *
 * SuperH flavour: small physical windows in the 29-bit identity-mapped
 * segments (P1/P2/P4) need no page tables at all; everything else goes
 * through the PMB or a regular VM_IOREMAP vmalloc mapping.
 */
15#include <linux/vmalloc.h>
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/mm.h>
19#include <linux/pci.h>
20#include <linux/io.h>
21#include <asm/io_trapped.h>
22#include <asm/page.h>
23#include <asm/pgalloc.h>
24#include <asm/addrspace.h>
25#include <asm/cacheflush.h>
26#include <asm/tlbflush.h>
27#include <asm/mmu.h>
28#include "ioremap.h"
29
30
31
32
33
34
35
36
37
#ifdef CONFIG_29BIT
static void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	phys_addr_t last = offset + size - 1;

	/*
	 * Requests that fall entirely below P3 are identity-mapped:
	 * cached accesses go through P1, uncached through P2.  Anything
	 * straddling or beyond P3 must use the PMB or page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last) < P3SEG)) {
		u64 pte_flags = pgprot_val(prot);

		/*
		 * Legacy PTEA (PCC) attributes can only be expressed
		 * through real page table mappings, so punt on those.
		 */
		if (unlikely(pte_flags & _PAGE_PCC_MASK))
			return NULL;

		if (unlikely(pte_flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* The P4 segment (above the store queues) is always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);

	return NULL;
}
#else
#define __ioremap_29bit(offset, size, prot)	NULL
#endif /* CONFIG_29BIT */
74
75
76
77
78
79
80
81
82
83
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space, trying the cheapest mechanism first: trapped I/O
 * windows, then 29-bit identity segments, then the fixmap (pre-mm),
 * then the PMB, and finally a regular VM_IOREMAP vmalloc mapping.
 *
 * Non-page-aligned requests are allowed; the sub-page offset is folded
 * back into the returned cookie so callers need not care.
 */
void __iomem * __ref
__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
		 pgprot_t pgprot, void *caller)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	void __iomem *mapped;

	/* Trapped (emulated) I/O windows take precedence. */
	mapped = __ioremap_trapped(phys_addr, size);
	if (mapped)
		return mapped;

	/* On 29-bit parts a P1/P2/P4 segment alias may suffice. */
	mapped = __ioremap_29bit(phys_addr, size, pgprot);
	if (mapped)
		return mapped;

	/* Don't allow wraparound or zero size. */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Too early for vmalloc/PMB; fall back to a fixed (fixmap)
	 * mapping instead.
	 */
	if (!mem_init_done)
		return ioremap_fixed(phys_addr, size, pgprot);

	/*
	 * First try to remap through the PMB.
	 * PMB entries cover large, pre-faulted windows.
	 */
	mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
	if (mapped && !IS_ERR(mapped))
		return mapped;

	/*
	 * Page-table mappings have to be page-aligned; split the
	 * request into an aligned base plus an in-page offset.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

	if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
		/* Partial mappings (if any) are torn down by vunmap(). */
		vunmap((void *)orig_addr);
		return NULL;
	}

	/* Re-apply the sub-page offset the caller asked for. */
	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap_caller);
143
144
145
146
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode the P1, P2 and P4 segments are identity-mapped
	 * and never go through the page tables or PMB, so there is no
	 * translation to tear down for addresses that fall in them.
	 */
	return (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX) ? 1 : 0;
#else
	return 0;
#endif
}
160
/*
 * Tear down a mapping created by __ioremap_caller(), releasing
 * whichever backing resource it used: nothing for identity segments,
 * a fixmap slot, a PMB entry, or a vmalloc area.
 */
void iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

	/*
	 * There's no VMA if it's from an early fixed mapping.
	 */
	if (iounmap_fixed(addr) == 0)
		return;

	/*
	 * If the PMB handled it, there's nothing else to do.
	 */
	if (pmb_unmap(addr) == 0)
		return;

	/* Otherwise it must be a vmalloc-backed VM_IOREMAP area. */
	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	/* remove_vm_area() unmapped it; only the descriptor remains. */
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
193