/*
 * This file contains the routines for setting up the linux page tables,
 * derived from arch/ppc/mm/pgtable.c.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>

/*
 * Before mem_init(), __ioremap() hands out I/O space going down from
 * ioremap_base; ioremap_bot records how far down it has gone.
 */
unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD. Until then the exclusion
	 * window below is empty, so the check rejects all of RAM.
	 */
	if (mem_init_done &&
	    p >= memory_start && p < virt_to_phys(high_memory) &&
	    !(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
	      p < __virt_to_phys((phys_addr_t)__bss_stop))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs
	 * for each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);
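
/*
 * Illustrative usage sketch (not built, and not part of this file): a
 * driver typically maps a device register window with ioremap(),
 * accesses it through the readl()/writel() accessors, and releases it
 * with iounmap(). The base address 0x84000000 and the register offset
 * are made-up placeholders, not addresses defined anywhere in this port.
 */
#if 0
static int example_device_init(void)
{
	void __iomem *regs;

	regs = ioremap(0x84000000, PAGE_SIZE);	/* hypothetical base */
	if (!regs)
		return -ENOMEM;
	writel(1, regs + 0x4);			/* hypothetical ctrl reg */
	iounmap(regs);
	return 0;
}
#endif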

void iounmap(void __iomem *addr)
{
	if ((__force void *)addr > high_memory &&
			(unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);

int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of the VA to index the first-level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of the VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			_tlbie(va);	/* flush any stale TLB entry for va */
	}
	return err;
}
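
/*
 * Worked example (assuming the usual 32-bit, 4K-page layout of this
 * port): for va = 0xc0012345, the top 10 bits (0x300) index the pgd,
 * the next 10 bits (0x012) index the pte page, and the low 12 bits
 * (0x345) are the offset within the page; map_page() fills in the
 * second-level entry so the whole va translates to pa | 0x345.
 */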

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
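
/*
 * For example, with the typical CONFIG_KERNEL_START of 0xc0000000 and a
 * RAM base (memory_start) of 0x80000000 (illustrative values, set by the
 * platform), this establishes the linear map va 0xc0000000 -> pa
 * 0x80000000, va 0xc0001000 -> pa 0x80001000, and so on for lowmem_size
 * bytes; pages inside _stext.._etext get the more restrictive text
 * protection, everything else is mapped writable.
 */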

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
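
/*
 * The bit trick: (x) & ((x) - 1) clears the lowest set bit of x, so the
 * result is zero exactly when x has a single bit set. For example,
 * 64 & 63 == 0 (a power of two) while 48 & 47 == 32 (not a power of two).
 */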
/*
 * Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise. The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address. Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
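
/*
 * Illustrative use (hypothetical names): if __ioremap() placed a device
 * window at kernel virtual address v, a driver that must hand the bus
 * address to a DMA engine could recover it with
 *
 *	dma_addr = iopa((unsigned long)v);
 *
 * which walks the page tables above rather than assuming a linear
 * va->pa offset.
 */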

__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
		unsigned long address)
{
	pte_t *pte;
	if (mem_init_done) {
		/* Page allocator is up: take a zeroed page from it */
		pte = (pte_t *)__get_free_page(GFP_KERNEL |
				__GFP_REPEAT | __GFP_ZERO);
	} else {
		/* Still booting: use the early allocator and zero by hand */
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses)
		BUG();

	map_page(address, phys, pgprot_val(flags));
}
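
/*
 * Illustrative sketch (the slot name is hypothetical, not part of this
 * port's fixmap enum): early code could pin a device page at a fixed
 * virtual address before ioremap() is usable, e.g.
 *
 *	__set_fixmap(FIX_EARLYCON_MEM_BASE, uart_phys & PAGE_MASK,
 *		     PAGE_KERNEL);
 *	addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE)
 *	       + (uart_phys & ~PAGE_MASK);
 */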