#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/sections.h>

/* There is no hashed page table on this MMU; just flush the TLB entry. */
#define flush_HPTE(X, va, pg)	_tlbie(va)

unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

/* The maximum size of low memory, configurable via CONFIG_LOWMEM_SIZE. */
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/* Page-align the physical range to be mapped. */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory, so only do the check after that.
	 * (Note: as written, the __bss_stop..__bss_stop carve-out below
	 * is an empty range and excludes nothing.)
	 */
	if (mem_init_done &&
	    p >= memory_start && p < virt_to_phys(high_memory) &&
	    !(p >= virt_to_phys((unsigned long)&__bss_stop) &&
	      p < virt_to_phys((unsigned long)&__bss_stop))) {
		printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
			" is RAM lr %p\n", (unsigned long)p,
			__builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Choose an address to map it to.  Once the vmalloc system is
	 * running, we use it; until then, we carve mappings out of the
	 * I/O region by moving ioremap_bot downwards.
	 */
	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	/* Map each page; on failure, release the vmalloc area if we got one. */
	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);
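
/*
 * Usage sketch (illustrative only; UART_BASE and the 0x8 status offset
 * are made-up values, not defined in this file):
 *
 *	void __iomem *regs = ioremap(UART_BASE, PAGE_SIZE);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x8);
 *	...
 *	iounmap(regs);
 *
 * The returned cookie must only be dereferenced through the I/O
 * accessors (readl/writel and friends), never directly.
 */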

void iounmap(void *addr)
{
	/*
	 * Only mappings in the vmalloc region can be freed; early boot
	 * mappings taken from below ioremap_bot are permanent.
	 */
	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);

/*
 * Set up a kernel mapping of one page: va -> pa with protection 'flags'.
 */
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use the upper bits of the VA to index the first-level table. */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Allocate the PTE page for va if necessary and locate the slot. */
	pg = pte_alloc_kernel(pd, va);
	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			flush_HPTE(0, va, pmd_val(*pd));
	}
	return err;
}
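
/*
 * Example (hypothetical va/pa values): an uncached, guarded one-page
 * device mapping, exactly as __ioremap() above produces after its flag
 * fixups, would be installed with:
 *
 *	map_page(va, pa, _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED);
 */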

/*
 * Map in all of physical RAM starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < memory_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* Is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))

/*
 * Look up the PTE for a virtual address in the given mm.
 * Returns 1 and stores the PTE pointer through ptep on success.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/*
 * Return the physical address backing a virtual address,
 * or 0 if the address is not mapped.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	pte_t *pte;
	struct mm_struct *mm;

	/*
	 * User addresses are looked up in the current task's page
	 * tables, everything else in the kernel's.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
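
/*
 * Usage sketch (hypothetical names): a driver feeding a physical
 * address to a simple DMA engine could translate a kernel virtual
 * buffer with iopa(); dma_regs and DMA_SRC are made up for the example:
 *
 *	unsigned long phys = iopa((unsigned long)buf);
 *
 *	if (phys)
 *		writel(phys, dma_regs + DMA_SRC);
 *
 * New code should prefer the generic DMA mapping API instead.
 */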
242
243__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
244 unsigned long address)
245{
246 pte_t *pte;
247 if (mem_init_done) {
248 pte = (pte_t *)__get_free_page(GFP_KERNEL |
249 __GFP_REPEAT | __GFP_ZERO);
250 } else {
251 pte = (pte_t *)early_get_page();
252 if (pte)
253 clear_page(pte);
254 }
255 return pte;
256}
257
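
/*
 * Note: pte_alloc_kernel(), used by map_page() above, lands here when
 * a second-level table is missing.  Before mem_init() the page comes
 * from the early boot allocator and is cleared explicitly; afterwards
 * __get_free_page() with __GFP_ZERO returns an already-zeroed page.
 */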