/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, NON
 *   INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <asm/homecache.h>
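
/*
 * Walk the kernel page tables to find the PTE that maps a given
 * kernel virtual address (used below for the fixmap kmap slots).
 */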
#define kmap_get_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
		(vaddr)), (vaddr))

void *kmap(struct page *page)
{
	void *kva;
	unsigned long flags;
	pte_t *ptep;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	kva = kmap_high(page);
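
	/*
	 * Rewrite the PTE under the lock.  This ensures that the page
	 * is not currently migrating.
	 */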
	ptep = kmap_get_pte((unsigned long)kva);
	flags = homecache_kpte_lock();
	set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page)));
	homecache_kpte_unlock(flags);

	return kva;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
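
/*
 * Describe a single atomic mapping of a page on a given cpu at a
 * given address, and allow it to be linked into a list.
 */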
struct atomic_mapped_page {
	struct list_head list;
	struct page *page;
	int cpu;
	unsigned long va;
};

static spinlock_t amp_lock = __SPIN_LOCK_UNLOCKED(&amp_lock);
static struct list_head amp_list = LIST_HEAD_INIT(amp_list);
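
/*
 * Combining this structure with a per-cpu declaration lets us give
 * each cpu an atomic_mapped_page structure per kmap type.
 */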
struct kmap_amps {
	struct atomic_mapped_page per_type[KM_TYPE_NR];
};
static DEFINE_PER_CPU(struct kmap_amps, amps);
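
/*
 * Add a page and va, on this cpu, to the list of kmap_atomic pages,
 * and write the new pte to memory.  Writing the new PTE under the
 * lock guarantees that it is either on the list before migration starts
 * (if we won the race), or set_pte() sets the migrating bit in the PTE
 * (if we lost the race).  Doing it under the lock also guarantees that
 * when kmap_atomic_fix_one_kpte() comes along, it finds a valid PTE.
 *
 * Finally, doing it under the lock lets us safely examine the page
 * to see if it is immutable or not, for the generic kmap_atomic() case:
 * if we examined it earlier we would be exposed to a race where it
 * looked writable but became immutable before we wrote the PTE.
 */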
static void kmap_atomic_register(struct page *page, enum km_type type,
				 unsigned long va, pte_t *ptep, pte_t pteval)
{
	unsigned long flags;
	struct atomic_mapped_page *amp;

	flags = homecache_kpte_lock();
	spin_lock(&amp_lock);
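
	/* With interrupts disabled, now fill in the per-cpu info. */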
	amp = &__get_cpu_var(amps).per_type[type];
	amp->page = page;
	amp->cpu = smp_processor_id();
	amp->va = va;
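
	/* For generic kmap_atomic(), choose the PTE writability now. */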
	if (!pte_read(pteval))
		pteval = mk_pte(page, page_to_kpgprot(page));

	list_add(&amp->list, &amp_list);
	set_pte(ptep, pteval);
	arch_flush_lazy_mmu_mode();

	spin_unlock(&amp_lock);
	homecache_kpte_unlock(flags);
}
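
/*
 * Remove a page and va, on this cpu, from the list of kmap_atomic pages.
 * Linear-time search, but we count on the lists being short.
 * We don't need to adjust the PTE under the lock (as in
 * kmap_atomic_register()), since the caller unconditionally clears
 * and flushes the PTE after unregistering it.
 */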
static void kmap_atomic_unregister(struct page *page, unsigned long va)
{
	unsigned long flags;
	struct atomic_mapped_page *amp;
	int cpu = smp_processor_id();
	spin_lock_irqsave(&amp_lock, flags);
	list_for_each_entry(amp, &amp_list, list) {
		if (amp->page == page && amp->cpu == cpu && amp->va == va)
			break;
	}
	BUG_ON(&amp->list == &amp_list);
	list_del(&amp->list);
	spin_unlock_irqrestore(&amp_lock, flags);
}
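
/* Helper routine for kmap_atomic_fix_kpte(), below. */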
static void kmap_atomic_fix_one_kpte(struct atomic_mapped_page *amp,
				     int finished)
{
	pte_t *ptep = kmap_get_pte(amp->va);
	if (!finished) {
		set_pte(ptep, pte_mkmigrate(*ptep));
		flush_remote(0, 0, NULL, amp->va, PAGE_SIZE, PAGE_SIZE,
			     cpumask_of(amp->cpu), NULL, 0);
	} else {
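		/*
		 * Rewrite a default kernel PTE for this page.
		 * We rely on the fact that set_pte() writes the
		 * present+migrating bits last.
		 */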
		pte_t pte = mk_pte(amp->page, page_to_kpgprot(amp->page));
		set_pte(ptep, pte);
	}
}
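
/*
 * This routine is a helper function for homecache_fix_kpte(); see
 * its comments for more information on the "finished" argument here.
 *
 * Note that we hold the lock while doing the remote flushes, which
 * will stall any unrelated cpus trying to do kmap_atomic operations.
 * We could just update the PTEs under the lock, save away copies of
 * the structs (or just the va+cpu), and flush them after releasing
 * the lock, but that seems unlikely to be worth the trouble.
 */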
void kmap_atomic_fix_kpte(struct page *page, int finished)
{
	struct atomic_mapped_page *amp;
	unsigned long flags;
	spin_lock_irqsave(&amp_lock, flags);
	list_for_each_entry(amp, &amp_list, list) {
		if (amp->page == page)
			kmap_atomic_fix_one_kpte(amp, finished);
	}
	spin_unlock_irqrestore(&amp_lock, flags);
}
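
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because the kmap code must perform a global TLB invalidation when
 * the kmap pool wraps.
 *
 * Note that it may also be slower than on x86 (etc.) because unlike
 * on those platforms, we do have to take a global lock to map and
 * unmap pages on Tile (see above).
 *
 * When holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */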
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;
	pte_t *pte;

	pagefault_disable();
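
	/* Avoid icache flushes by disallowing atomic executable mappings. */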
	BUG_ON(pte_exec(prot));

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	pte = kmap_get_pte(vaddr);
	BUG_ON(!pte_none(*pte));
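
	/* Register that this page is mapped atomically on this cpu. */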
	kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
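	/* PAGE_NONE is a magic value that tells us to check immutability. */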
	return kmap_atomic_prot(page, PAGE_NONE);
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		pte_t *pte = kmap_get_pte(vaddr);
		pte_t pteval = *pte;
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR*smp_processor_id();
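
		/*
		 * The PTE must still be present, unless it is marked as
		 * migrating by a concurrent homecache operation.  Unregister
		 * the mapping, then clear and flush the kernel PTE so the
		 * fixmap slot can be safely reused.
		 */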
		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
		kmap_atomic_unregister(pte_page(pteval), vaddr);
		kpte_clear_flush(pte, vaddr);
		kmap_atomic_idx_pop();
	} else {
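		/* Must be a lowmem page. */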
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
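
/*
 * This API is supposed to allow us to map memory without a "struct page".
 * Currently we don't support this, though this may change in the future.
 */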
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	return kmap_atomic_prot(pfn_to_page(pfn), prot);
}
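
/* Return the struct page backing a kmap_atomic or lowmem kernel pointer. */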
struct page *kmap_atomic_to_page(void *ptr)
{
	pte_t *pte;
	unsigned long vaddr = (unsigned long)ptr;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = kmap_get_pte(vaddr);
	return pte_page(*pte);
}