/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON INFRINGEMENT
 *   OR NON-INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <asm/homecache.h>

#define kmap_get_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
		(vaddr)), (vaddr))

void *kmap(struct page *page)
{
	void *kva;
	unsigned long flags;
	pte_t *ptep;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	kva = kmap_high(page);
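
	/*
	 * Rewrite the PTE under the homecache kpte lock; holding the
	 * lock ensures that the page is not currently being migrated
	 * while we update its kernel PTE.
	 */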
	ptep = kmap_get_pte((unsigned long)kva);
	flags = homecache_kpte_lock();
	set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page)));
	homecache_kpte_unlock(flags);

	return kva;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
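
/*
 * Describe a single atomic mapping of a page on a given cpu at a
 * given address, and allow it to be linked into a list.
 */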
struct atomic_mapped_page {
	struct list_head list;
	struct page *page;
	int cpu;
	unsigned long va;
};

static spinlock_t amp_lock = __SPIN_LOCK_UNLOCKED(&amp_lock);
static struct list_head amp_list = LIST_HEAD_INIT(amp_list);
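
/*
 * Combining this structure with a per-cpu declaration lets us give
 * each cpu an atomic_mapped_page structure per kmap type.
 */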
struct kmap_amps {
	struct atomic_mapped_page per_type[KM_TYPE_NR];
};
static DEFINE_PER_CPU(struct kmap_amps, amps);
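
/*
 * Add a page and va, on this cpu, to the list of kmap_atomic pages,
 * and write the new pte to memory.  Writing the new PTE under the
 * lock guarantees that it is either on the list before migration
 * starts (if we won the race), or set_pte() sets the migrating bit
 * in the PTE (if we lost the race).  Doing it under the lock also
 * guarantees that when kmap_atomic_fix_one_kpte() comes along, it
 * finds a valid PTE in memory iff the mapping is still on the
 * amp_list.
 *
 * Finally, doing it under the lock lets us safely examine the page
 * to see if it is immutable or not, for the generic kmap_atomic()
 * case; if we examined it any earlier, it could look writable and
 * then become immutable before we write the PTE.
 */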
static void kmap_atomic_register(struct page *page, int type,
				 unsigned long va, pte_t *ptep, pte_t pteval)
{
	unsigned long flags;
	struct atomic_mapped_page *amp;

	flags = homecache_kpte_lock();
	spin_lock(&amp_lock);

	/* With interrupts disabled, now fill in the per-cpu info. */
	amp = this_cpu_ptr(&amps.per_type[type]);
	amp->page = page;
	amp->cpu = smp_processor_id();
	amp->va = va;

	/* For generic kmap_atomic(), choose the PTE writability now. */
	if (!pte_read(pteval))
		pteval = mk_pte(page, page_to_kpgprot(page));

	list_add(&amp->list, &amp_list);
	set_pte(ptep, pteval);

	spin_unlock(&amp_lock);
	homecache_kpte_unlock(flags);
}
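
/*
 * Remove a page and va, on this cpu, from the list of kmap_atomic
 * pages.  This is a linear-time search, but we count on the list of
 * mappings being short, almost always of length one.
 */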
static void kmap_atomic_unregister(struct page *page, unsigned long va)
{
	unsigned long flags;
	struct atomic_mapped_page *amp;
	int cpu = smp_processor_id();
	spin_lock_irqsave(&amp_lock, flags);
	list_for_each_entry(amp, &amp_list, list) {
		if (amp->page == page && amp->cpu == cpu && amp->va == va)
			break;
	}
	BUG_ON(&amp->list == &amp_list);
	list_del(&amp->list);
	spin_unlock_irqrestore(&amp_lock, flags);
}
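
/* Helper routine for kmap_atomic_fix_kpte(), below. */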
static void kmap_atomic_fix_one_kpte(struct atomic_mapped_page *amp,
				     int finished)
{
	pte_t *ptep = kmap_get_pte(amp->va);
	if (!finished) {
		set_pte(ptep, pte_mkmigrate(*ptep));
		flush_remote(0, 0, NULL, amp->va, PAGE_SIZE, PAGE_SIZE,
			     cpumask_of(amp->cpu), NULL, 0);
	} else {
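		/*
		 * Rewrite a default kernel PTE for this page.
		 * We rely on the fact that set_pte() writes the
		 * present+migrating bits last.
		 */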
		pte_t pte = mk_pte(amp->page, page_to_kpgprot(amp->page));
		set_pte(ptep, pte);
	}
}
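
/*
 * This routine is a helper function for homecache_fix_kpte(); see
 * its comments for more information on the "finished" argument here.
 *
 * Note that we hold the lock while doing the remote flushes, which
 * will stall any unrelated cpus trying to do kmap_atomic operations.
 */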
void kmap_atomic_fix_kpte(struct page *page, int finished)
{
	struct atomic_mapped_page *amp;
	unsigned long flags;
	spin_lock_irqsave(&amp_lock, flags);
	list_for_each_entry(amp, &amp_list, list) {
		if (amp->page == page)
			kmap_atomic_fix_one_kpte(amp, finished);
	}
	spin_unlock_irqrestore(&amp_lock, flags);
}
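
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because the kmap code must perform a global TLB invalidation when
 * the kmap pool wraps.
 *
 * Note that they may be slower than on other architectures because
 * we have to take the global amp_lock to map and unmap pages (see
 * kmap_atomic_register() above).
 *
 * It is not legal to sleep while holding an atomic kmap, so atomic
 * kmaps are appropriate only for short, tight code paths.
 */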
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;
	pte_t *pte;

	preempt_disable();
	pagefault_disable();

	/* Avoid icache flushes by disallowing atomic executable mappings. */
	BUG_ON(pte_exec(prot));

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	pte = kmap_get_pte(vaddr);
	BUG_ON(!pte_none(*pte));

	/* Register that this page is mapped atomically on this cpu. */
	kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	/* PAGE_NONE is a magic value that tells us to check immutability. */
	return kmap_atomic_prot(page, PAGE_NONE);
}
EXPORT_SYMBOL(kmap_atomic);
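
/*
 * Illustrative usage sketch: a typical short, non-sleeping critical
 * section using the atomic kmap API, e.g. zeroing a possibly-highmem
 * page:
 *
 *	void *p = kmap_atomic(page);
 *	memset(p, 0, PAGE_SIZE);
 *	kunmap_atomic(p);
 *
 * Sleeping is not allowed between kmap_atomic() and kunmap_atomic().
 */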

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		pte_t *pte = kmap_get_pte(vaddr);
		pte_t pteval = *pte;
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR*smp_processor_id();
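
		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.  Keeping stale
		 * mappings around is a bad idea.
		 */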
		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
		kmap_atomic_unregister(pte_page(pteval), vaddr);
		kpte_clear_flush(pte, vaddr);
		kmap_atomic_idx_pop();
	} else {
		/* Must be a lowmem page. */
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
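
/*
 * This API is supposed to allow us to map memory without a "struct
 * page".  Currently we don't support this, though this may change in
 * the future.
 */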
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	return kmap_atomic_prot(pfn_to_page(pfn), prot);
}