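/*
 * High memory handling common code and variables.
 */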
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>


#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif

#ifdef CONFIG_HIGHMEM

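/*
 * Architectures with aliasing data caches may override the family of
 * pkmap "color" helpers below (usually from their asm/highmem.h) to
 * control which virtual addresses a given page may be mapped at.
 * The generic fallbacks that follow use a single color for everything.
 */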
#ifndef get_pkmap_color

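/*
 * Determine the cache color of the virtual address the page will be
 * mapped at; the default implementation has a single color.
 */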
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

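/*
 * Return the next PKMAP slot index to probe, wrapping back to 0 after
 * LAST_PKMAP entries.  The color argument is unused in the default.
 */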
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

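/*
 * Return true once the slot index has wrapped back to zero, i.e. every
 * slot has been probed and the caller should flush unused entries
 * before continuing the search.
 */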
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

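/*
 * Number of PKMAP slots available for pages of the given color; with a
 * single color this is the whole PKMAP region.
 */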
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

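/*
 * Wait queue on which callers sleep until a PKMAP slot of the given
 * color becomes available; the default shares one queue for all colors.
 */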
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif

unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);

EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);

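/*
 * Return the total number of free pages in the HIGHMEM zones (plus
 * ZONE_MOVABLE where it is backed by highmem) across all online nodes.
 */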
unsigned int nr_free_highpages(void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_online_pgdat(pgdat) {
		pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
					 NR_FREE_PAGES);
		if (zone_movable_is_highmem())
			pages += zone_page_state(
					&pgdat->node_zones[ZONE_MOVABLE],
					NR_FREE_PAGES);
	}

	return pages;
}

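/*
 * pkmap_count[] tracks the state of each PKMAP entry:
 *   0 - unused, and not mapped since the last TLB flush: usable.
 *   1 - no users, but still mapped since the last TLB flush, so the
 *       entry cannot be reused until the TLB has been flushed.
 *   n - the entry has (n - 1) current users.
 */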
static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

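/*
 * On architectures that define ARCH_NEEDS_KMAP_HIGH_GET, kmap_high_get()
 * may be called from atomic or interrupt context, so kmap_lock has to be
 * taken with interrupts disabled there.  Everywhere else a plain spinlock
 * suffices and the IRQ-disabling overhead is avoided.
 */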
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()		spin_lock_irq(&kmap_lock)
#define unlock_kmap()		spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)	spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)	spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()		spin_lock(&kmap_lock)
#define unlock_kmap()		spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)	\
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)	\
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

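/*
 * kmap_to_page - return the struct page backing a kmap'd virtual address.
 * Addresses inside the PKMAP area are translated through the pkmap page
 * table; anything else is assumed to be a lowmem direct-mapped address.
 */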
struct page *kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);
		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(kmap_to_page);

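/*
 * Tear down every PKMAP entry whose count is exactly 1, i.e. entries that
 * have no users left and are only kept because a stale TLB entry may still
 * reference them, then flush the kernel TLB range once if anything was
 * cleared.  Must be called with kmap_lock held.
 */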
static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

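		/*
		 * A count of zero means there is nothing to do, and a
		 * count greater than one means the entry is still in use.
		 * Only a count of exactly one marks an entry that is free
		 * but has not been unmapped and flushed yet.
		 */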
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		BUG_ON(pte_none(pkmap_page_table[i]));

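		/*
		 * No atomic fetch-and-clear is needed here: nobody has the
		 * page mapped any more, and its virtual address (and hence
		 * its PTE) cannot be looked up without taking kmap_lock,
		 * which is held here.
		 */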
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

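/**
 * kmap_flush_unused - flush all unused kmap mappings in order to
 *                     remove stray mappings
 */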
void kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

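/*
 * Find a free PKMAP slot and map the page at it.  May drop kmap_lock and
 * sleep until a slot becomes free; always returns with kmap_lock held.
 * The new entry's count is set to 1; the caller bumps it to 2.
 */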
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);

	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;
		if (--count)
			continue;

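		/*
		 * No free slot was found: sleep until some other user
		 * unmaps one of their entries and a slot becomes unused.
		 */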
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

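			/* Somebody else might have mapped it while we slept */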
			if (page_address(page))
				return (unsigned long)page_address(page);

			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

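/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */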
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

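	/*
	 * For highmem pages the cached virtual address cannot be trusted
	 * until kmap_lock is held.
	 */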
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);

#ifdef ARCH_NEEDS_KMAP_HIGH_GET

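/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists.  If and only if a non-NULL address is returned then a matching
 * call to kunmap_high() is necessary.
 *
 * This never creates a new mapping and never sleeps, so it can be called
 * from any context.
 */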
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif

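/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then kmap_lock is not
 * irq-safe, so this may only be called from process context.
 */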
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

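	/*
	 * A pkmap count must never drop to zero without a TLB flush, so
	 * the last user only decrements the count to 1 here and leaves
	 * the final teardown to flush_all_zero_pkmaps().
	 */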
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
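		/*
		 * Avoid an unnecessary wake_up() call: the common case is
		 * that the count drops to 1 with nobody waiting.  Waiters
		 * are guarded both by the wait-queue head's lock and by
		 * kmap_lock; since kmap_lock is held here it is enough to
		 * test whether the queue is non-empty.
		 */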
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

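	/* Do the wake-up, if needed, race-free outside of the spin lock */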
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif

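/*
 * Hash-table based page_address()/set_page_address() implementation, used
 * when struct page does not carry the virtual address directly.
 */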
#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

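/*
 * Describes one page->virtual address association.
 */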
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

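/*
 * Hash table bucket: a list of the page->virtual associations that hash
 * to the same slot, protected by a per-bucket lock.
 */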
static struct page_address_slot {
	struct list_head lh;
	spinlock_t lock;
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

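/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address, or NULL for a highmem page that is
 * not currently mapped.
 */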
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);

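/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use, or NULL to remove the current mapping
 */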
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

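/*
 * Initialise the page_address_htable hash buckets; called once at init
 * time.
 */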
void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */