// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>
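
/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */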
#ifdef CONFIG_HIGHMEM
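
/*
 * Architectures with aliasing data cache may define the following family of
 * helper functions in their asm/highmem.h to control cache color of virtual
 * addresses where physical memory pages are mapped by kmap.
 */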
#ifndef get_pkmap_color
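
/*
 * Determine color of virtual address where the page should be mapped.
 */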
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color
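
/*
 * Get next index for mapping inside PKMAP region for page with given color.
 */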
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}
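
/*
 * Determine if page index inside PKMAP region (pkmap_nr) of given color
 * has wrapped around PKMAP region end. When this happens an attempt to
 * flush all unused PKMAP slots is made.
 */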
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}
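
/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */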
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}
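
/*
 * Get head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */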
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif

atomic_long_t _totalhigh_pages __read_mostly;
EXPORT_SYMBOL(_totalhigh_pages);

unsigned int __nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

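/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQ out of the locking in that case to save on a
 * potential useless overhead.
 */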
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()		spin_lock_irq(&kmap_lock)
#define unlock_kmap()		spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)	spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)	spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()		spin_lock(&kmap_lock)
#define unlock_kmap()		spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)	\
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)	\
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

struct page *__kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);
		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(__kmap_to_page);

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;
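
		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped.
		 */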
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;
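
		/* sanity check */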
		BUG_ON(pte_none(pkmap_page_table[i]));
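
		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */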
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

void __kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;
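
		/*
		 * Sleep for somebody else to unmap their entries
		 */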
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

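/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 *
 * Callers normally go through the kmap()/kunmap() wrappers; a rough
 * usage sketch (buf and len are illustrative):
 *
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr, buf, len);
 *	kunmap(page);
 */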
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

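	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */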
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
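/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */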
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif

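/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */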
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);
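
	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */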
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
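		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */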
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (start1 >= end1)
		start1 = end1 = 0;
	if (start2 >= end2)
		start2 = end2 = 0;

	for (i = 0; i < compound_nr(page); i++) {
		void *kaddr = NULL;

		if (start1 >= PAGE_SIZE) {
			start1 -= PAGE_SIZE;
			end1 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

			if (end1 > start1) {
				kaddr = kmap_atomic(page + i);
				memset(kaddr + start1, 0, this_end - start1);
			}
			end1 -= this_end;
			start1 = 0;
		}

		if (start2 >= PAGE_SIZE) {
			start2 -= PAGE_SIZE;
			end2 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);

			if (end2 > start2) {
				if (!kaddr)
					kaddr = kmap_atomic(page + i);
				memset(kaddr + start2, 0, this_end - start2);
			}
			end2 -= this_end;
			start2 = 0;
		}

		if (kaddr) {
			kunmap_atomic(kaddr);
			flush_dcache_page(page + i);
		}

		if (!end1 && !end2)
			break;
	}

	BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
#endif
#endif /* CONFIG_HIGHMEM */

#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

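/*
 * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
 * slot is unused which acts as a guard page
 */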
#ifdef CONFIG_DEBUG_KMAP_LOCAL
# define KM_INCR	2
#else
# define KM_INCR	1
#endif

static inline int kmap_local_idx_push(void)
{
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	current->kmap_ctrl.idx += KM_INCR;
	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
	return current->kmap_ctrl.idx - 1;
}

static inline int kmap_local_idx(void)
{
	return current->kmap_ctrl.idx - 1;
}

static inline void kmap_local_idx_pop(void)
{
	current->kmap_ctrl.idx -= KM_INCR;
	BUG_ON(current->kmap_ctrl.idx < 0);
}

#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif

#ifndef arch_kmap_local_set_pte
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
	set_pte_at(mm, vaddr, ptep, ptev)
#endif

/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
		return true;
	}
#endif
	return false;
}

static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}

static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(void)
{
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return __kmap_pte;
}

void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte = kmap_get_pte();
	unsigned long vaddr;
	int idx;

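	/*
	 * Disable migration so resulting virtual address is stable
	 * across preemption.
	 */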
	migrate_disable();
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	pteval = pfn_pte(pfn, prot);
	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);

void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *kmap;

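	/*
	 * To broaden the usage of the actual kmap_local() machinery always map
	 * pages when debugging is enabled and the architecture has no problems
	 * with alias mappings.
	 */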
	if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);

void kunmap_local_indexed(void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte = kmap_get_pte();
	int idx;

	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) {
			/* This _should_ never happen! See above. */
			WARN_ON_ONCE(1);
			return;
		}
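
		/*
		 * Handle mappings which were obtained by kmap_high_get()
		 * first as the virtual address of such mappings is below
		 * PAGE_OFFSET. Warn for all other addresses which are in
		 * the user space part of the virtual address space.
		 */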
		if (!kmap_high_unmap_local(addr))
			WARN_ON_ONCE(addr < PAGE_OFFSET);
		return;
	}

	preempt_disable();
	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte - idx);
	arch_kmap_local_post_unmap(addr);
	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
	kmap_local_idx_pop();
	preempt_enable();
	migrate_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);

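/*
 * Invoked before switch_to(). This is safe even when during or after
 * clearing the maps an interrupt which needs a kmap_local happens because
 * the task::kmap_ctrl.idx is not modified by the unmapping code so a
 * nested kmap_local will use the next unused index and restore the index
 * on unmap. The already cleared kmaps of the outgoing task are irrelevant
 * because the interrupt context does not know about them. The same applies
 * when scheduling back in for an interrupt which happens before the
 * restore is complete.
 */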
void __kmap_local_sched_out(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Clear kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;
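
		/*
		 * This is a horrible hack for XTENSA to calculate the
		 * coloured PTE index. Uses the PFN encoded into the pteval
		 * and the map index calculation because the actual mapped
		 * virtual address is not stored in task::kmap_ctrl.
		 * For any sane architecture this is optimized out.
		 */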
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		arch_kmap_local_pre_unmap(addr);
		pte_clear(&init_mm, addr, kmap_pte - idx);
		arch_kmap_local_post_unmap(addr);
	}
}

void __kmap_local_sched_in(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Restore kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/* See comment in __kmap_local_sched_out() */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
		arch_kmap_local_post_map(addr, pteval);
	}
}

void kmap_local_fork(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
		memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
}

#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

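/*
 * Describes one page->virtual association
 */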
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

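/*
 * Hash table bucket
 */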
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

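/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */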
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);

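/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */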
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */