// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 */
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that it is not mapped, and it has been mapped
 *    since a TLB flush - it needs flushing before it can be used again.
 *  n means that there are (n-1) current users of it.
 */
#ifdef CONFIG_HIGHMEM

/*
 * Architectures with an aliasing data cache may define the following
 * family of helper functions in their asm/highmem.h to control the cache
 * color of virtual addresses where physical memory pages are mapped by
 * kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine the color of the virtual address where the page should be
 * mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get the next index for a mapping inside the PKMAP region for a page with
 * the given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine whether the page index inside the PKMAP region (pkmap_nr) of
 * the given color has wrapped around the PKMAP region end. When this
 * happens, an attempt to flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get the head of the wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by the freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif

atomic_long_t _totalhigh_pages __read_mostly;
EXPORT_SYMBOL(_totalhigh_pages);

unsigned int __nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQs out of the locking in that case to save on a
 * potential useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

struct page *__kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);

		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(__kmap_to_page);
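
/*
 * Illustrative sketch (not part of the original file): callers reach
 * __kmap_to_page() through the kmap_to_page() wrapper, which resolves an
 * address obtained from kmap() back to its struct page; for a pkmap
 * address this walks pkmap_page_table above. example_kmap_roundtrip()
 * is a hypothetical helper added only for demonstration.
 */
static inline bool example_kmap_roundtrip(struct page *page)
{
	void *vaddr = kmap(page);	/* pkmap address if page is highmem */
	bool ok = kmap_to_page(vaddr) == page;

	kunmap(page);
	return ok;
}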

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * acquiring the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

void __kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_high);
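
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally reach kmap_high() through the kmap() wrapper, which handles
 * the lowmem fast path. The mapping is long-lived and the call may sleep,
 * so it must be paired with kunmap() from non-atomic context.
 * example_fill_page() is a hypothetical helper.
 */
static inline void example_fill_page(struct page *page, int val)
{
	void *vaddr = kmap(page);	/* may sleep for a free pkmap slot */

	memset(vaddr, val, PAGE_SIZE);
	kunmap(page);
}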

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a matching
 * call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *) vaddr;
}
#endif

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);

void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (start1 >= end1)
		start1 = end1 = 0;
	if (start2 >= end2)
		start2 = end2 = 0;

	for (i = 0; i < compound_nr(page); i++) {
		void *kaddr = NULL;

		if (start1 >= PAGE_SIZE) {
			start1 -= PAGE_SIZE;
			end1 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

			if (end1 > start1) {
				kaddr = kmap_local_page(page + i);
				memset(kaddr + start1, 0, this_end - start1);
			}
			end1 -= this_end;
			start1 = 0;
		}

		if (start2 >= PAGE_SIZE) {
			start2 -= PAGE_SIZE;
			end2 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);

			if (end2 > start2) {
				if (!kaddr)
					kaddr = kmap_local_page(page + i);
				memset(kaddr + start2, 0, this_end - start2);
			}
			end2 -= this_end;
			start2 = 0;
		}

		if (kaddr) {
			kunmap_local(kaddr);
			flush_dcache_page(page + i);
		}

		if (!end1 && !end2)
			break;
	}

	BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
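
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller zeroes everything outside a byte range it wants to preserve,
 * e.g. when handling a partial page. example_zero_outside() is a
 * hypothetical helper; 'from' and 'to' are byte offsets into the page.
 */
static inline void example_zero_outside(struct page *page,
					unsigned from, unsigned to)
{
	/* Zero [0, from) and [to, PAGE_SIZE) in a single pass */
	zero_user_segments(page, 0, from, to, PAGE_SIZE);
}
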
#endif /* CONFIG_HIGHMEM */

#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

/*
 * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
 * slot is unused which acts as a guard page
 */
#ifdef CONFIG_DEBUG_KMAP_LOCAL
# define KM_INCR	2
#else
# define KM_INCR	1
#endif

static inline int kmap_local_idx_push(void)
{
	WARN_ON_ONCE(in_hardirq() && !irqs_disabled());
	current->kmap_ctrl.idx += KM_INCR;
	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
	return current->kmap_ctrl.idx - 1;
}

static inline int kmap_local_idx(void)
{
	return current->kmap_ctrl.idx - 1;
}

static inline void kmap_local_idx_pop(void)
{
	current->kmap_ctrl.idx -= KM_INCR;
	BUG_ON(current->kmap_ctrl.idx < 0);
}

#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif

#ifndef arch_kmap_local_set_pte
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
	set_pte_at(mm, vaddr, ptep, ptev)
#endif

/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
		return true;
	}
#endif
	return false;
}

static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}

static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
{
	if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
		/*
		 * Set by the arch if __kmap_pte[-idx] does not produce
		 * the correct entry.
		 */
		return virt_to_kpte(vaddr);
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return &__kmap_pte[-idx];
}

void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte;
	unsigned long vaddr;
	int idx;

	/*
	 * Disable migration so the resulting virtual address is stable
	 * across preemption.
	 */
	migrate_disable();
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	kmap_pte = kmap_get_pte(vaddr, idx);
	BUG_ON(!pte_none(*kmap_pte));
	pteval = pfn_pte(pfn, prot);
	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);

void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *kmap;

	/*
	 * To broaden the usage of the actual kmap_local() machinery always map
	 * pages when debugging is enabled and the architecture has no problems
	 * with alias mappings.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if the architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);
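
/*
 * Usage sketch (illustrative, not part of the original file): short-lived
 * mappings are taken via kmap_local_page() and must be released in strict
 * LIFO order, because the slots live on a per-task stack (kmap_ctrl).
 * example_copy_page() is a hypothetical helper.
 */
static inline void example_copy_page(struct page *dst, struct page *src)
{
	void *vdst = kmap_local_page(dst);
	void *vsrc = kmap_local_page(src);

	memcpy(vdst, vsrc, PAGE_SIZE);
	kunmap_local(vsrc);	/* unmap in reverse order of mapping */
	kunmap_local(vdst);
}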

void kunmap_local_indexed(void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte;
	int idx;

	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) {
			/* This _should_ never happen! See above. */
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Handle mappings which were obtained by kmap_high_get()
		 * because the virtual address is outside the fixmap area.
		 */
		if (!kmap_high_unmap_local(addr))
			WARN_ON_ONCE(addr < PAGE_OFFSET);
		return;
	}

	preempt_disable();
	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	kmap_pte = kmap_get_pte(addr, idx);
	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte);
	arch_kmap_local_post_unmap(addr);
	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
	kmap_local_idx_pop();
	preempt_enable();
	migrate_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);

/*
 * Invoked before switch_to(). This is safe even when during or after
 * clearing the maps an interrupt which needs a kmap_local happens because
 * the task::kmap_ctrl.idx is not modified by the unmapping code, so a
 * nested kmap_local will use the next unused index and restore the index
 * on unmap. The already cleared kmaps of the outgoing task are irrelevant
 * because the interrupt context does not know about them. The same applies
 * when scheduling back in for an interrupt which happens before the
 * restore is complete.
 */
void __kmap_local_sched_out(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte;
	int i;

	/* Clear kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(pte_val(pteval) != 0);
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/*
		 * This is a horrible hack for XTENSA to calculate the
		 * coloured PTE index. Uses the PFN encoded into the pteval
		 * and the map index calculation because the actual mapped
		 * virtual address is not stored in task::kmap_ctrl.
		 * For any sane architecture this is optimized out.
		 */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		kmap_pte = kmap_get_pte(addr, idx);
		arch_kmap_local_pre_unmap(addr);
		pte_clear(&init_mm, addr, kmap_pte);
		arch_kmap_local_post_unmap(addr);
	}
}

void __kmap_local_sched_in(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte;
	int i;

	/* Restore kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(pte_val(pteval) != 0);
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/* See comment in __kmap_local_sched_out() */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		kmap_pte = kmap_get_pte(addr, idx);
		set_pte_at(&init_mm, addr, kmap_pte, pteval);
		arch_kmap_local_post_map(addr, pteval);
	}
}

void kmap_local_fork(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
		memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
}

#endif /* CONFIG_KMAP_LOCAL */

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
EXPORT_SYMBOL(page_address);
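
/*
 * Illustrative note (not part of the original file): page_address()
 * returns NULL for a highmem page that has no current kmap, which callers
 * can use as a cheap "is it mapped" test. example_page_is_mapped() is a
 * hypothetical helper.
 */
static inline bool example_page_is_mapped(const struct page *page)
{
	/* Lowmem pages always have a direct-map address */
	return page_address(page) != NULL;
}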

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				break;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
}

void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif /* defined(HASHED_PAGE_VIRTUAL) */