/*
 *  linux/mm/vmalloc.c
 *
 *  Virtually contiguous kernel memory: the vmalloc/vmap allocator and the
 *  kernel virtual address (kva) mapping helpers built on top of it.
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);

	while (llnode) {
		void *addr = llnode;

		llnode = llist_next(llnode);
		__vunmap(addr, 1);
	}
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the pages array which helps higher
	 * level callers keep track of where we're up to.
	 */
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;

	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;
293
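/*
 * Find the vmap_area whose [va_start, va_end) range contains @addr, or
 * return NULL if there is none.  Caller must hold vmap_area_lock.
 */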
294static struct vmap_area *__find_vmap_area(unsigned long addr)
295{
296 struct rb_node *n = vmap_area_root.rb_node;
297
298 while (n) {
299 struct vmap_area *va;
300
301 va = rb_entry(n, struct vmap_area, rb_node);
302 if (addr < va->va_start)
303 n = n->rb_left;
304 else if (addr >= va->va_end)
305 n = n->rb_right;
306 else
307 return va;
308 }
309
310 return NULL;
311}
312
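/*
 * Link @va into both the address-sorted rbtree and the address-ordered
 * vmap_area_list.  Normally called with vmap_area_lock held (boot-time
 * setup in vmalloc_init() is the exception).
 */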
313static void __insert_vmap_area(struct vmap_area *va)
314{
315 struct rb_node **p = &vmap_area_root.rb_node;
316 struct rb_node *parent = NULL;
317 struct rb_node *tmp;
318
319 while (*p) {
320 struct vmap_area *tmp_va;
321
322 parent = *p;
323 tmp_va = rb_entry(parent, struct vmap_area, rb_node);
324 if (va->va_start < tmp_va->va_end)
325 p = &(*p)->rb_left;
326 else if (va->va_end > tmp_va->va_start)
327 p = &(*p)->rb_right;
328 else
329 BUG();
330 }
331
332 rb_link_node(&va->rb_node, parent, p);
333 rb_insert_color(&va->rb_node, &vmap_area_root);
334
335
336 tmp = rb_prev(&va->rb_node);
337 if (tmp) {
338 struct vmap_area *prev;
339 prev = rb_entry(tmp, struct vmap_area, rb_node);
340 list_add_rcu(&va->list, &prev->list);
341 } else
342 list_add_rcu(&va->list, &vmap_area_list);
343}
344
345static void purge_vmap_area_lazy(void);
346
347static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
348

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
353static struct vmap_area *alloc_vmap_area(unsigned long size,
354 unsigned long align,
355 unsigned long vstart, unsigned long vend,
356 int node, gfp_t gfp_mask)
357{
358 struct vmap_area *va;
359 struct rb_node *n;
360 unsigned long addr;
361 int purged = 0;
362 struct vmap_area *first;
363
364 BUG_ON(!size);
365 BUG_ON(offset_in_page(size));
366 BUG_ON(!is_power_of_2(align));
367
368 might_sleep_if(gfpflags_allow_blocking(gfp_mask));
369
370 va = kmalloc_node(sizeof(struct vmap_area),
371 gfp_mask & GFP_RECLAIM_MASK, node);
372 if (unlikely(!va))
373 return ERR_PTR(-ENOMEM);
374
375
376
377
378
379 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
380
381retry:
382 spin_lock(&vmap_area_lock);
383
384
385
386
387
388
389
390
391
392 if (!free_vmap_cache ||
393 size < cached_hole_size ||
394 vstart < cached_vstart ||
395 align < cached_align) {
396nocache:
397 cached_hole_size = 0;
398 free_vmap_cache = NULL;
399 }
400
401 cached_vstart = vstart;
402 cached_align = align;
403
404
405 if (free_vmap_cache) {
406 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
407 addr = ALIGN(first->va_end, align);
408 if (addr < vstart)
409 goto nocache;
410 if (addr + size < addr)
411 goto overflow;
412
413 } else {
414 addr = ALIGN(vstart, align);
415 if (addr + size < addr)
416 goto overflow;
417
418 n = vmap_area_root.rb_node;
419 first = NULL;
420
421 while (n) {
422 struct vmap_area *tmp;
423 tmp = rb_entry(n, struct vmap_area, rb_node);
424 if (tmp->va_end >= addr) {
425 first = tmp;
426 if (tmp->va_start <= addr)
427 break;
428 n = n->rb_left;
429 } else
430 n = n->rb_right;
431 }
432
433 if (!first)
434 goto found;
435 }
436
437
438 while (addr + size > first->va_start && addr + size <= vend) {
439 if (addr + cached_hole_size < first->va_start)
440 cached_hole_size = first->va_start - addr;
441 addr = ALIGN(first->va_end, align);
442 if (addr + size < addr)
443 goto overflow;
444
445 if (list_is_last(&first->list, &vmap_area_list))
446 goto found;
447
448 first = list_next_entry(first, list);
449 }
450
451found:
452 if (addr + size > vend)
453 goto overflow;
454
455 va->va_start = addr;
456 va->va_end = addr + size;
457 va->flags = 0;
458 __insert_vmap_area(va);
459 free_vmap_cache = &va->rb_node;
460 spin_unlock(&vmap_area_lock);
461
462 BUG_ON(!IS_ALIGNED(va->va_start, align));
463 BUG_ON(va->va_start < vstart);
464 BUG_ON(va->va_end > vend);
465
466 return va;
467
468overflow:
469 spin_unlock(&vmap_area_lock);
470 if (!purged) {
471 purge_vmap_area_lazy();
472 purged = 1;
473 goto retry;
474 }
475
476 if (gfpflags_allow_blocking(gfp_mask)) {
477 unsigned long freed = 0;
478 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
479 if (freed > 0) {
480 purged = 0;
481 goto retry;
482 }
483 }
484
485 if (printk_ratelimit())
486 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
487 size);
488 kfree(va);
489 return ERR_PTR(-EBUSY);
490}
491
492int register_vmap_purge_notifier(struct notifier_block *nb)
493{
494 return blocking_notifier_chain_register(&vmap_notify_list, nb);
495}
496EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
497
498int unregister_vmap_purge_notifier(struct notifier_block *nb)
499{
500 return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
501}
502EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
503
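/*
 * Unlink @va from the rbtree and vmap_area_list, invalidate the free-area
 * lookup cache if it pointed past the removed node, and free the vmap_area
 * after an RCU grace period.  Caller must hold vmap_area_lock.
 */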
504static void __free_vmap_area(struct vmap_area *va)
505{
506 BUG_ON(RB_EMPTY_NODE(&va->rb_node));
507
508 if (free_vmap_cache) {
509 if (va->va_end < cached_vstart) {
510 free_vmap_cache = NULL;
511 } else {
512 struct vmap_area *cache;
513 cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
514 if (va->va_start <= cache->va_start) {
515 free_vmap_cache = rb_prev(&va->rb_node);
516
517
518
519
520 }
521 }
522 }
523 rb_erase(&va->rb_node, &vmap_area_root);
524 RB_CLEAR_NODE(&va->rb_node);
525 list_del_rcu(&va->list);
526
527
528
529
530
531
532
533 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
534 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
535
536 kfree_rcu(va, rcu_head);
537}
538
539
540
541
542static void free_vmap_area(struct vmap_area *va)
543{
544 spin_lock(&vmap_area_lock);
545 __free_vmap_area(va);
546 spin_unlock(&vmap_area_lock);
547}
548

/*
 * Clear the pagetable entries of a given vmap_area
 */
552static void unmap_vmap_area(struct vmap_area *va)
553{
554 vunmap_page_range(va->va_start, va->va_end);
555}
556
557static void vmap_debug_free_range(unsigned long start, unsigned long end)
558{
559
560
561
562
563
564
565
566
567
568
569
570
571 if (debug_pagealloc_enabled()) {
572 vunmap_page_range(start, end);
573 flush_tlb_kernel_range(start, end);
574 }
575}
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593static unsigned long lazy_max_pages(void)
594{
595 unsigned int log;
596
597 log = fls(num_online_cpus());
598
599 return log * (32UL * 1024 * 1024 / PAGE_SIZE);
600}
601
602static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
603
604
605static void purge_fragmented_blocks_allcpus(void);
606
607
608
609
610
611void set_iounmap_nonlazy(void)
612{
613 atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
614}
615

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
626static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
627 int sync, int force_flush)
628{
629 static DEFINE_SPINLOCK(purge_lock);
630 struct llist_node *valist;
631 struct vmap_area *va;
632 struct vmap_area *n_va;
633 int nr = 0;
634
635
636
637
638
639
640 if (!sync && !force_flush) {
641 if (!spin_trylock(&purge_lock))
642 return;
643 } else
644 spin_lock(&purge_lock);
645
646 if (sync)
647 purge_fragmented_blocks_allcpus();
648
649 valist = llist_del_all(&vmap_purge_list);
650 llist_for_each_entry(va, valist, purge_list) {
651 if (va->va_start < *start)
652 *start = va->va_start;
653 if (va->va_end > *end)
654 *end = va->va_end;
655 nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
656 }
657
658 if (nr)
659 atomic_sub(nr, &vmap_lazy_nr);
660
661 if (nr || force_flush)
662 flush_tlb_kernel_range(*start, *end);
663
664 if (nr) {
665 spin_lock(&vmap_area_lock);
666 llist_for_each_entry_safe(va, n_va, valist, purge_list)
667 __free_vmap_area(va);
668 spin_unlock(&vmap_area_lock);
669 }
670 spin_unlock(&purge_lock);
671}
672
673
674
675
676
677static void try_purge_vmap_area_lazy(void)
678{
679 unsigned long start = ULONG_MAX, end = 0;
680
681 __purge_vmap_area_lazy(&start, &end, 0, 0);
682}
683
684
685
686
687static void purge_vmap_area_lazy(void)
688{
689 unsigned long start = ULONG_MAX, end = 0;
690
691 __purge_vmap_area_lazy(&start, &end, 1, 0);
692}
693

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
699static void free_vmap_area_noflush(struct vmap_area *va)
700{
701 int nr_lazy;
702
703 nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
704 &vmap_lazy_nr);
705
706
707 llist_add(&va->purge_list, &vmap_purge_list);
708
709 if (unlikely(nr_lazy > lazy_max_pages()))
710 try_purge_vmap_area_lazy();
711}
712
713
714
715
716
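/*
 * Unmap a vmap area and queue it for lazy freeing; the caller must already
 * have called flush_cache_vunmap() for the range.
 */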
717static void free_unmap_vmap_area_noflush(struct vmap_area *va)
718{
719 unmap_vmap_area(va);
720 free_vmap_area_noflush(va);
721}
722
723
724
725
726static void free_unmap_vmap_area(struct vmap_area *va)
727{
728 flush_cache_vunmap(va->va_start, va->va_end);
729 free_unmap_vmap_area_noflush(va);
730}
731
732static struct vmap_area *find_vmap_area(unsigned long addr)
733{
734 struct vmap_area *va;
735
736 spin_lock(&vmap_area_lock);
737 va = __find_vmap_area(addr);
738 spin_unlock(&vmap_area_lock);
739
740 return va;
741}
742
743static void free_unmap_vmap_area_addr(unsigned long addr)
744{
745 struct vmap_area *va;
746
747 va = find_vmap_area(addr);
748 BUG_ON(!va);
749 free_unmap_vmap_area(va);
750}
751
752
753
754
755
756
757
758
759
760
761
762
763
764#if BITS_PER_LONG == 32
765#define VMALLOC_SPACE (128UL*1024*1024)
766#else
767#define VMALLOC_SPACE (128UL*1024*1024*1024)
768#endif
769
770#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
771#define VMAP_MAX_ALLOC BITS_PER_LONG
772#define VMAP_BBMAP_BITS_MAX 1024
773#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
774#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y))
775#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y))
776#define VMAP_BBMAP_BITS \
777 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
778 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
779 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
780
781#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
782
783static bool vmap_initialized __read_mostly = false;
784
785struct vmap_block_queue {
786 spinlock_t lock;
787 struct list_head free;
788};
789
790struct vmap_block {
791 spinlock_t lock;
792 struct vmap_area *va;
793 unsigned long free, dirty;
794 unsigned long dirty_min, dirty_max;
795 struct list_head free_list;
796 struct rcu_head rcu_head;
797 struct list_head purge;
798};
799
800
801static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
802
803
804
805
806
807
808static DEFINE_SPINLOCK(vmap_block_tree_lock);
809static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
810
811
812
813
814
815
816
817
818static unsigned long addr_to_vb_idx(unsigned long addr)
819{
820 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
821 addr /= VMAP_BLOCK_SIZE;
822 return addr;
823}
824
825static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
826{
827 unsigned long addr;
828
829 addr = va_start + (pages_off << PAGE_SHIFT);
830 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
831 return (void *)addr;
832}
833
834
835
836
837
838
839
840
841
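/*
 * Allocate a new vmap_block backed by a fresh VMAP_BLOCK_SIZE-sized
 * vmap_area, reserve 2^@order pages from it for the calling CPU, register
 * the block in the vmap_block radix tree and put it on this CPU's free
 * list.  Returns the kva of the reserved pages or an ERR_PTR() on failure.
 */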
842static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
843{
844 struct vmap_block_queue *vbq;
845 struct vmap_block *vb;
846 struct vmap_area *va;
847 unsigned long vb_idx;
848 int node, err;
849 void *vaddr;
850
851 node = numa_node_id();
852
853 vb = kmalloc_node(sizeof(struct vmap_block),
854 gfp_mask & GFP_RECLAIM_MASK, node);
855 if (unlikely(!vb))
856 return ERR_PTR(-ENOMEM);
857
858 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
859 VMALLOC_START, VMALLOC_END,
860 node, gfp_mask);
861 if (IS_ERR(va)) {
862 kfree(vb);
863 return ERR_CAST(va);
864 }
865
866 err = radix_tree_preload(gfp_mask);
867 if (unlikely(err)) {
868 kfree(vb);
869 free_vmap_area(va);
870 return ERR_PTR(err);
871 }
872
873 vaddr = vmap_block_vaddr(va->va_start, 0);
874 spin_lock_init(&vb->lock);
875 vb->va = va;
876
877 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
878 vb->free = VMAP_BBMAP_BITS - (1UL << order);
879 vb->dirty = 0;
880 vb->dirty_min = VMAP_BBMAP_BITS;
881 vb->dirty_max = 0;
882 INIT_LIST_HEAD(&vb->free_list);
883
884 vb_idx = addr_to_vb_idx(va->va_start);
885 spin_lock(&vmap_block_tree_lock);
886 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
887 spin_unlock(&vmap_block_tree_lock);
888 BUG_ON(err);
889 radix_tree_preload_end();
890
891 vbq = &get_cpu_var(vmap_block_queue);
892 spin_lock(&vbq->lock);
893 list_add_tail_rcu(&vb->free_list, &vbq->free);
894 spin_unlock(&vbq->lock);
895 put_cpu_var(vmap_block_queue);
896
897 return vaddr;
898}
899
900static void free_vmap_block(struct vmap_block *vb)
901{
902 struct vmap_block *tmp;
903 unsigned long vb_idx;
904
905 vb_idx = addr_to_vb_idx(vb->va->va_start);
906 spin_lock(&vmap_block_tree_lock);
907 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
908 spin_unlock(&vmap_block_tree_lock);
909 BUG_ON(tmp != vb);
910
911 free_vmap_area_noflush(vb->va);
912 kfree_rcu(vb, rcu_head);
913}
914
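/*
 * Reclaim vmap_blocks on @cpu's free list in which no page is still in use
 * (every page is either unallocated or already returned): mark them fully
 * dirty, drop them from the free list and free the whole block, releasing
 * the virtual address range it pinned.
 */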
915static void purge_fragmented_blocks(int cpu)
916{
917 LIST_HEAD(purge);
918 struct vmap_block *vb;
919 struct vmap_block *n_vb;
920 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
921
922 rcu_read_lock();
923 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
924
925 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
926 continue;
927
928 spin_lock(&vb->lock);
929 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
930 vb->free = 0;
931 vb->dirty = VMAP_BBMAP_BITS;
932 vb->dirty_min = 0;
933 vb->dirty_max = VMAP_BBMAP_BITS;
934 spin_lock(&vbq->lock);
935 list_del_rcu(&vb->free_list);
936 spin_unlock(&vbq->lock);
937 spin_unlock(&vb->lock);
938 list_add_tail(&vb->purge, &purge);
939 } else
940 spin_unlock(&vb->lock);
941 }
942 rcu_read_unlock();
943
944 list_for_each_entry_safe(vb, n_vb, &purge, purge) {
945 list_del(&vb->purge);
946 free_vmap_block(vb);
947 }
948}
949
950static void purge_fragmented_blocks_allcpus(void)
951{
952 int cpu;
953
954 for_each_possible_cpu(cpu)
955 purge_fragmented_blocks(cpu);
956}
957
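/*
 * Allocate @size bytes (page aligned, at most VMAP_MAX_ALLOC pages) of kva
 * from a vmap_block on this CPU's free list, falling back to creating a new
 * block when none has room.  Returns the address or an ERR_PTR() on failure.
 */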
958static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
959{
960 struct vmap_block_queue *vbq;
961 struct vmap_block *vb;
962 void *vaddr = NULL;
963 unsigned int order;
964
965 BUG_ON(offset_in_page(size));
966 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
967 if (WARN_ON(size == 0)) {
968
969
970
971
972
973 return NULL;
974 }
975 order = get_order(size);
976
977 rcu_read_lock();
978 vbq = &get_cpu_var(vmap_block_queue);
979 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
980 unsigned long pages_off;
981
982 spin_lock(&vb->lock);
983 if (vb->free < (1UL << order)) {
984 spin_unlock(&vb->lock);
985 continue;
986 }
987
988 pages_off = VMAP_BBMAP_BITS - vb->free;
989 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
990 vb->free -= 1UL << order;
991 if (vb->free == 0) {
992 spin_lock(&vbq->lock);
993 list_del_rcu(&vb->free_list);
994 spin_unlock(&vbq->lock);
995 }
996
997 spin_unlock(&vb->lock);
998 break;
999 }
1000
1001 put_cpu_var(vmap_block_queue);
1002 rcu_read_unlock();
1003
1004
1005 if (!vaddr)
1006 vaddr = new_vmap_block(order, gfp_mask);
1007
1008 return vaddr;
1009}
1010
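/*
 * Unmap a region previously returned by vb_alloc() and mark it dirty in its
 * vmap_block; once every page of the block has been returned the whole block
 * is freed (lazily, without an immediate TLB flush).
 */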
1011static void vb_free(const void *addr, unsigned long size)
1012{
1013 unsigned long offset;
1014 unsigned long vb_idx;
1015 unsigned int order;
1016 struct vmap_block *vb;
1017
1018 BUG_ON(offset_in_page(size));
1019 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1020
1021 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
1022
1023 order = get_order(size);
1024
1025 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
1026 offset >>= PAGE_SHIFT;
1027
1028 vb_idx = addr_to_vb_idx((unsigned long)addr);
1029 rcu_read_lock();
1030 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
1031 rcu_read_unlock();
1032 BUG_ON(!vb);
1033
1034 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
1035
1036 spin_lock(&vb->lock);
1037
1038
1039 vb->dirty_min = min(vb->dirty_min, offset);
1040 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1041
1042 vb->dirty += 1UL << order;
1043 if (vb->dirty == VMAP_BBMAP_BITS) {
1044 BUG_ON(vb->free);
1045 spin_unlock(&vb->lock);
1046 free_vmap_block(vb);
1047 } else
1048 spin_unlock(&vb->lock);
1049}
1050

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads.  What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings.  After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
1064void vm_unmap_aliases(void)
1065{
1066 unsigned long start = ULONG_MAX, end = 0;
1067 int cpu;
1068 int flush = 0;
1069
1070 if (unlikely(!vmap_initialized))
1071 return;
1072
1073 for_each_possible_cpu(cpu) {
1074 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1075 struct vmap_block *vb;
1076
1077 rcu_read_lock();
1078 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1079 spin_lock(&vb->lock);
1080 if (vb->dirty) {
1081 unsigned long va_start = vb->va->va_start;
1082 unsigned long s, e;
1083
1084 s = va_start + (vb->dirty_min << PAGE_SHIFT);
1085 e = va_start + (vb->dirty_max << PAGE_SHIFT);
1086
1087 start = min(s, start);
1088 end = max(e, end);
1089
1090 flush = 1;
1091 }
1092 spin_unlock(&vb->lock);
1093 }
1094 rcu_read_unlock();
1095 }
1096
1097 __purge_vmap_area_lazy(&start, &end, 1, flush);
1098}
1099EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1100

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
1106void vm_unmap_ram(const void *mem, unsigned int count)
1107{
1108 unsigned long size = (unsigned long)count << PAGE_SHIFT;
1109 unsigned long addr = (unsigned long)mem;
1110
1111 BUG_ON(!addr);
1112 BUG_ON(addr < VMALLOC_START);
1113 BUG_ON(addr > VMALLOC_END);
1114 BUG_ON(!PAGE_ALIGNED(addr));
1115
1116 debug_check_no_locks_freed(mem, size);
1117 vmap_debug_free_range(addr, addr+size);
1118
1119 if (likely(count <= VMAP_MAX_ALLOC))
1120 vb_free(mem, size);
1121 else
1122 free_unmap_vmap_area_addr(addr);
1123}
1124EXPORT_SYMBOL(vm_unmap_ram);
1125

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
1141void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1142{
1143 unsigned long size = (unsigned long)count << PAGE_SHIFT;
1144 unsigned long addr;
1145 void *mem;
1146
1147 if (likely(count <= VMAP_MAX_ALLOC)) {
1148 mem = vb_alloc(size, GFP_KERNEL);
1149 if (IS_ERR(mem))
1150 return NULL;
1151 addr = (unsigned long)mem;
1152 } else {
1153 struct vmap_area *va;
1154 va = alloc_vmap_area(size, PAGE_SIZE,
1155 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1156 if (IS_ERR(va))
1157 return NULL;
1158
1159 addr = va->va_start;
1160 mem = (void *)addr;
1161 }
1162 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1163 vm_unmap_ram(mem, count);
1164 return NULL;
1165 }
1166 return mem;
1167}
1168EXPORT_SYMBOL(vm_map_ram);
1169
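/*
 * Boot-time list of vm areas registered via vm_area_add_early() or
 * vm_area_register_early() before the kva allocator is up; vmalloc_init()
 * converts these entries into proper vmap_areas.
 */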
1170static struct vm_struct *vmlist __initdata;
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181void __init vm_area_add_early(struct vm_struct *vm)
1182{
1183 struct vm_struct *tmp, **p;
1184
1185 BUG_ON(vmap_initialized);
1186 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1187 if (tmp->addr >= vm->addr) {
1188 BUG_ON(tmp->addr < vm->addr + vm->size);
1189 break;
1190 } else
1191 BUG_ON(tmp->addr + tmp->size > vm->addr);
1192 }
1193 vm->next = *p;
1194 *p = vm;
1195}
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1210{
1211 static size_t vm_init_off __initdata;
1212 unsigned long addr;
1213
1214 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1215 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1216
1217 vm->addr = (void *)addr;
1218
1219 vm_area_add_early(vm);
1220}
1221
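/*
 * vmalloc_init - set up the per-cpu vmap block queues and deferred-vfree
 * workers, then import the early boot vmlist entries into the vmap_area
 * rbtree so the normal allocator can see them.
 */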
1222void __init vmalloc_init(void)
1223{
1224 struct vmap_area *va;
1225 struct vm_struct *tmp;
1226 int i;
1227
1228 for_each_possible_cpu(i) {
1229 struct vmap_block_queue *vbq;
1230 struct vfree_deferred *p;
1231
1232 vbq = &per_cpu(vmap_block_queue, i);
1233 spin_lock_init(&vbq->lock);
1234 INIT_LIST_HEAD(&vbq->free);
1235 p = &per_cpu(vfree_deferred, i);
1236 init_llist_head(&p->list);
1237 INIT_WORK(&p->wq, free_work);
1238 }
1239
1240
1241 for (tmp = vmlist; tmp; tmp = tmp->next) {
1242 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
1243 va->flags = VM_VM_AREA;
1244 va->va_start = (unsigned long)tmp->addr;
1245 va->va_end = va->va_start + tmp->size;
1246 va->vm = tmp;
1247 __insert_vmap_area(va);
1248 }
1249
1250 vmap_area_pcpu_hole = VMALLOC_END;
1251
1252 vmap_initialized = true;
1253}
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1275 pgprot_t prot, struct page **pages)
1276{
1277 return vmap_page_range_noflush(addr, addr + size, prot, pages);
1278}
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1295{
1296 vunmap_page_range(addr, addr + size);
1297}
1298EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308void unmap_kernel_range(unsigned long addr, unsigned long size)
1309{
1310 unsigned long end = addr + size;
1311
1312 flush_cache_vunmap(addr, end);
1313 vunmap_page_range(addr, end);
1314 flush_tlb_kernel_range(addr, end);
1315}
1316EXPORT_SYMBOL_GPL(unmap_kernel_range);
1317
1318int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
1319{
1320 unsigned long addr = (unsigned long)area->addr;
1321 unsigned long end = addr + get_vm_area_size(area);
1322 int err;
1323
1324 err = vmap_page_range(addr, end, prot, pages);
1325
1326 return err > 0 ? 0 : err;
1327}
1328EXPORT_SYMBOL_GPL(map_vm_area);
1329
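/*
 * Bind a freshly allocated vmap_area to its vm_struct: record the address,
 * size, flags and caller, and mark the area as backed by a vm_struct
 * (VM_VM_AREA).
 */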
1330static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1331 unsigned long flags, const void *caller)
1332{
1333 spin_lock(&vmap_area_lock);
1334 vm->flags = flags;
1335 vm->addr = (void *)va->va_start;
1336 vm->size = va->va_end - va->va_start;
1337 vm->caller = caller;
1338 va->vm = vm;
1339 va->flags |= VM_VM_AREA;
1340 spin_unlock(&vmap_area_lock);
1341}
1342
1343static void clear_vm_uninitialized_flag(struct vm_struct *vm)
1344{
1345
1346
1347
1348
1349
1350 smp_wmb();
1351 vm->flags &= ~VM_UNINITIALIZED;
1352}
1353
1354static struct vm_struct *__get_vm_area_node(unsigned long size,
1355 unsigned long align, unsigned long flags, unsigned long start,
1356 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
1357{
1358 struct vmap_area *va;
1359 struct vm_struct *area;
1360
1361 BUG_ON(in_interrupt());
1362 if (flags & VM_IOREMAP)
1363 align = 1ul << clamp_t(int, fls_long(size),
1364 PAGE_SHIFT, IOREMAP_MAX_ORDER);
1365
1366 size = PAGE_ALIGN(size);
1367 if (unlikely(!size))
1368 return NULL;
1369
1370 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1371 if (unlikely(!area))
1372 return NULL;
1373
1374 if (!(flags & VM_NO_GUARD))
1375 size += PAGE_SIZE;
1376
1377 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1378 if (IS_ERR(va)) {
1379 kfree(area);
1380 return NULL;
1381 }
1382
1383 setup_vmalloc_vm(area, va, flags, caller);
1384
1385 return area;
1386}
1387
1388struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1389 unsigned long start, unsigned long end)
1390{
1391 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1392 GFP_KERNEL, __builtin_return_address(0));
1393}
1394EXPORT_SYMBOL_GPL(__get_vm_area);
1395
1396struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1397 unsigned long start, unsigned long end,
1398 const void *caller)
1399{
1400 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1401 GFP_KERNEL, caller);
1402}
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1414{
1415 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1416 NUMA_NO_NODE, GFP_KERNEL,
1417 __builtin_return_address(0));
1418}
1419
1420struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1421 const void *caller)
1422{
1423 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1424 NUMA_NO_NODE, GFP_KERNEL, caller);
1425}
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435struct vm_struct *find_vm_area(const void *addr)
1436{
1437 struct vmap_area *va;
1438
1439 va = find_vmap_area((unsigned long)addr);
1440 if (va && va->flags & VM_VM_AREA)
1441 return va->vm;
1442
1443 return NULL;
1444}
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454struct vm_struct *remove_vm_area(const void *addr)
1455{
1456 struct vmap_area *va;
1457
1458 va = find_vmap_area((unsigned long)addr);
1459 if (va && va->flags & VM_VM_AREA) {
1460 struct vm_struct *vm = va->vm;
1461
1462 spin_lock(&vmap_area_lock);
1463 va->vm = NULL;
1464 va->flags &= ~VM_VM_AREA;
1465 spin_unlock(&vmap_area_lock);
1466
1467 vmap_debug_free_range(va->va_start, va->va_end);
1468 kasan_free_shadow(vm);
1469 free_unmap_vmap_area(va);
1470
1471 return vm;
1472 }
1473 return NULL;
1474}
1475
1476static void __vunmap(const void *addr, int deallocate_pages)
1477{
1478 struct vm_struct *area;
1479
1480 if (!addr)
1481 return;
1482
1483 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
1484 addr))
1485 return;
1486
1487 area = remove_vm_area(addr);
1488 if (unlikely(!area)) {
1489 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1490 addr);
1491 return;
1492 }
1493
1494 debug_check_no_locks_freed(addr, get_vm_area_size(area));
1495 debug_check_no_obj_freed(addr, get_vm_area_size(area));
1496
1497 if (deallocate_pages) {
1498 int i;
1499
1500 for (i = 0; i < area->nr_pages; i++) {
1501 struct page *page = area->pages[i];
1502
1503 BUG_ON(!page);
1504 __free_pages(page, 0);
1505 }
1506
1507 kvfree(area->pages);
1508 }
1509
1510 kfree(area);
1511 return;
1512}
1513

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as obtained
 * from vmalloc(), vmalloc_32() or __vmalloc().  If @addr is NULL, no
 * operation is performed.
 *
 * Must not be called in NMI context.  May be called from interrupt context,
 * in which case the free is deferred to the per-cpu vfree_deferred worker.
 *
 * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
 */
1528void vfree(const void *addr)
1529{
1530 BUG_ON(in_nmi());
1531
1532 kmemleak_free(addr);
1533
1534 if (!addr)
1535 return;
1536 if (unlikely(in_interrupt())) {
1537 struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
1538 if (llist_add((struct llist_node *)addr, &p->list))
1539 schedule_work(&p->wq);
1540 } else
1541 __vunmap(addr, 1);
1542}
1543EXPORT_SYMBOL(vfree);
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554void vunmap(const void *addr)
1555{
1556 BUG_ON(in_interrupt());
1557 might_sleep();
1558 if (addr)
1559 __vunmap(addr, 0);
1560}
1561EXPORT_SYMBOL(vunmap);
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573void *vmap(struct page **pages, unsigned int count,
1574 unsigned long flags, pgprot_t prot)
1575{
1576 struct vm_struct *area;
1577 unsigned long size;
1578
1579 might_sleep();
1580
1581 if (count > totalram_pages)
1582 return NULL;
1583
1584 size = (unsigned long)count << PAGE_SHIFT;
1585 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
1586 if (!area)
1587 return NULL;
1588
1589 if (map_vm_area(area, prot, pages)) {
1590 vunmap(area->addr);
1591 return NULL;
1592 }
1593
1594 return area->addr;
1595}
1596EXPORT_SYMBOL(vmap);
1597
1598static void *__vmalloc_node(unsigned long size, unsigned long align,
1599 gfp_t gfp_mask, pgprot_t prot,
1600 int node, const void *caller);
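/*
 * Back @area with individually allocated order-0 pages (preferring @node when
 * one is given) and map them into the area with protection @prot.  On failure
 * the partially populated area is torn down and NULL is returned.
 */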
1601static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1602 pgprot_t prot, int node)
1603{
1604 const int order = 0;
1605 struct page **pages;
1606 unsigned int nr_pages, array_size, i;
1607 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1608 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
1609
1610 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
1611 array_size = (nr_pages * sizeof(struct page *));
1612
1613 area->nr_pages = nr_pages;
1614
1615 if (array_size > PAGE_SIZE) {
1616 pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
1617 PAGE_KERNEL, node, area->caller);
1618 } else {
1619 pages = kmalloc_node(array_size, nested_gfp, node);
1620 }
1621 area->pages = pages;
1622 if (!area->pages) {
1623 remove_vm_area(area->addr);
1624 kfree(area);
1625 return NULL;
1626 }
1627
1628 for (i = 0; i < area->nr_pages; i++) {
1629 struct page *page;
1630
1631 if (node == NUMA_NO_NODE)
1632 page = alloc_pages(alloc_mask, order);
1633 else
1634 page = alloc_pages_node(node, alloc_mask, order);
1635
1636 if (unlikely(!page)) {
1637
1638 area->nr_pages = i;
1639 goto fail;
1640 }
1641 area->pages[i] = page;
1642 if (gfpflags_allow_blocking(gfp_mask))
1643 cond_resched();
1644 }
1645
1646 if (map_vm_area(area, prot, pages))
1647 goto fail;
1648 return area->addr;
1649
1650fail:
1651 warn_alloc_failed(gfp_mask, order,
1652 "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
1653 (area->nr_pages*PAGE_SIZE), area->size);
1654 vfree(area->addr);
1655 return NULL;
1656}
1657

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size:	allocation size
 * @align:	desired alignment
 * @start:	vm area range start
 * @end:	vm area range end
 * @gfp_mask:	flags for the page level allocator
 * @prot:	protection mask for the allocated pages
 * @vm_flags:	additional vm area flags (e.g. %VM_NO_GUARD)
 * @node:	node to use for allocation or NUMA_NO_NODE
 * @caller:	caller's return address
 *
 * Allocate enough pages to cover @size from the page level allocator with
 * @gfp_mask flags.  Map them into contiguous kernel virtual space, using a
 * pagetable protection of @prot.
 */
1674void *__vmalloc_node_range(unsigned long size, unsigned long align,
1675 unsigned long start, unsigned long end, gfp_t gfp_mask,
1676 pgprot_t prot, unsigned long vm_flags, int node,
1677 const void *caller)
1678{
1679 struct vm_struct *area;
1680 void *addr;
1681 unsigned long real_size = size;
1682
1683 size = PAGE_ALIGN(size);
1684 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
1685 goto fail;
1686
1687 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
1688 vm_flags, start, end, node, gfp_mask, caller);
1689 if (!area)
1690 goto fail;
1691
1692 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
1693 if (!addr)
1694 return NULL;
1695
1696
1697
1698
1699
1700
1701 clear_vm_uninitialized_flag(area);
1702
1703
1704
1705
1706
1707
1708 kmemleak_alloc(addr, real_size, 2, gfp_mask);
1709
1710 return addr;
1711
1712fail:
1713 warn_alloc_failed(gfp_mask, 0,
1714 "vmalloc: allocation failure: %lu bytes\n",
1715 real_size);
1716 return NULL;
1717}
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732static void *__vmalloc_node(unsigned long size, unsigned long align,
1733 gfp_t gfp_mask, pgprot_t prot,
1734 int node, const void *caller)
1735{
1736 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
1737 gfp_mask, prot, 0, node, caller);
1738}
1739
1740void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1741{
1742 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
1743 __builtin_return_address(0));
1744}
1745EXPORT_SYMBOL(__vmalloc);
1746
1747static inline void *__vmalloc_node_flags(unsigned long size,
1748 int node, gfp_t flags)
1749{
1750 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
1751 node, __builtin_return_address(0));
1752}
1753

/**
 * vmalloc - allocate virtually contiguous memory
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
1763void *vmalloc(unsigned long size)
1764{
1765 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1766 GFP_KERNEL | __GFP_HIGHMEM);
1767}
1768EXPORT_SYMBOL(vmalloc);
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780void *vzalloc(unsigned long size)
1781{
1782 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1783 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1784}
1785EXPORT_SYMBOL(vzalloc);
1786
1787
1788
1789
1790
1791
1792
1793
1794void *vmalloc_user(unsigned long size)
1795{
1796 struct vm_struct *area;
1797 void *ret;
1798
1799 ret = __vmalloc_node(size, SHMLBA,
1800 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1801 PAGE_KERNEL, NUMA_NO_NODE,
1802 __builtin_return_address(0));
1803 if (ret) {
1804 area = find_vm_area(ret);
1805 area->flags |= VM_USERMAP;
1806 }
1807 return ret;
1808}
1809EXPORT_SYMBOL(vmalloc_user);
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822void *vmalloc_node(unsigned long size, int node)
1823{
1824 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1825 node, __builtin_return_address(0));
1826}
1827EXPORT_SYMBOL(vmalloc_node);
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841void *vzalloc_node(unsigned long size, int node)
1842{
1843 return __vmalloc_node_flags(size, node,
1844 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1845}
1846EXPORT_SYMBOL(vzalloc_node);
1847
1848#ifndef PAGE_KERNEL_EXEC
1849# define PAGE_KERNEL_EXEC PAGE_KERNEL
1850#endif
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864void *vmalloc_exec(unsigned long size)
1865{
1866 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
1867 NUMA_NO_NODE, __builtin_return_address(0));
1868}
1869
1870#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
1871#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1872#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1873#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1874#else
1875#define GFP_VMALLOC32 GFP_KERNEL
1876#endif
1877
1878
1879
1880
1881
1882
1883
1884
1885void *vmalloc_32(unsigned long size)
1886{
1887 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
1888 NUMA_NO_NODE, __builtin_return_address(0));
1889}
1890EXPORT_SYMBOL(vmalloc_32);
1891
1892
1893
1894
1895
1896
1897
1898
1899void *vmalloc_32_user(unsigned long size)
1900{
1901 struct vm_struct *area;
1902 void *ret;
1903
1904 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
1905 NUMA_NO_NODE, __builtin_return_address(0));
1906 if (ret) {
1907 area = find_vm_area(ret);
1908 area->flags |= VM_USERMAP;
1909 }
1910 return ret;
1911}
1912EXPORT_SYMBOL(vmalloc_32_user);
1913
1914
1915
1916
1917
1918
1919static int aligned_vread(char *buf, char *addr, unsigned long count)
1920{
1921 struct page *p;
1922 int copied = 0;
1923
1924 while (count) {
1925 unsigned long offset, length;
1926
1927 offset = offset_in_page(addr);
1928 length = PAGE_SIZE - offset;
1929 if (length > count)
1930 length = count;
1931 p = vmalloc_to_page(addr);
1932
1933
1934
1935
1936
1937
1938
1939 if (p) {
1940
1941
1942
1943
1944 void *map = kmap_atomic(p);
1945 memcpy(buf, map + offset, length);
1946 kunmap_atomic(map);
1947 } else
1948 memset(buf, 0, length);
1949
1950 addr += length;
1951 buf += length;
1952 copied += length;
1953 count -= length;
1954 }
1955 return copied;
1956}
1957
1958static int aligned_vwrite(char *buf, char *addr, unsigned long count)
1959{
1960 struct page *p;
1961 int copied = 0;
1962
1963 while (count) {
1964 unsigned long offset, length;
1965
1966 offset = offset_in_page(addr);
1967 length = PAGE_SIZE - offset;
1968 if (length > count)
1969 length = count;
1970 p = vmalloc_to_page(addr);
1971
1972
1973
1974
1975
1976
1977
1978 if (p) {
1979
1980
1981
1982
1983 void *map = kmap_atomic(p);
1984 memcpy(map + offset, buf, length);
1985 kunmap_atomic(map);
1986 }
1987 addr += length;
1988 buf += length;
1989 copied += length;
1990 count -= length;
1991 }
1992 return copied;
1993}
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021long vread(char *buf, char *addr, unsigned long count)
2022{
2023 struct vmap_area *va;
2024 struct vm_struct *vm;
2025 char *vaddr, *buf_start = buf;
2026 unsigned long buflen = count;
2027 unsigned long n;
2028
2029
2030 if ((unsigned long) addr + count < count)
2031 count = -(unsigned long) addr;
2032
2033 spin_lock(&vmap_area_lock);
2034 list_for_each_entry(va, &vmap_area_list, list) {
2035 if (!count)
2036 break;
2037
2038 if (!(va->flags & VM_VM_AREA))
2039 continue;
2040
2041 vm = va->vm;
2042 vaddr = (char *) vm->addr;
2043 if (addr >= vaddr + get_vm_area_size(vm))
2044 continue;
2045 while (addr < vaddr) {
2046 if (count == 0)
2047 goto finished;
2048 *buf = '\0';
2049 buf++;
2050 addr++;
2051 count--;
2052 }
2053 n = vaddr + get_vm_area_size(vm) - addr;
2054 if (n > count)
2055 n = count;
2056 if (!(vm->flags & VM_IOREMAP))
2057 aligned_vread(buf, addr, n);
2058 else
2059 memset(buf, 0, n);
2060 buf += n;
2061 addr += n;
2062 count -= n;
2063 }
2064finished:
2065 spin_unlock(&vmap_area_lock);
2066
2067 if (buf == buf_start)
2068 return 0;
2069
2070 if (buf != buf_start + buflen)
2071 memset(buf, 0, buflen - (buf - buf_start));
2072
2073 return buflen;
2074}
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102long vwrite(char *buf, char *addr, unsigned long count)
2103{
2104 struct vmap_area *va;
2105 struct vm_struct *vm;
2106 char *vaddr;
2107 unsigned long n, buflen;
2108 int copied = 0;
2109
2110
2111 if ((unsigned long) addr + count < count)
2112 count = -(unsigned long) addr;
2113 buflen = count;
2114
2115 spin_lock(&vmap_area_lock);
2116 list_for_each_entry(va, &vmap_area_list, list) {
2117 if (!count)
2118 break;
2119
2120 if (!(va->flags & VM_VM_AREA))
2121 continue;
2122
2123 vm = va->vm;
2124 vaddr = (char *) vm->addr;
2125 if (addr >= vaddr + get_vm_area_size(vm))
2126 continue;
2127 while (addr < vaddr) {
2128 if (count == 0)
2129 goto finished;
2130 buf++;
2131 addr++;
2132 count--;
2133 }
2134 n = vaddr + get_vm_area_size(vm) - addr;
2135 if (n > count)
2136 n = count;
2137 if (!(vm->flags & VM_IOREMAP)) {
2138 aligned_vwrite(buf, addr, n);
2139 copied++;
2140 }
2141 buf += n;
2142 addr += n;
2143 count -= n;
2144 }
2145finished:
2146 spin_unlock(&vmap_area_lock);
2147 if (!copied)
2148 return 0;
2149 return buflen;
2150}
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2169 void *kaddr, unsigned long size)
2170{
2171 struct vm_struct *area;
2172
2173 size = PAGE_ALIGN(size);
2174
2175 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2176 return -EINVAL;
2177
2178 area = find_vm_area(kaddr);
2179 if (!area)
2180 return -EINVAL;
2181
2182 if (!(area->flags & VM_USERMAP))
2183 return -EINVAL;
2184
2185 if (kaddr + size > area->addr + area->size)
2186 return -EINVAL;
2187
2188 do {
2189 struct page *page = vmalloc_to_page(kaddr);
2190 int ret;
2191
2192 ret = vm_insert_page(vma, uaddr, page);
2193 if (ret)
2194 return ret;
2195
2196 uaddr += PAGE_SIZE;
2197 kaddr += PAGE_SIZE;
2198 size -= PAGE_SIZE;
2199 } while (size > 0);
2200
2201 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2202
2203 return 0;
2204}
2205EXPORT_SYMBOL(remap_vmalloc_range_partial);
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2222 unsigned long pgoff)
2223{
2224 return remap_vmalloc_range_partial(vma, vma->vm_start,
2225 addr + (pgoff << PAGE_SHIFT),
2226 vma->vm_end - vma->vm_start);
2227}
2228EXPORT_SYMBOL(remap_vmalloc_range);
2229
2230
2231
2232
2233
2234void __weak vmalloc_sync_all(void)
2235{
2236}
2237
2238
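/*
 * apply_to_page_range() callback used by alloc_vm_area(): if the caller
 * supplied a @ptes cursor, hand back each pre-instantiated pte and advance
 * the cursor.
 */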
2239static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
2240{
2241 pte_t ***p = data;
2242
2243 if (p) {
2244 *(*p) = pte;
2245 (*p)++;
2246 }
2247 return 0;
2248}
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
2265{
2266 struct vm_struct *area;
2267
2268 area = get_vm_area_caller(size, VM_IOREMAP,
2269 __builtin_return_address(0));
2270 if (area == NULL)
2271 return NULL;
2272
2273
2274
2275
2276
2277 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2278 size, f, ptes ? &ptes : NULL)) {
2279 free_vm_area(area);
2280 return NULL;
2281 }
2282
2283 return area;
2284}
2285EXPORT_SYMBOL_GPL(alloc_vm_area);
2286
2287void free_vm_area(struct vm_struct *area)
2288{
2289 struct vm_struct *ret;
2290 ret = remove_vm_area(area->addr);
2291 BUG_ON(ret != area);
2292 kfree(area);
2293}
2294EXPORT_SYMBOL_GPL(free_vm_area);
2295
2296#ifdef CONFIG_SMP
2297static struct vmap_area *node_to_va(struct rb_node *n)
2298{
2299 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
2300}
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314static bool pvm_find_next_prev(unsigned long end,
2315 struct vmap_area **pnext,
2316 struct vmap_area **pprev)
2317{
2318 struct rb_node *n = vmap_area_root.rb_node;
2319 struct vmap_area *va = NULL;
2320
2321 while (n) {
2322 va = rb_entry(n, struct vmap_area, rb_node);
2323 if (end < va->va_end)
2324 n = n->rb_left;
2325 else if (end > va->va_end)
2326 n = n->rb_right;
2327 else
2328 break;
2329 }
2330
2331 if (!va)
2332 return false;
2333
2334 if (va->va_end > end) {
2335 *pnext = va;
2336 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2337 } else {
2338 *pprev = va;
2339 *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
2340 }
2341 return true;
2342}
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360static unsigned long pvm_determine_end(struct vmap_area **pnext,
2361 struct vmap_area **pprev,
2362 unsigned long align)
2363{
2364 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2365 unsigned long addr;
2366
2367 if (*pnext)
2368 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2369 else
2370 addr = vmalloc_end;
2371
2372 while (*pprev && (*pprev)->va_end > addr) {
2373 *pnext = *pprev;
2374 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2375 }
2376
2377 return addr;
2378}
2379

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from top.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for
 * matching slot.  While scanning, if any of the areas overlaps with
 * existing vmap_area, the base address is pulled down to fit the
 * area.  Scanning is repeated till all the areas fit and then all
 * necessary data structures are inserted and the result is returned.
 */
2404struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2405 const size_t *sizes, int nr_vms,
2406 size_t align)
2407{
2408 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2409 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2410 struct vmap_area **vas, *prev, *next;
2411 struct vm_struct **vms;
2412 int area, area2, last_area, term_area;
2413 unsigned long base, start, end, last_end;
2414 bool purged = false;
2415
2416
2417 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
2418 for (last_area = 0, area = 0; area < nr_vms; area++) {
2419 start = offsets[area];
2420 end = start + sizes[area];
2421
2422
2423 BUG_ON(!IS_ALIGNED(offsets[area], align));
2424 BUG_ON(!IS_ALIGNED(sizes[area], align));
2425
2426
2427 if (start > offsets[last_area])
2428 last_area = area;
2429
2430 for (area2 = 0; area2 < nr_vms; area2++) {
2431 unsigned long start2 = offsets[area2];
2432 unsigned long end2 = start2 + sizes[area2];
2433
2434 if (area2 == area)
2435 continue;
2436
2437 BUG_ON(start2 >= start && start2 < end);
2438 BUG_ON(end2 <= end && end2 > start);
2439 }
2440 }
2441 last_end = offsets[last_area] + sizes[last_area];
2442
2443 if (vmalloc_end - vmalloc_start < last_end) {
2444 WARN_ON(true);
2445 return NULL;
2446 }
2447
2448 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
2449 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
2450 if (!vas || !vms)
2451 goto err_free2;
2452
2453 for (area = 0; area < nr_vms; area++) {
2454 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
2455 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
2456 if (!vas[area] || !vms[area])
2457 goto err_free;
2458 }
2459retry:
2460 spin_lock(&vmap_area_lock);
2461
2462
2463 area = term_area = last_area;
2464 start = offsets[area];
2465 end = start + sizes[area];
2466
2467 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2468 base = vmalloc_end - last_end;
2469 goto found;
2470 }
2471 base = pvm_determine_end(&next, &prev, align) - end;
2472
2473 while (true) {
2474 BUG_ON(next && next->va_end <= base + end);
2475 BUG_ON(prev && prev->va_end > base + end);
2476
2477
2478
2479
2480
2481 if (base + last_end < vmalloc_start + last_end) {
2482 spin_unlock(&vmap_area_lock);
2483 if (!purged) {
2484 purge_vmap_area_lazy();
2485 purged = true;
2486 goto retry;
2487 }
2488 goto err_free;
2489 }
2490
2491
2492
2493
2494
2495 if (next && next->va_start < base + end) {
2496 base = pvm_determine_end(&next, &prev, align) - end;
2497 term_area = area;
2498 continue;
2499 }
2500
2501
2502
2503
2504
2505
2506 if (prev && prev->va_end > base + start) {
2507 next = prev;
2508 prev = node_to_va(rb_prev(&next->rb_node));
2509 base = pvm_determine_end(&next, &prev, align) - end;
2510 term_area = area;
2511 continue;
2512 }
2513
2514
2515
2516
2517
2518 area = (area + nr_vms - 1) % nr_vms;
2519 if (area == term_area)
2520 break;
2521 start = offsets[area];
2522 end = start + sizes[area];
2523 pvm_find_next_prev(base + end, &next, &prev);
2524 }
2525found:
2526
2527 for (area = 0; area < nr_vms; area++) {
2528 struct vmap_area *va = vas[area];
2529
2530 va->va_start = base + offsets[area];
2531 va->va_end = va->va_start + sizes[area];
2532 __insert_vmap_area(va);
2533 }
2534
2535 vmap_area_pcpu_hole = base + offsets[last_area];
2536
2537 spin_unlock(&vmap_area_lock);
2538
2539
2540 for (area = 0; area < nr_vms; area++)
2541 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2542 pcpu_get_vm_areas);
2543
2544 kfree(vas);
2545 return vms;
2546
2547err_free:
2548 for (area = 0; area < nr_vms; area++) {
2549 kfree(vas[area]);
2550 kfree(vms[area]);
2551 }
2552err_free2:
2553 kfree(vas);
2554 kfree(vms);
2555 return NULL;
2556}
2557
2558
2559
2560
2561
2562
2563
2564
2565void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2566{
2567 int i;
2568
2569 for (i = 0; i < nr_vms; i++)
2570 free_vm_area(vms[i]);
2571 kfree(vms);
2572}
2573#endif
2574
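/*
 * /proc/vmallocinfo: walk vmap_area_list under vmap_area_lock and print one
 * line per vm_struct-backed area, with per-node page counts when NUMA is
 * enabled.
 */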
2575#ifdef CONFIG_PROC_FS
2576static void *s_start(struct seq_file *m, loff_t *pos)
2577 __acquires(&vmap_area_lock)
2578{
2579 loff_t n = *pos;
2580 struct vmap_area *va;
2581
2582 spin_lock(&vmap_area_lock);
2583 va = list_first_entry(&vmap_area_list, typeof(*va), list);
2584 while (n > 0 && &va->list != &vmap_area_list) {
2585 n--;
2586 va = list_next_entry(va, list);
2587 }
2588 if (!n && &va->list != &vmap_area_list)
2589 return va;
2590
2591 return NULL;
2592
2593}
2594
2595static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2596{
2597 struct vmap_area *va = p, *next;
2598
2599 ++*pos;
2600 next = list_next_entry(va, list);
2601 if (&next->list != &vmap_area_list)
2602 return next;
2603
2604 return NULL;
2605}
2606
2607static void s_stop(struct seq_file *m, void *p)
2608 __releases(&vmap_area_lock)
2609{
2610 spin_unlock(&vmap_area_lock);
2611}
2612
2613static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2614{
2615 if (IS_ENABLED(CONFIG_NUMA)) {
2616 unsigned int nr, *counters = m->private;
2617
2618 if (!counters)
2619 return;
2620
2621 if (v->flags & VM_UNINITIALIZED)
2622 return;
2623
2624 smp_rmb();
2625
2626 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2627
2628 for (nr = 0; nr < v->nr_pages; nr++)
2629 counters[page_to_nid(v->pages[nr])]++;
2630
2631 for_each_node_state(nr, N_HIGH_MEMORY)
2632 if (counters[nr])
2633 seq_printf(m, " N%u=%u", nr, counters[nr]);
2634 }
2635}
2636
2637static int s_show(struct seq_file *m, void *p)
2638{
2639 struct vmap_area *va = p;
2640 struct vm_struct *v;
2641
2642
2643
2644
2645
2646 if (!(va->flags & VM_VM_AREA))
2647 return 0;
2648
2649 v = va->vm;
2650
2651 seq_printf(m, "0x%pK-0x%pK %7ld",
2652 v->addr, v->addr + v->size, v->size);
2653
2654 if (v->caller)
2655 seq_printf(m, " %pS", v->caller);
2656
2657 if (v->nr_pages)
2658 seq_printf(m, " pages=%d", v->nr_pages);
2659
2660 if (v->phys_addr)
2661 seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);
2662
2663 if (v->flags & VM_IOREMAP)
2664 seq_puts(m, " ioremap");
2665
2666 if (v->flags & VM_ALLOC)
2667 seq_puts(m, " vmalloc");
2668
2669 if (v->flags & VM_MAP)
2670 seq_puts(m, " vmap");
2671
2672 if (v->flags & VM_USERMAP)
2673 seq_puts(m, " user");
2674
2675 if (is_vmalloc_addr(v->pages))
2676 seq_puts(m, " vpages");
2677
2678 show_numa_info(m, v);
2679 seq_putc(m, '\n');
2680 return 0;
2681}
2682
2683static const struct seq_operations vmalloc_op = {
2684 .start = s_start,
2685 .next = s_next,
2686 .stop = s_stop,
2687 .show = s_show,
2688};
2689
2690static int vmalloc_open(struct inode *inode, struct file *file)
2691{
2692 if (IS_ENABLED(CONFIG_NUMA))
2693 return seq_open_private(file, &vmalloc_op,
2694 nr_node_ids * sizeof(unsigned int));
2695 else
2696 return seq_open(file, &vmalloc_op);
2697}
2698
2699static const struct file_operations proc_vmalloc_operations = {
2700 .open = vmalloc_open,
2701 .read = seq_read,
2702 .llseek = seq_lseek,
2703 .release = seq_release_private,
2704};
2705
2706static int __init proc_vmalloc_init(void)
2707{
2708 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
2709 return 0;
2710}
2711module_init(proc_vmalloc_init);
2712
2713#endif
2714
2715