/*
 * linux/mm/vmalloc.c
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/llist.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

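/* Process the per-cpu llist of vfree requests deferred from interrupt context. */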
static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;
		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}

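/*** Page table manipulation functions ***/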
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

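/*
 * Populate pte entries for [addr, end) with the given pages; *nr is a running
 * index into the pages array so higher levels can track how far we got.
 */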
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

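/*
 * Set up page tables for the kernel virtual range [start, end) so that pte N
 * maps pages[N] with protection "prot".  No cache flush is performed here;
 * returns the number of pages mapped, or a negative errno on failure.
 */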
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

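/*
 * Walk a vmalloc address down the kernel page tables and return the
 * struct page it currently maps, or NULL if nothing is mapped there.
 */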
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

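/* Map a vmalloc()-space virtual address to its physical page frame number. */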
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

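/*** Global kva allocator ***/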
#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

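/*
 * Allocate a region of kernel virtual address space of the given size and
 * alignment within the vstart..vend range.  If the first attempt fails, the
 * lazily freed areas are purged once and the search is retried.
 */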
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(!is_power_of_2(align));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	spin_lock(&vmap_area_lock);

	/*
	 * Invalidate the search cache if this request has more permissive
	 * parameters than the one that populated the cache.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_entry(first->list.next,
				struct vmap_area, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
	if (printk_ratelimit())
		printk(KERN_WARNING
			"vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area allocation;
	 * areas outside the vmalloc range do not affect it.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area.
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

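/* Clear the page table entries backing a given vmap_area. */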
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * With CONFIG_DEBUG_PAGEALLOC, unmap and flush the range immediately
	 * so a use-after-free of the virtual range faults right away.
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

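/*
 * lazy_max_pages is the maximum amount of virtual address space we allow to
 * accumulate in lazily-freed vmap areas before forcing a purge.  The value
 * trades wasted address space against the cost of global TLB flushes.
 */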
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

static void purge_fragmented_blocks_allcpus(void);

/*
 * Called before iounmap() when the caller wants the area to be freed
 * immediately rather than lazily.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

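/*
 * Purge the lazily-freed vmap areas.
 *
 * If sync is 0 and force_flush is 0, give up when another purge is already in
 * progress.  If force_flush is 1, the kernel TLB range is flushed even when no
 * lazy areas were found.  *start and *end are widened to cover every purged
 * area.
 */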
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1 we still take the lock and go
	 * sync; only the !sync && !force_flush case may bail out early.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas, unless one is already in
 * progress.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

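/*
 * Queue a vmap area for lazy freeing; the TLB flush and the actual free are
 * deferred until enough lazy pages have accumulated.
 */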
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

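/*
 * Unmap a vmap area and queue it for lazy freeing.  The caller must have
 * flushed the cache for the range (see free_unmap_vmap_area).
 */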
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

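/* Flush the cache for the range, then unmap and lazily free the vmap area. */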
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}
713
714
715
716
717
718
719
720
721
722
723
724
725
726#if BITS_PER_LONG == 32
727#define VMALLOC_SPACE (128UL*1024*1024)
728#else
729#define VMALLOC_SPACE (128UL*1024*1024*1024)
730#endif
731
732#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
733#define VMAP_MAX_ALLOC BITS_PER_LONG
734#define VMAP_BBMAP_BITS_MAX 1024
735#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
736#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y))
737#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y))
738#define VMAP_BBMAP_BITS \
739 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
740 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
741 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
742
743#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
744
745static bool vmap_initialized __read_mostly = false;
746
747struct vmap_block_queue {
748 spinlock_t lock;
749 struct list_head free;
750};
751
752struct vmap_block {
753 spinlock_t lock;
754 struct vmap_area *va;
755 struct vmap_block_queue *vbq;
756 unsigned long free, dirty;
757 DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
758 struct list_head free_list;
759 struct rcu_head rcu_head;
760 struct list_head purge;
761};
762
763
764static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
765
766
767
768
769
770
771static DEFINE_SPINLOCK(vmap_block_tree_lock);
772static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
773
774
775
776
777
778
779
780
781static unsigned long addr_to_vb_idx(unsigned long addr)
782{
783 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
784 addr /= VMAP_BLOCK_SIZE;
785 return addr;
786}
787
788static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
789{
790 struct vmap_block_queue *vbq;
791 struct vmap_block *vb;
792 struct vmap_area *va;
793 unsigned long vb_idx;
794 int node, err;
795
796 node = numa_node_id();
797
798 vb = kmalloc_node(sizeof(struct vmap_block),
799 gfp_mask & GFP_RECLAIM_MASK, node);
800 if (unlikely(!vb))
801 return ERR_PTR(-ENOMEM);
802
803 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
804 VMALLOC_START, VMALLOC_END,
805 node, gfp_mask);
806 if (IS_ERR(va)) {
807 kfree(vb);
808 return ERR_CAST(va);
809 }
810
811 err = radix_tree_preload(gfp_mask);
812 if (unlikely(err)) {
813 kfree(vb);
814 free_vmap_area(va);
815 return ERR_PTR(err);
816 }
817
818 spin_lock_init(&vb->lock);
819 vb->va = va;
820 vb->free = VMAP_BBMAP_BITS;
821 vb->dirty = 0;
822 bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
823 INIT_LIST_HEAD(&vb->free_list);
824
825 vb_idx = addr_to_vb_idx(va->va_start);
826 spin_lock(&vmap_block_tree_lock);
827 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
828 spin_unlock(&vmap_block_tree_lock);
829 BUG_ON(err);
830 radix_tree_preload_end();
831
832 vbq = &get_cpu_var(vmap_block_queue);
833 vb->vbq = vbq;
834 spin_lock(&vbq->lock);
835 list_add_rcu(&vb->free_list, &vbq->free);
836 spin_unlock(&vbq->lock);
837 put_cpu_var(vmap_block_queue);
838
839 return vb;
840}
841
842static void free_vmap_block(struct vmap_block *vb)
843{
844 struct vmap_block *tmp;
845 unsigned long vb_idx;
846
847 vb_idx = addr_to_vb_idx(vb->va->va_start);
848 spin_lock(&vmap_block_tree_lock);
849 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
850 spin_unlock(&vmap_block_tree_lock);
851 BUG_ON(tmp != vb);
852
853 free_vmap_area_noflush(vb->va);
854 kfree_rcu(vb, rcu_head);
855}
856
857static void purge_fragmented_blocks(int cpu)
858{
859 LIST_HEAD(purge);
860 struct vmap_block *vb;
861 struct vmap_block *n_vb;
862 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
863
864 rcu_read_lock();
865 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
866
867 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
868 continue;
869
870 spin_lock(&vb->lock);
871 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
872 vb->free = 0;
873 vb->dirty = VMAP_BBMAP_BITS;
874 bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
875 spin_lock(&vbq->lock);
876 list_del_rcu(&vb->free_list);
877 spin_unlock(&vbq->lock);
878 spin_unlock(&vb->lock);
879 list_add_tail(&vb->purge, &purge);
880 } else
881 spin_unlock(&vb->lock);
882 }
883 rcu_read_unlock();
884
885 list_for_each_entry_safe(vb, n_vb, &purge, purge) {
886 list_del(&vb->purge);
887 free_vmap_block(vb);
888 }
889}
890
891static void purge_fragmented_blocks_allcpus(void)
892{
893 int cpu;
894
895 for_each_possible_cpu(cpu)
896 purge_fragmented_blocks(cpu);
897}
898
899static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
900{
901 struct vmap_block_queue *vbq;
902 struct vmap_block *vb;
903 unsigned long addr = 0;
904 unsigned int order;
905
906 BUG_ON(size & ~PAGE_MASK);
907 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
908 if (WARN_ON(size == 0)) {
909
910
911
912
913
914 return NULL;
915 }
916 order = get_order(size);
917
918again:
919 rcu_read_lock();
920 vbq = &get_cpu_var(vmap_block_queue);
921 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
922 int i;
923
924 spin_lock(&vb->lock);
925 if (vb->free < 1UL << order)
926 goto next;
927
928 i = VMAP_BBMAP_BITS - vb->free;
929 addr = vb->va->va_start + (i << PAGE_SHIFT);
930 BUG_ON(addr_to_vb_idx(addr) !=
931 addr_to_vb_idx(vb->va->va_start));
932 vb->free -= 1UL << order;
933 if (vb->free == 0) {
934 spin_lock(&vbq->lock);
935 list_del_rcu(&vb->free_list);
936 spin_unlock(&vbq->lock);
937 }
938 spin_unlock(&vb->lock);
939 break;
940next:
941 spin_unlock(&vb->lock);
942 }
943
944 put_cpu_var(vmap_block_queue);
945 rcu_read_unlock();
946
947 if (!addr) {
948 vb = new_vmap_block(gfp_mask);
949 if (IS_ERR(vb))
950 return vb;
951 goto again;
952 }
953
954 return (void *)addr;
955}
956
957static void vb_free(const void *addr, unsigned long size)
958{
959 unsigned long offset;
960 unsigned long vb_idx;
961 unsigned int order;
962 struct vmap_block *vb;
963
964 BUG_ON(size & ~PAGE_MASK);
965 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
966
967 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
968
969 order = get_order(size);
970
971 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
972
973 vb_idx = addr_to_vb_idx((unsigned long)addr);
974 rcu_read_lock();
975 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
976 rcu_read_unlock();
977 BUG_ON(!vb);
978
979 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
980
981 spin_lock(&vb->lock);
982 BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
983
984 vb->dirty += 1UL << order;
985 if (vb->dirty == VMAP_BBMAP_BITS) {
986 BUG_ON(vb->free);
987 spin_unlock(&vb->lock);
988 free_vmap_block(vb);
989 } else
990 spin_unlock(&vb->lock);
991}
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006void vm_unmap_aliases(void)
1007{
1008 unsigned long start = ULONG_MAX, end = 0;
1009 int cpu;
1010 int flush = 0;
1011
1012 if (unlikely(!vmap_initialized))
1013 return;
1014
1015 for_each_possible_cpu(cpu) {
1016 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1017 struct vmap_block *vb;
1018
1019 rcu_read_lock();
1020 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1021 int i;
1022
1023 spin_lock(&vb->lock);
1024 i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
1025 while (i < VMAP_BBMAP_BITS) {
1026 unsigned long s, e;
1027 int j;
1028 j = find_next_zero_bit(vb->dirty_map,
1029 VMAP_BBMAP_BITS, i);
1030
1031 s = vb->va->va_start + (i << PAGE_SHIFT);
1032 e = vb->va->va_start + (j << PAGE_SHIFT);
1033 flush = 1;
1034
1035 if (s < start)
1036 start = s;
1037 if (e > end)
1038 end = e;
1039
1040 i = j;
1041 i = find_next_bit(vb->dirty_map,
1042 VMAP_BBMAP_BITS, i);
1043 }
1044 spin_unlock(&vb->lock);
1045 }
1046 rcu_read_unlock();
1047 }
1048
1049 __purge_vmap_area_lazy(&start, &end, 1, flush);
1050}
1051EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1052
1053
1054
1055
1056
1057
1058void vm_unmap_ram(const void *mem, unsigned int count)
1059{
1060 unsigned long size = count << PAGE_SHIFT;
1061 unsigned long addr = (unsigned long)mem;
1062
1063 BUG_ON(!addr);
1064 BUG_ON(addr < VMALLOC_START);
1065 BUG_ON(addr > VMALLOC_END);
1066 BUG_ON(addr & (PAGE_SIZE-1));
1067
1068 debug_check_no_locks_freed(mem, size);
1069 vmap_debug_free_range(addr, addr+size);
1070
1071 if (likely(count <= VMAP_MAX_ALLOC))
1072 vb_free(mem, size);
1073 else
1074 free_unmap_vmap_area_addr(addr);
1075}
1076EXPORT_SYMBOL(vm_unmap_ram);
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1088{
1089 unsigned long size = count << PAGE_SHIFT;
1090 unsigned long addr;
1091 void *mem;
1092
1093 if (likely(count <= VMAP_MAX_ALLOC)) {
1094 mem = vb_alloc(size, GFP_KERNEL);
1095 if (IS_ERR(mem))
1096 return NULL;
1097 addr = (unsigned long)mem;
1098 } else {
1099 struct vmap_area *va;
1100 va = alloc_vmap_area(size, PAGE_SIZE,
1101 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1102 if (IS_ERR(va))
1103 return NULL;
1104
1105 addr = va->va_start;
1106 mem = (void *)addr;
1107 }
1108 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1109 vm_unmap_ram(mem, count);
1110 return NULL;
1111 }
1112 return mem;
1113}
1114EXPORT_SYMBOL(vm_map_ram);
1115
1116static struct vm_struct *vmlist __initdata;
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127void __init vm_area_add_early(struct vm_struct *vm)
1128{
1129 struct vm_struct *tmp, **p;
1130
1131 BUG_ON(vmap_initialized);
1132 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1133 if (tmp->addr >= vm->addr) {
1134 BUG_ON(tmp->addr < vm->addr + vm->size);
1135 break;
1136 } else
1137 BUG_ON(tmp->addr + tmp->size > vm->addr);
1138 }
1139 vm->next = *p;
1140 *p = vm;
1141}
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1156{
1157 static size_t vm_init_off __initdata;
1158 unsigned long addr;
1159
1160 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1161 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1162
1163 vm->addr = (void *)addr;
1164
1165 vm_area_add_early(vm);
1166}
1167
1168void __init vmalloc_init(void)
1169{
1170 struct vmap_area *va;
1171 struct vm_struct *tmp;
1172 int i;
1173
1174 for_each_possible_cpu(i) {
1175 struct vmap_block_queue *vbq;
1176 struct vfree_deferred *p;
1177
1178 vbq = &per_cpu(vmap_block_queue, i);
1179 spin_lock_init(&vbq->lock);
1180 INIT_LIST_HEAD(&vbq->free);
1181 p = &per_cpu(vfree_deferred, i);
1182 init_llist_head(&p->list);
1183 INIT_WORK(&p->wq, free_work);
1184 }
1185
1186
1187 for (tmp = vmlist; tmp; tmp = tmp->next) {
1188 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
1189 va->flags = VM_VM_AREA;
1190 va->va_start = (unsigned long)tmp->addr;
1191 va->va_end = va->va_start + tmp->size;
1192 va->vm = tmp;
1193 __insert_vmap_area(va);
1194 }
1195
1196 vmap_area_pcpu_hole = VMALLOC_END;
1197
1198 vmap_initialized = true;
1199}
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1221 pgprot_t prot, struct page **pages)
1222{
1223 return vmap_page_range_noflush(addr, addr + size, prot, pages);
1224}
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1241{
1242 vunmap_page_range(addr, addr + size);
1243}
1244EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254void unmap_kernel_range(unsigned long addr, unsigned long size)
1255{
1256 unsigned long end = addr + size;
1257
1258 flush_cache_vunmap(addr, end);
1259 vunmap_page_range(addr, end);
1260 flush_tlb_kernel_range(addr, end);
1261}
1262
1263int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
1264{
1265 unsigned long addr = (unsigned long)area->addr;
1266 unsigned long end = addr + area->size - PAGE_SIZE;
1267 int err;
1268
1269 err = vmap_page_range(addr, end, prot, *pages);
1270 if (err > 0) {
1271 *pages += err;
1272 err = 0;
1273 }
1274
1275 return err;
1276}
1277EXPORT_SYMBOL_GPL(map_vm_area);
1278
1279static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1280 unsigned long flags, const void *caller)
1281{
1282 spin_lock(&vmap_area_lock);
1283 vm->flags = flags;
1284 vm->addr = (void *)va->va_start;
1285 vm->size = va->va_end - va->va_start;
1286 vm->caller = caller;
1287 va->vm = vm;
1288 va->flags |= VM_VM_AREA;
1289 spin_unlock(&vmap_area_lock);
1290}
1291
1292static void clear_vm_uninitialized_flag(struct vm_struct *vm)
1293{
1294
1295
1296
1297
1298
1299 smp_wmb();
1300 vm->flags &= ~VM_UNINITIALIZED;
1301}
1302
1303static struct vm_struct *__get_vm_area_node(unsigned long size,
1304 unsigned long align, unsigned long flags, unsigned long start,
1305 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
1306{
1307 struct vmap_area *va;
1308 struct vm_struct *area;
1309
1310 BUG_ON(in_interrupt());
1311 if (flags & VM_IOREMAP)
1312 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
1313
1314 size = PAGE_ALIGN(size);
1315 if (unlikely(!size))
1316 return NULL;
1317
1318 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1319 if (unlikely(!area))
1320 return NULL;
1321
1322
1323
1324
1325 size += PAGE_SIZE;
1326
1327 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1328 if (IS_ERR(va)) {
1329 kfree(area);
1330 return NULL;
1331 }
1332
1333 setup_vmalloc_vm(area, va, flags, caller);
1334
1335 return area;
1336}
1337
1338struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1339 unsigned long start, unsigned long end)
1340{
1341 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1342 GFP_KERNEL, __builtin_return_address(0));
1343}
1344EXPORT_SYMBOL_GPL(__get_vm_area);
1345
1346struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1347 unsigned long start, unsigned long end,
1348 const void *caller)
1349{
1350 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1351 GFP_KERNEL, caller);
1352}
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1364{
1365 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1366 NUMA_NO_NODE, GFP_KERNEL,
1367 __builtin_return_address(0));
1368}
1369
1370struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1371 const void *caller)
1372{
1373 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1374 NUMA_NO_NODE, GFP_KERNEL, caller);
1375}
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385struct vm_struct *find_vm_area(const void *addr)
1386{
1387 struct vmap_area *va;
1388
1389 va = find_vmap_area((unsigned long)addr);
1390 if (va && va->flags & VM_VM_AREA)
1391 return va->vm;
1392
1393 return NULL;
1394}
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404struct vm_struct *remove_vm_area(const void *addr)
1405{
1406 struct vmap_area *va;
1407
1408 va = find_vmap_area((unsigned long)addr);
1409 if (va && va->flags & VM_VM_AREA) {
1410 struct vm_struct *vm = va->vm;
1411
1412 spin_lock(&vmap_area_lock);
1413 va->vm = NULL;
1414 va->flags &= ~VM_VM_AREA;
1415 spin_unlock(&vmap_area_lock);
1416
1417 vmap_debug_free_range(va->va_start, va->va_end);
1418 free_unmap_vmap_area(va);
1419 vm->size -= PAGE_SIZE;
1420
1421 return vm;
1422 }
1423 return NULL;
1424}
1425
1426static void __vunmap(const void *addr, int deallocate_pages)
1427{
1428 struct vm_struct *area;
1429
1430 if (!addr)
1431 return;
1432
1433 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
1434 addr))
1435 return;
1436
1437 area = remove_vm_area(addr);
1438 if (unlikely(!area)) {
1439 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1440 addr);
1441 return;
1442 }
1443
1444 debug_check_no_locks_freed(addr, area->size);
1445 debug_check_no_obj_freed(addr, area->size);
1446
1447 if (deallocate_pages) {
1448 int i;
1449
1450 for (i = 0; i < area->nr_pages; i++) {
1451 struct page *page = area->pages[i];
1452
1453 BUG_ON(!page);
1454 __free_page(page);
1455 }
1456
1457 if (area->flags & VM_VPAGES)
1458 vfree(area->pages);
1459 else
1460 kfree(area->pages);
1461 }
1462
1463 kfree(area);
1464 return;
1465}
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481void vfree(const void *addr)
1482{
1483 BUG_ON(in_nmi());
1484
1485 kmemleak_free(addr);
1486
1487 if (!addr)
1488 return;
1489 if (unlikely(in_interrupt())) {
1490 struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
1491 if (llist_add((struct llist_node *)addr, &p->list))
1492 schedule_work(&p->wq);
1493 } else
1494 __vunmap(addr, 1);
1495}
1496EXPORT_SYMBOL(vfree);
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507void vunmap(const void *addr)
1508{
1509 BUG_ON(in_interrupt());
1510 might_sleep();
1511 if (addr)
1512 __vunmap(addr, 0);
1513}
1514EXPORT_SYMBOL(vunmap);
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526void *vmap(struct page **pages, unsigned int count,
1527 unsigned long flags, pgprot_t prot)
1528{
1529 struct vm_struct *area;
1530
1531 might_sleep();
1532
1533 if (count > totalram_pages)
1534 return NULL;
1535
1536 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
1537 __builtin_return_address(0));
1538 if (!area)
1539 return NULL;
1540
1541 if (map_vm_area(area, prot, &pages)) {
1542 vunmap(area->addr);
1543 return NULL;
1544 }
1545
1546 return area->addr;
1547}
1548EXPORT_SYMBOL(vmap);
1549
1550static void *__vmalloc_node(unsigned long size, unsigned long align,
1551 gfp_t gfp_mask, pgprot_t prot,
1552 int node, const void *caller);
1553static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1554 pgprot_t prot, int node, const void *caller)
1555{
1556 const int order = 0;
1557 struct page **pages;
1558 unsigned int nr_pages, array_size, i;
1559 gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1560
1561 nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
1562 array_size = (nr_pages * sizeof(struct page *));
1563
1564 area->nr_pages = nr_pages;
1565
1566 if (array_size > PAGE_SIZE) {
1567 pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
1568 PAGE_KERNEL, node, caller);
1569 area->flags |= VM_VPAGES;
1570 } else {
1571 pages = kmalloc_node(array_size, nested_gfp, node);
1572 }
1573 area->pages = pages;
1574 area->caller = caller;
1575 if (!area->pages) {
1576 remove_vm_area(area->addr);
1577 kfree(area);
1578 return NULL;
1579 }
1580
1581 for (i = 0; i < area->nr_pages; i++) {
1582 struct page *page;
1583 gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
1584
1585 if (node < 0)
1586 page = alloc_page(tmp_mask);
1587 else
1588 page = alloc_pages_node(node, tmp_mask, order);
1589
1590 if (unlikely(!page)) {
1591
1592 area->nr_pages = i;
1593 goto fail;
1594 }
1595 area->pages[i] = page;
1596 }
1597
1598 if (map_vm_area(area, prot, &pages))
1599 goto fail;
1600 return area->addr;
1601
1602fail:
1603 warn_alloc_failed(gfp_mask, order,
1604 "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
1605 (area->nr_pages*PAGE_SIZE), area->size);
1606 vfree(area->addr);
1607 return NULL;
1608}
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625void *__vmalloc_node_range(unsigned long size, unsigned long align,
1626 unsigned long start, unsigned long end, gfp_t gfp_mask,
1627 pgprot_t prot, int node, const void *caller)
1628{
1629 struct vm_struct *area;
1630 void *addr;
1631 unsigned long real_size = size;
1632
1633 size = PAGE_ALIGN(size);
1634 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
1635 goto fail;
1636
1637 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
1638 start, end, node, gfp_mask, caller);
1639 if (!area)
1640 goto fail;
1641
1642 addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
1643 if (!addr)
1644 goto fail;
1645
1646
1647
1648
1649
1650
1651 clear_vm_uninitialized_flag(area);
1652
1653
1654
1655
1656
1657
1658 kmemleak_alloc(addr, real_size, 3, gfp_mask);
1659
1660 return addr;
1661
1662fail:
1663 warn_alloc_failed(gfp_mask, 0,
1664 "vmalloc: allocation failure: %lu bytes\n",
1665 real_size);
1666 return NULL;
1667}
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682static void *__vmalloc_node(unsigned long size, unsigned long align,
1683 gfp_t gfp_mask, pgprot_t prot,
1684 int node, const void *caller)
1685{
1686 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
1687 gfp_mask, prot, node, caller);
1688}
1689
1690void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1691{
1692 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
1693 __builtin_return_address(0));
1694}
1695EXPORT_SYMBOL(__vmalloc);
1696
1697static inline void *__vmalloc_node_flags(unsigned long size,
1698 int node, gfp_t flags)
1699{
1700 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
1701 node, __builtin_return_address(0));
1702}
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713void *vmalloc(unsigned long size)
1714{
1715 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1716 GFP_KERNEL | __GFP_HIGHMEM);
1717}
1718EXPORT_SYMBOL(vmalloc);
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730void *vzalloc(unsigned long size)
1731{
1732 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1733 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1734}
1735EXPORT_SYMBOL(vzalloc);
1736
1737
1738
1739
1740
1741
1742
1743
1744void *vmalloc_user(unsigned long size)
1745{
1746 struct vm_struct *area;
1747 void *ret;
1748
1749 ret = __vmalloc_node(size, SHMLBA,
1750 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1751 PAGE_KERNEL, NUMA_NO_NODE,
1752 __builtin_return_address(0));
1753 if (ret) {
1754 area = find_vm_area(ret);
1755 area->flags |= VM_USERMAP;
1756 }
1757 return ret;
1758}
1759EXPORT_SYMBOL(vmalloc_user);
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772void *vmalloc_node(unsigned long size, int node)
1773{
1774 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1775 node, __builtin_return_address(0));
1776}
1777EXPORT_SYMBOL(vmalloc_node);
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791void *vzalloc_node(unsigned long size, int node)
1792{
1793 return __vmalloc_node_flags(size, node,
1794 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1795}
1796EXPORT_SYMBOL(vzalloc_node);
1797
1798#ifndef PAGE_KERNEL_EXEC
1799# define PAGE_KERNEL_EXEC PAGE_KERNEL
1800#endif
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814void *vmalloc_exec(unsigned long size)
1815{
1816 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
1817 NUMA_NO_NODE, __builtin_return_address(0));
1818}
1819
1820#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
1821#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1822#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1823#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1824#else
1825#define GFP_VMALLOC32 GFP_KERNEL
1826#endif
1827
1828
1829
1830
1831
1832
1833
1834
1835void *vmalloc_32(unsigned long size)
1836{
1837 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
1838 NUMA_NO_NODE, __builtin_return_address(0));
1839}
1840EXPORT_SYMBOL(vmalloc_32);
1841
1842
1843
1844
1845
1846
1847
1848
1849void *vmalloc_32_user(unsigned long size)
1850{
1851 struct vm_struct *area;
1852 void *ret;
1853
1854 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
1855 NUMA_NO_NODE, __builtin_return_address(0));
1856 if (ret) {
1857 area = find_vm_area(ret);
1858 area->flags |= VM_USERMAP;
1859 }
1860 return ret;
1861}
1862EXPORT_SYMBOL(vmalloc_32_user);
1863
1864
1865
1866
1867
1868
1869static int aligned_vread(char *buf, char *addr, unsigned long count)
1870{
1871 struct page *p;
1872 int copied = 0;
1873
1874 while (count) {
1875 unsigned long offset, length;
1876
1877 offset = (unsigned long)addr & ~PAGE_MASK;
1878 length = PAGE_SIZE - offset;
1879 if (length > count)
1880 length = count;
1881 p = vmalloc_to_page(addr);
1882
1883
1884
1885
1886
1887
1888
1889 if (p) {
1890
1891
1892
1893
1894 void *map = kmap_atomic(p);
1895 memcpy(buf, map + offset, length);
1896 kunmap_atomic(map);
1897 } else
1898 memset(buf, 0, length);
1899
1900 addr += length;
1901 buf += length;
1902 copied += length;
1903 count -= length;
1904 }
1905 return copied;
1906}
1907
1908static int aligned_vwrite(char *buf, char *addr, unsigned long count)
1909{
1910 struct page *p;
1911 int copied = 0;
1912
1913 while (count) {
1914 unsigned long offset, length;
1915
1916 offset = (unsigned long)addr & ~PAGE_MASK;
1917 length = PAGE_SIZE - offset;
1918 if (length > count)
1919 length = count;
1920 p = vmalloc_to_page(addr);
1921
1922
1923
1924
1925
1926
1927
1928 if (p) {
1929
1930
1931
1932
1933 void *map = kmap_atomic(p);
1934 memcpy(map + offset, buf, length);
1935 kunmap_atomic(map);
1936 }
1937 addr += length;
1938 buf += length;
1939 copied += length;
1940 count -= length;
1941 }
1942 return copied;
1943}
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971long vread(char *buf, char *addr, unsigned long count)
1972{
1973 struct vmap_area *va;
1974 struct vm_struct *vm;
1975 char *vaddr, *buf_start = buf;
1976 unsigned long buflen = count;
1977 unsigned long n;
1978
1979
1980 if ((unsigned long) addr + count < count)
1981 count = -(unsigned long) addr;
1982
1983 spin_lock(&vmap_area_lock);
1984 list_for_each_entry(va, &vmap_area_list, list) {
1985 if (!count)
1986 break;
1987
1988 if (!(va->flags & VM_VM_AREA))
1989 continue;
1990
1991 vm = va->vm;
1992 vaddr = (char *) vm->addr;
1993 if (addr >= vaddr + vm->size - PAGE_SIZE)
1994 continue;
1995 while (addr < vaddr) {
1996 if (count == 0)
1997 goto finished;
1998 *buf = '\0';
1999 buf++;
2000 addr++;
2001 count--;
2002 }
2003 n = vaddr + vm->size - PAGE_SIZE - addr;
2004 if (n > count)
2005 n = count;
2006 if (!(vm->flags & VM_IOREMAP))
2007 aligned_vread(buf, addr, n);
2008 else
2009 memset(buf, 0, n);
2010 buf += n;
2011 addr += n;
2012 count -= n;
2013 }
2014finished:
2015 spin_unlock(&vmap_area_lock);
2016
2017 if (buf == buf_start)
2018 return 0;
2019
2020 if (buf != buf_start + buflen)
2021 memset(buf, 0, buflen - (buf - buf_start));
2022
2023 return buflen;
2024}
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052long vwrite(char *buf, char *addr, unsigned long count)
2053{
2054 struct vmap_area *va;
2055 struct vm_struct *vm;
2056 char *vaddr;
2057 unsigned long n, buflen;
2058 int copied = 0;
2059
2060
2061 if ((unsigned long) addr + count < count)
2062 count = -(unsigned long) addr;
2063 buflen = count;
2064
2065 spin_lock(&vmap_area_lock);
2066 list_for_each_entry(va, &vmap_area_list, list) {
2067 if (!count)
2068 break;
2069
2070 if (!(va->flags & VM_VM_AREA))
2071 continue;
2072
2073 vm = va->vm;
2074 vaddr = (char *) vm->addr;
2075 if (addr >= vaddr + vm->size - PAGE_SIZE)
2076 continue;
2077 while (addr < vaddr) {
2078 if (count == 0)
2079 goto finished;
2080 buf++;
2081 addr++;
2082 count--;
2083 }
2084 n = vaddr + vm->size - PAGE_SIZE - addr;
2085 if (n > count)
2086 n = count;
2087 if (!(vm->flags & VM_IOREMAP)) {
2088 aligned_vwrite(buf, addr, n);
2089 copied++;
2090 }
2091 buf += n;
2092 addr += n;
2093 count -= n;
2094 }
2095finished:
2096 spin_unlock(&vmap_area_lock);
2097 if (!copied)
2098 return 0;
2099 return buflen;
2100}
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2119 void *kaddr, unsigned long size)
2120{
2121 struct vm_struct *area;
2122
2123 size = PAGE_ALIGN(size);
2124
2125 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2126 return -EINVAL;
2127
2128 area = find_vm_area(kaddr);
2129 if (!area)
2130 return -EINVAL;
2131
2132 if (!(area->flags & VM_USERMAP))
2133 return -EINVAL;
2134
2135 if (kaddr + size > area->addr + area->size)
2136 return -EINVAL;
2137
2138 do {
2139 struct page *page = vmalloc_to_page(kaddr);
2140 int ret;
2141
2142 ret = vm_insert_page(vma, uaddr, page);
2143 if (ret)
2144 return ret;
2145
2146 uaddr += PAGE_SIZE;
2147 kaddr += PAGE_SIZE;
2148 size -= PAGE_SIZE;
2149 } while (size > 0);
2150
2151 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2152
2153 return 0;
2154}
2155EXPORT_SYMBOL(remap_vmalloc_range_partial);
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2172 unsigned long pgoff)
2173{
2174 return remap_vmalloc_range_partial(vma, vma->vm_start,
2175 addr + (pgoff << PAGE_SHIFT),
2176 vma->vm_end - vma->vm_start);
2177}
2178EXPORT_SYMBOL(remap_vmalloc_range);
2179
2180
2181
2182
2183
2184void __attribute__((weak)) vmalloc_sync_all(void)
2185{
2186}
2187
2188
2189static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
2190{
2191 pte_t ***p = data;
2192
2193 if (p) {
2194 *(*p) = pte;
2195 (*p)++;
2196 }
2197 return 0;
2198}
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
2215{
2216 struct vm_struct *area;
2217
2218 area = get_vm_area_caller(size, VM_IOREMAP,
2219 __builtin_return_address(0));
2220 if (area == NULL)
2221 return NULL;
2222
2223
2224
2225
2226
2227 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2228 size, f, ptes ? &ptes : NULL)) {
2229 free_vm_area(area);
2230 return NULL;
2231 }
2232
2233 return area;
2234}
2235EXPORT_SYMBOL_GPL(alloc_vm_area);
2236
2237void free_vm_area(struct vm_struct *area)
2238{
2239 struct vm_struct *ret;
2240 ret = remove_vm_area(area->addr);
2241 BUG_ON(ret != area);
2242 kfree(area);
2243}
2244EXPORT_SYMBOL_GPL(free_vm_area);
2245
2246#ifdef CONFIG_SMP
2247static struct vmap_area *node_to_va(struct rb_node *n)
2248{
2249 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
2250}
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264static bool pvm_find_next_prev(unsigned long end,
2265 struct vmap_area **pnext,
2266 struct vmap_area **pprev)
2267{
2268 struct rb_node *n = vmap_area_root.rb_node;
2269 struct vmap_area *va = NULL;
2270
2271 while (n) {
2272 va = rb_entry(n, struct vmap_area, rb_node);
2273 if (end < va->va_end)
2274 n = n->rb_left;
2275 else if (end > va->va_end)
2276 n = n->rb_right;
2277 else
2278 break;
2279 }
2280
2281 if (!va)
2282 return false;
2283
2284 if (va->va_end > end) {
2285 *pnext = va;
2286 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2287 } else {
2288 *pprev = va;
2289 *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
2290 }
2291 return true;
2292}
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310static unsigned long pvm_determine_end(struct vmap_area **pnext,
2311 struct vmap_area **pprev,
2312 unsigned long align)
2313{
2314 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2315 unsigned long addr;
2316
2317 if (*pnext)
2318 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2319 else
2320 addr = vmalloc_end;
2321
2322 while (*pprev && (*pprev)->va_end > addr) {
2323 *pnext = *pprev;
2324 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2325 }
2326
2327 return addr;
2328}
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2355 const size_t *sizes, int nr_vms,
2356 size_t align)
2357{
2358 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2359 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2360 struct vmap_area **vas, *prev, *next;
2361 struct vm_struct **vms;
2362 int area, area2, last_area, term_area;
2363 unsigned long base, start, end, last_end;
2364 bool purged = false;
2365
2366
2367 BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
2368 for (last_area = 0, area = 0; area < nr_vms; area++) {
2369 start = offsets[area];
2370 end = start + sizes[area];
2371
2372
2373 BUG_ON(!IS_ALIGNED(offsets[area], align));
2374 BUG_ON(!IS_ALIGNED(sizes[area], align));
2375
2376
2377 if (start > offsets[last_area])
2378 last_area = area;
2379
2380 for (area2 = 0; area2 < nr_vms; area2++) {
2381 unsigned long start2 = offsets[area2];
2382 unsigned long end2 = start2 + sizes[area2];
2383
2384 if (area2 == area)
2385 continue;
2386
2387 BUG_ON(start2 >= start && start2 < end);
2388 BUG_ON(end2 <= end && end2 > start);
2389 }
2390 }
2391 last_end = offsets[last_area] + sizes[last_area];
2392
2393 if (vmalloc_end - vmalloc_start < last_end) {
2394 WARN_ON(true);
2395 return NULL;
2396 }
2397
2398 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
2399 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
2400 if (!vas || !vms)
2401 goto err_free2;
2402
2403 for (area = 0; area < nr_vms; area++) {
2404 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
2405 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
2406 if (!vas[area] || !vms[area])
2407 goto err_free;
2408 }
2409retry:
2410 spin_lock(&vmap_area_lock);
2411
2412
2413 area = term_area = last_area;
2414 start = offsets[area];
2415 end = start + sizes[area];
2416
2417 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2418 base = vmalloc_end - last_end;
2419 goto found;
2420 }
2421 base = pvm_determine_end(&next, &prev, align) - end;
2422
2423 while (true) {
2424 BUG_ON(next && next->va_end <= base + end);
2425 BUG_ON(prev && prev->va_end > base + end);
2426
2427
2428
2429
2430
2431 if (base + last_end < vmalloc_start + last_end) {
2432 spin_unlock(&vmap_area_lock);
2433 if (!purged) {
2434 purge_vmap_area_lazy();
2435 purged = true;
2436 goto retry;
2437 }
2438 goto err_free;
2439 }
2440
2441
2442
2443
2444
2445 if (next && next->va_start < base + end) {
2446 base = pvm_determine_end(&next, &prev, align) - end;
2447 term_area = area;
2448 continue;
2449 }
2450
2451
2452
2453
2454
2455
2456 if (prev && prev->va_end > base + start) {
2457 next = prev;
2458 prev = node_to_va(rb_prev(&next->rb_node));
2459 base = pvm_determine_end(&next, &prev, align) - end;
2460 term_area = area;
2461 continue;
2462 }
2463
2464
2465
2466
2467
2468 area = (area + nr_vms - 1) % nr_vms;
2469 if (area == term_area)
2470 break;
2471 start = offsets[area];
2472 end = start + sizes[area];
2473 pvm_find_next_prev(base + end, &next, &prev);
2474 }
2475found:
2476
2477 for (area = 0; area < nr_vms; area++) {
2478 struct vmap_area *va = vas[area];
2479
2480 va->va_start = base + offsets[area];
2481 va->va_end = va->va_start + sizes[area];
2482 __insert_vmap_area(va);
2483 }
2484
2485 vmap_area_pcpu_hole = base + offsets[last_area];
2486
2487 spin_unlock(&vmap_area_lock);
2488
2489
2490 for (area = 0; area < nr_vms; area++)
2491 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2492 pcpu_get_vm_areas);
2493
2494 kfree(vas);
2495 return vms;
2496
2497err_free:
2498 for (area = 0; area < nr_vms; area++) {
2499 kfree(vas[area]);
2500 kfree(vms[area]);
2501 }
2502err_free2:
2503 kfree(vas);
2504 kfree(vms);
2505 return NULL;
2506}
2507
2508
2509
2510
2511
2512
2513
2514
2515void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2516{
2517 int i;
2518
2519 for (i = 0; i < nr_vms; i++)
2520 free_vm_area(vms[i]);
2521 kfree(vms);
2522}
2523#endif
2524
2525#ifdef CONFIG_PROC_FS
2526static void *s_start(struct seq_file *m, loff_t *pos)
2527 __acquires(&vmap_area_lock)
2528{
2529 loff_t n = *pos;
2530 struct vmap_area *va;
2531
2532 spin_lock(&vmap_area_lock);
2533 va = list_entry((&vmap_area_list)->next, typeof(*va), list);
2534 while (n > 0 && &va->list != &vmap_area_list) {
2535 n--;
2536 va = list_entry(va->list.next, typeof(*va), list);
2537 }
2538 if (!n && &va->list != &vmap_area_list)
2539 return va;
2540
2541 return NULL;
2542
2543}
2544
2545static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2546{
2547 struct vmap_area *va = p, *next;
2548
2549 ++*pos;
2550 next = list_entry(va->list.next, typeof(*va), list);
2551 if (&next->list != &vmap_area_list)
2552 return next;
2553
2554 return NULL;
2555}
2556
2557static void s_stop(struct seq_file *m, void *p)
2558 __releases(&vmap_area_lock)
2559{
2560 spin_unlock(&vmap_area_lock);
2561}
2562
2563static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2564{
2565 if (IS_ENABLED(CONFIG_NUMA)) {
2566 unsigned int nr, *counters = m->private;
2567
2568 if (!counters)
2569 return;
2570
2571 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2572
2573 for (nr = 0; nr < v->nr_pages; nr++)
2574 counters[page_to_nid(v->pages[nr])]++;
2575
2576 for_each_node_state(nr, N_HIGH_MEMORY)
2577 if (counters[nr])
2578 seq_printf(m, " N%u=%u", nr, counters[nr]);
2579 }
2580}
2581
2582static int s_show(struct seq_file *m, void *p)
2583{
2584 struct vmap_area *va = p;
2585 struct vm_struct *v;
2586
2587 if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
2588 return 0;
2589
2590 if (!(va->flags & VM_VM_AREA)) {
2591 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
2592 (void *)va->va_start, (void *)va->va_end,
2593 va->va_end - va->va_start);
2594 return 0;
2595 }
2596
2597 v = va->vm;
2598
2599
2600 smp_rmb();
2601 if (v->flags & VM_UNINITIALIZED)
2602 return 0;
2603
2604 seq_printf(m, "0x%pK-0x%pK %7ld",
2605 v->addr, v->addr + v->size, v->size);
2606
2607 if (v->caller)
2608 seq_printf(m, " %pS", v->caller);
2609
2610 if (v->nr_pages)
2611 seq_printf(m, " pages=%d", v->nr_pages);
2612
2613 if (v->phys_addr)
2614 seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);
2615
2616 if (v->flags & VM_IOREMAP)
2617 seq_printf(m, " ioremap");
2618
2619 if (v->flags & VM_ALLOC)
2620 seq_printf(m, " vmalloc");
2621
2622 if (v->flags & VM_MAP)
2623 seq_printf(m, " vmap");
2624
2625 if (v->flags & VM_USERMAP)
2626 seq_printf(m, " user");
2627
2628 if (v->flags & VM_VPAGES)
2629 seq_printf(m, " vpages");
2630
2631 show_numa_info(m, v);
2632 seq_putc(m, '\n');
2633 return 0;
2634}
2635
2636static const struct seq_operations vmalloc_op = {
2637 .start = s_start,
2638 .next = s_next,
2639 .stop = s_stop,
2640 .show = s_show,
2641};
2642
2643static int vmalloc_open(struct inode *inode, struct file *file)
2644{
2645 unsigned int *ptr = NULL;
2646 int ret;
2647
2648 if (IS_ENABLED(CONFIG_NUMA)) {
2649 ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
2650 if (ptr == NULL)
2651 return -ENOMEM;
2652 }
2653 ret = seq_open(file, &vmalloc_op);
2654 if (!ret) {
2655 struct seq_file *m = file->private_data;
2656 m->private = ptr;
2657 } else
2658 kfree(ptr);
2659 return ret;
2660}
2661
2662static const struct file_operations proc_vmalloc_operations = {
2663 .open = vmalloc_open,
2664 .read = seq_read,
2665 .llseek = seq_lseek,
2666 .release = seq_release_private,
2667};
2668
2669static int __init proc_vmalloc_init(void)
2670{
2671 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
2672 return 0;
2673}
2674module_init(proc_vmalloc_init);
2675
2676void get_vmalloc_info(struct vmalloc_info *vmi)
2677{
2678 struct vmap_area *va;
2679 unsigned long free_area_size;
2680 unsigned long prev_end;
2681
2682 vmi->used = 0;
2683 vmi->largest_chunk = 0;
2684
2685 prev_end = VMALLOC_START;
2686
2687 spin_lock(&vmap_area_lock);
2688
2689 if (list_empty(&vmap_area_list)) {
2690 vmi->largest_chunk = VMALLOC_TOTAL;
2691 goto out;
2692 }
2693
2694 list_for_each_entry(va, &vmap_area_list, list) {
2695 unsigned long addr = va->va_start;
2696
2697
2698
2699
2700 if (addr < VMALLOC_START)
2701 continue;
2702 if (addr >= VMALLOC_END)
2703 break;
2704
2705 if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
2706 continue;
2707
2708 vmi->used += (va->va_end - va->va_start);
2709
2710 free_area_size = addr - prev_end;
2711 if (vmi->largest_chunk < free_area_size)
2712 vmi->largest_chunk = free_area_size;
2713
2714 prev_end = va->va_end;
2715 }
2716
2717 if (VMALLOC_END - prev_end > vmi->largest_chunk)
2718 vmi->largest_chunk = VMALLOC_END - prev_end;
2719
2720out:
2721 spin_unlock(&vmap_area_lock);
2722}
2723#endif
2724
2725