/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Mike Kravetz and Christoph Lameter, SGI, October 2003
 */
11#include <linux/vmalloc.h>
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <linux/highmem.h>
15#include <linux/sched.h>
16#include <linux/slab.h>
17#include <linux/spinlock.h>
18#include <linux/interrupt.h>
19#include <linux/proc_fs.h>
20#include <linux/seq_file.h>
21#include <linux/debugobjects.h>
22#include <linux/kallsyms.h>
23#include <linux/list.h>
24#include <linux/rbtree.h>
25#include <linux/radix-tree.h>
26#include <linux/rcupdate.h>
27#include <linux/pfn.h>
28#include <linux/kmemleak.h>
29#include <linux/atomic.h>
30#include <linux/llist.h>
31#include <asm/uaccess.h>
32#include <asm/tlbflush.h>
33#include <asm/shmparam.h>
34
35struct vfree_deferred {
36 struct llist_head list;
37 struct work_struct wq;
38};
39static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
40
41static void __vunmap(const void *, int);
42
43static void free_work(struct work_struct *w)
44{
45 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
46 struct llist_node *llnode = llist_del_all(&p->list);
47 while (llnode) {
48 void *p = llnode;
49 llnode = llist_next(llnode);
50 __vunmap(p, 1);
51 }
52}

/*** Page table manipulation functions ***/

56static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
57{
58 pte_t *pte;
59
60 pte = pte_offset_kernel(pmd, addr);
61 do {
62 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
63 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64 } while (pte++, addr += PAGE_SIZE, addr != end);
65}
66
67static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
68{
69 pmd_t *pmd;
70 unsigned long next;
71
72 pmd = pmd_offset(pud, addr);
73 do {
74 next = pmd_addr_end(addr, end);
75 if (pmd_none_or_clear_bad(pmd))
76 continue;
77 vunmap_pte_range(pmd, addr, next);
78 } while (pmd++, addr = next, addr != end);
79}
80
81static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
82{
83 pud_t *pud;
84 unsigned long next;
85
86 pud = pud_offset(pgd, addr);
87 do {
88 next = pud_addr_end(addr, end);
89 if (pud_none_or_clear_bad(pud))
90 continue;
91 vunmap_pmd_range(pud, addr, next);
92 } while (pud++, addr = next, addr != end);
93}
94
95static void vunmap_page_range(unsigned long addr, unsigned long end)
96{
97 pgd_t *pgd;
98 unsigned long next;
99
100 BUG_ON(addr >= end);
101 pgd = pgd_offset_k(addr);
102 do {
103 next = pgd_addr_end(addr, end);
104 if (pgd_none_or_clear_bad(pgd))
105 continue;
106 vunmap_pud_range(pgd, addr, next);
107 } while (pgd++, addr = next, addr != end);
108}
109
110static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
111 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
112{
113 pte_t *pte;

	/*
	 * nr is a running index into the pages array which helps higher level
	 * callers keep track of where we're up to.
	 */

120 pte = pte_alloc_kernel(pmd, addr);
121 if (!pte)
122 return -ENOMEM;
123 do {
124 struct page *page = pages[*nr];
125
126 if (WARN_ON(!pte_none(*pte)))
127 return -EBUSY;
128 if (WARN_ON(!page))
129 return -ENOMEM;
130 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
131 (*nr)++;
132 } while (pte++, addr += PAGE_SIZE, addr != end);
133 return 0;
134}
135
136static int vmap_pmd_range(pud_t *pud, unsigned long addr,
137 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
138{
139 pmd_t *pmd;
140 unsigned long next;
141
142 pmd = pmd_alloc(&init_mm, pud, addr);
143 if (!pmd)
144 return -ENOMEM;
145 do {
146 next = pmd_addr_end(addr, end);
147 if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
148 return -ENOMEM;
149 } while (pmd++, addr = next, addr != end);
150 return 0;
151}
152
153static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
154 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
155{
156 pud_t *pud;
157 unsigned long next;
158
159 pud = pud_alloc(&init_mm, pgd, addr);
160 if (!pud)
161 return -ENOMEM;
162 do {
163 next = pud_addr_end(addr, end);
164 if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
165 return -ENOMEM;
166 } while (pud++, addr = next, addr != end);
167 return 0;
168}
169
/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
176static int vmap_page_range_noflush(unsigned long start, unsigned long end,
177 pgprot_t prot, struct page **pages)
178{
179 pgd_t *pgd;
180 unsigned long next;
181 unsigned long addr = start;
182 int err = 0;
183 int nr = 0;
184
185 BUG_ON(addr >= end);
186 pgd = pgd_offset_k(addr);
187 do {
188 next = pgd_addr_end(addr, end);
189 err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
190 if (err)
191 return err;
192 } while (pgd++, addr = next, addr != end);
193
194 return nr;
195}
196
197static int vmap_page_range(unsigned long start, unsigned long end,
198 pgprot_t prot, struct page **pages)
199{
200 int ret;
201
202 ret = vmap_page_range_noflush(start, end, prot, pages);
203 flush_cache_vmap(start, end);
204 return ret;
205}
206
207int is_vmalloc_or_module_addr(const void *x)
208{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
214#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
215 unsigned long addr = (unsigned long)x;
216 if (addr >= MODULES_VADDR && addr < MODULES_END)
217 return 1;
218#endif
219 return is_vmalloc_addr(x);
220}
221
/*
 * Walk a vmap address to the struct page it maps.
 */
225struct page *vmalloc_to_page(const void *vmalloc_addr)
226{
227 unsigned long addr = (unsigned long) vmalloc_addr;
228 struct page *page = NULL;
229 pgd_t *pgd = pgd_offset_k(addr);
230
231
232
233
234
235 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
236
237 if (!pgd_none(*pgd)) {
238 pud_t *pud = pud_offset(pgd, addr);
239 if (!pud_none(*pud)) {
240 pmd_t *pmd = pmd_offset(pud, addr);
241 if (!pmd_none(*pmd)) {
242 pte_t *ptep, pte;
243
244 ptep = pte_offset_map(pmd, addr);
245 pte = *ptep;
246 if (pte_present(pte))
247 page = pte_page(pte);
248 pte_unmap(ptep);
249 }
250 }
251 }
252 return page;
253}
254EXPORT_SYMBOL(vmalloc_to_page);
255
/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
259unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
260{
261 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
262}
263EXPORT_SYMBOL(vmalloc_to_pfn);
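/*
 * Example (illustrative sketch, not part of this file): a caller that needs
 * the struct page or physical frame behind one page of a vmalloc'ed buffer.
 * The buffer and variable names are invented for the example.
 *
 *	char *buf = vmalloc(4 * PAGE_SIZE);
 *
 *	if (buf) {
 *		struct page *pg = vmalloc_to_page(buf + 2 * PAGE_SIZE);
 *		unsigned long pfn = vmalloc_to_pfn(buf + 2 * PAGE_SIZE);
 *
 *		pr_info("third page: page=%p pfn=%lu\n", pg, pfn);
 *		vfree(buf);
 *	}
 */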
264
/*** Global kva allocator ***/

268#define VM_LAZY_FREE 0x01
269#define VM_LAZY_FREEING 0x02
270#define VM_VM_AREA 0x04
271
272static DEFINE_SPINLOCK(vmap_area_lock);
273
274LIST_HEAD(vmap_area_list);
275static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
278static struct rb_node *free_vmap_cache;
279static unsigned long cached_hole_size;
280static unsigned long cached_vstart;
281static unsigned long cached_align;
282
283static unsigned long vmap_area_pcpu_hole;
284
285static struct vmap_area *__find_vmap_area(unsigned long addr)
286{
287 struct rb_node *n = vmap_area_root.rb_node;
288
289 while (n) {
290 struct vmap_area *va;
291
292 va = rb_entry(n, struct vmap_area, rb_node);
293 if (addr < va->va_start)
294 n = n->rb_left;
295 else if (addr >= va->va_end)
296 n = n->rb_right;
297 else
298 return va;
299 }
300
301 return NULL;
302}
303
304static void __insert_vmap_area(struct vmap_area *va)
305{
306 struct rb_node **p = &vmap_area_root.rb_node;
307 struct rb_node *parent = NULL;
308 struct rb_node *tmp;
309
310 while (*p) {
311 struct vmap_area *tmp_va;
312
313 parent = *p;
314 tmp_va = rb_entry(parent, struct vmap_area, rb_node);
315 if (va->va_start < tmp_va->va_end)
316 p = &(*p)->rb_left;
317 else if (va->va_end > tmp_va->va_start)
318 p = &(*p)->rb_right;
319 else
320 BUG();
321 }
322
323 rb_link_node(&va->rb_node, parent, p);
324 rb_insert_color(&va->rb_node, &vmap_area_root);
325
326
327 tmp = rb_prev(&va->rb_node);
328 if (tmp) {
329 struct vmap_area *prev;
330 prev = rb_entry(tmp, struct vmap_area, rb_node);
331 list_add_rcu(&va->list, &prev->list);
332 } else
333 list_add_rcu(&va->list, &vmap_area_list);
334}
335
336static void purge_vmap_area_lazy(void);
337
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
342static struct vmap_area *alloc_vmap_area(unsigned long size,
343 unsigned long align,
344 unsigned long vstart, unsigned long vend,
345 int node, gfp_t gfp_mask)
346{
347 struct vmap_area *va;
348 struct rb_node *n;
349 unsigned long addr;
350 int purged = 0;
351 struct vmap_area *first;
352
353 BUG_ON(!size);
354 BUG_ON(size & ~PAGE_MASK);
355 BUG_ON(!is_power_of_2(align));
356
357 va = kmalloc_node(sizeof(struct vmap_area),
358 gfp_mask & GFP_RECLAIM_MASK, node);
359 if (unlikely(!va))
360 return ERR_PTR(-ENOMEM);
361
362retry:
363 spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
373 if (!free_vmap_cache ||
374 size < cached_hole_size ||
375 vstart < cached_vstart ||
376 align < cached_align) {
377nocache:
378 cached_hole_size = 0;
379 free_vmap_cache = NULL;
380 }
381
382 cached_vstart = vstart;
383 cached_align = align;
384
385
386 if (free_vmap_cache) {
387 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
388 addr = ALIGN(first->va_end, align);
389 if (addr < vstart)
390 goto nocache;
391 if (addr + size < addr)
392 goto overflow;
393
394 } else {
395 addr = ALIGN(vstart, align);
396 if (addr + size < addr)
397 goto overflow;
398
399 n = vmap_area_root.rb_node;
400 first = NULL;
401
402 while (n) {
403 struct vmap_area *tmp;
404 tmp = rb_entry(n, struct vmap_area, rb_node);
405 if (tmp->va_end >= addr) {
406 first = tmp;
407 if (tmp->va_start <= addr)
408 break;
409 n = n->rb_left;
410 } else
411 n = n->rb_right;
412 }
413
414 if (!first)
415 goto found;
416 }
417
418
419 while (addr + size > first->va_start && addr + size <= vend) {
420 if (addr + cached_hole_size < first->va_start)
421 cached_hole_size = first->va_start - addr;
422 addr = ALIGN(first->va_end, align);
423 if (addr + size < addr)
424 goto overflow;
425
426 if (list_is_last(&first->list, &vmap_area_list))
427 goto found;
428
429 first = list_entry(first->list.next,
430 struct vmap_area, list);
431 }
432
433found:
434 if (addr + size > vend)
435 goto overflow;
436
437 va->va_start = addr;
438 va->va_end = addr + size;
439 va->flags = 0;
440 __insert_vmap_area(va);
441 free_vmap_cache = &va->rb_node;
442 spin_unlock(&vmap_area_lock);
443
444 BUG_ON(va->va_start & (align-1));
445 BUG_ON(va->va_start < vstart);
446 BUG_ON(va->va_end > vend);
447
448 return va;
449
450overflow:
451 spin_unlock(&vmap_area_lock);
452 if (!purged) {
453 purge_vmap_area_lazy();
454 purged = 1;
455 goto retry;
456 }
457 if (printk_ratelimit())
458 printk(KERN_WARNING
459 "vmap allocation for size %lu failed: "
460 "use vmalloc=<size> to increase size.\n", size);
461 kfree(va);
462 return ERR_PTR(-EBUSY);
463}
464
465static void __free_vmap_area(struct vmap_area *va)
466{
467 BUG_ON(RB_EMPTY_NODE(&va->rb_node));
468
469 if (free_vmap_cache) {
470 if (va->va_end < cached_vstart) {
471 free_vmap_cache = NULL;
472 } else {
473 struct vmap_area *cache;
474 cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
475 if (va->va_start <= cache->va_start) {
476 free_vmap_cache = rb_prev(&va->rb_node);
477
478
479
480
481 }
482 }
483 }
484 rb_erase(&va->rb_node, &vmap_area_root);
485 RB_CLEAR_NODE(&va->rb_node);
486 list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
494 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
495 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
496
497 kfree_rcu(va, rcu_head);
498}
499
/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
503static void free_vmap_area(struct vmap_area *va)
504{
505 spin_lock(&vmap_area_lock);
506 __free_vmap_area(va);
507 spin_unlock(&vmap_area_lock);
508}
509
/*
 * Clear the pagetable entries of a given vmap_area
 */
513static void unmap_vmap_area(struct vmap_area *va)
514{
515 vunmap_page_range(va->va_start, va->va_end);
516}
517
518static void vmap_debug_free_range(unsigned long start, unsigned long end)
519{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
533#ifdef CONFIG_DEBUG_PAGEALLOC
534 vunmap_page_range(start, end);
535 flush_tlb_kernel_range(start, end);
536#endif
537}
538
/*
 * Lazy TLB flushing for the kva allocator.
 *
 * Unmapping a vmap area requires a global kernel TLB flush, which is
 * expensive because it has to be broadcast to all CPUs.  Instead of
 * flushing on every free, freed areas are accumulated on a lazy list and
 * the TLB is flushed for all of them in one batch once lazy_max_pages()
 * worth of address space has built up.
 *
 * lazy_max_pages() bounds how much virtual address space may be tied up
 * by unflushed lazily-freed areas at any time: roughly 32MB worth of
 * pages, scaled by the base-2 logarithm of the number of online CPUs
 * (via fls()).
 */
555static unsigned long lazy_max_pages(void)
556{
557 unsigned int log;
558
559 log = fls(num_online_cpus());
560
561 return log * (32UL * 1024 * 1024 / PAGE_SIZE);
562}
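/*
 * Worked example (illustrative): with 4KB pages, 32MB corresponds to 8192
 * pages.  On a machine with 16 online CPUs, fls(16) == 5, so lazy_max_pages()
 * returns 5 * 8192 = 40960 pages, i.e. up to 160MB of unmapped-but-not-yet-
 * flushed kernel virtual address space may accumulate before a purge is
 * forced.
 */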
563
564static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
565
566
567static void purge_fragmented_blocks_allcpus(void);
568
/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
573void set_iounmap_nonlazy(void)
574{
575 atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
576}
577
/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
588static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
589 int sync, int force_flush)
590{
591 static DEFINE_SPINLOCK(purge_lock);
592 LIST_HEAD(valist);
593 struct vmap_area *va;
594 struct vmap_area *n_va;
595 int nr = 0;
596
597
598
599
600
601
602 if (!sync && !force_flush) {
603 if (!spin_trylock(&purge_lock))
604 return;
605 } else
606 spin_lock(&purge_lock);
607
608 if (sync)
609 purge_fragmented_blocks_allcpus();
610
611 rcu_read_lock();
612 list_for_each_entry_rcu(va, &vmap_area_list, list) {
613 if (va->flags & VM_LAZY_FREE) {
614 if (va->va_start < *start)
615 *start = va->va_start;
616 if (va->va_end > *end)
617 *end = va->va_end;
618 nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
619 list_add_tail(&va->purge_list, &valist);
620 va->flags |= VM_LAZY_FREEING;
621 va->flags &= ~VM_LAZY_FREE;
622 }
623 }
624 rcu_read_unlock();
625
626 if (nr)
627 atomic_sub(nr, &vmap_lazy_nr);
628
629 if (nr || force_flush)
630 flush_tlb_kernel_range(*start, *end);
631
632 if (nr) {
633 spin_lock(&vmap_area_lock);
634 list_for_each_entry_safe(va, n_va, &valist, purge_list)
635 __free_vmap_area(va);
636 spin_unlock(&vmap_area_lock);
637 }
638 spin_unlock(&purge_lock);
639}
640
/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * else is already purging.
 */
645static void try_purge_vmap_area_lazy(void)
646{
647 unsigned long start = ULONG_MAX, end = 0;
648
649 __purge_vmap_area_lazy(&start, &end, 0, 0);
650}
651
/*
 * Kick off a purge of the outstanding lazy areas.
 */
655static void purge_vmap_area_lazy(void)
656{
657 unsigned long start = ULONG_MAX, end = 0;
658
659 __purge_vmap_area_lazy(&start, &end, 1, 0);
660}
661
/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
667static void free_vmap_area_noflush(struct vmap_area *va)
668{
669 va->flags |= VM_LAZY_FREE;
670 atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
671 if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
672 try_purge_vmap_area_lazy();
673}
674
/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
679static void free_unmap_vmap_area_noflush(struct vmap_area *va)
680{
681 unmap_vmap_area(va);
682 free_vmap_area_noflush(va);
683}
684
/*
 * Free and unmap a vmap area
 */
688static void free_unmap_vmap_area(struct vmap_area *va)
689{
690 flush_cache_vunmap(va->va_start, va->va_end);
691 free_unmap_vmap_area_noflush(va);
692}
693
694static struct vmap_area *find_vmap_area(unsigned long addr)
695{
696 struct vmap_area *va;
697
698 spin_lock(&vmap_area_lock);
699 va = __find_vmap_area(addr);
700 spin_unlock(&vmap_area_lock);
701
702 return va;
703}
704
705static void free_unmap_vmap_area_addr(unsigned long addr)
706{
707 struct vmap_area *va;
708
709 va = find_vmap_area(addr);
710 BUG_ON(!va);
711 free_unmap_vmap_area(va);
712}
713
/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */

/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
726#if BITS_PER_LONG == 32
727#define VMALLOC_SPACE (128UL*1024*1024)
728#else
729#define VMALLOC_SPACE (128UL*1024*1024*1024)
730#endif
731
732#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
733#define VMAP_MAX_ALLOC BITS_PER_LONG
734#define VMAP_BBMAP_BITS_MAX 1024
735#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
736#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y))
737#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y))
738#define VMAP_BBMAP_BITS \
739 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
740 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
741 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
742
743#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
744
745static bool vmap_initialized __read_mostly = false;
746
747struct vmap_block_queue {
748 spinlock_t lock;
749 struct list_head free;
750};
751
752struct vmap_block {
753 spinlock_t lock;
754 struct vmap_area *va;
755 unsigned long free, dirty;
756 DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
757 struct list_head free_list;
758 struct rcu_head rcu_head;
759 struct list_head purge;
760};
761
/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
763static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
764
/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
770static DEFINE_SPINLOCK(vmap_block_tree_lock);
771static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

780static unsigned long addr_to_vb_idx(unsigned long addr)
781{
782 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
783 addr /= VMAP_BLOCK_SIZE;
784 return addr;
785}
786
787static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
788{
789 struct vmap_block_queue *vbq;
790 struct vmap_block *vb;
791 struct vmap_area *va;
792 unsigned long vb_idx;
793 int node, err;
794
795 node = numa_node_id();
796
797 vb = kmalloc_node(sizeof(struct vmap_block),
798 gfp_mask & GFP_RECLAIM_MASK, node);
799 if (unlikely(!vb))
800 return ERR_PTR(-ENOMEM);
801
802 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
803 VMALLOC_START, VMALLOC_END,
804 node, gfp_mask);
805 if (IS_ERR(va)) {
806 kfree(vb);
807 return ERR_CAST(va);
808 }
809
810 err = radix_tree_preload(gfp_mask);
811 if (unlikely(err)) {
812 kfree(vb);
813 free_vmap_area(va);
814 return ERR_PTR(err);
815 }
816
817 spin_lock_init(&vb->lock);
818 vb->va = va;
819 vb->free = VMAP_BBMAP_BITS;
820 vb->dirty = 0;
821 bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
822 INIT_LIST_HEAD(&vb->free_list);
823
824 vb_idx = addr_to_vb_idx(va->va_start);
825 spin_lock(&vmap_block_tree_lock);
826 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
827 spin_unlock(&vmap_block_tree_lock);
828 BUG_ON(err);
829 radix_tree_preload_end();
830
831 vbq = &get_cpu_var(vmap_block_queue);
832 spin_lock(&vbq->lock);
833 list_add_rcu(&vb->free_list, &vbq->free);
834 spin_unlock(&vbq->lock);
835 put_cpu_var(vmap_block_queue);
836
837 return vb;
838}
839
840static void free_vmap_block(struct vmap_block *vb)
841{
842 struct vmap_block *tmp;
843 unsigned long vb_idx;
844
845 vb_idx = addr_to_vb_idx(vb->va->va_start);
846 spin_lock(&vmap_block_tree_lock);
847 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
848 spin_unlock(&vmap_block_tree_lock);
849 BUG_ON(tmp != vb);
850
851 free_vmap_area_noflush(vb->va);
852 kfree_rcu(vb, rcu_head);
853}
854
855static void purge_fragmented_blocks(int cpu)
856{
857 LIST_HEAD(purge);
858 struct vmap_block *vb;
859 struct vmap_block *n_vb;
860 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
861
862 rcu_read_lock();
863 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
864
865 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
866 continue;
867
868 spin_lock(&vb->lock);
869 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
870 vb->free = 0;
871 vb->dirty = VMAP_BBMAP_BITS;
872 bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
873 spin_lock(&vbq->lock);
874 list_del_rcu(&vb->free_list);
875 spin_unlock(&vbq->lock);
876 spin_unlock(&vb->lock);
877 list_add_tail(&vb->purge, &purge);
878 } else
879 spin_unlock(&vb->lock);
880 }
881 rcu_read_unlock();
882
883 list_for_each_entry_safe(vb, n_vb, &purge, purge) {
884 list_del(&vb->purge);
885 free_vmap_block(vb);
886 }
887}
888
889static void purge_fragmented_blocks_allcpus(void)
890{
891 int cpu;
892
893 for_each_possible_cpu(cpu)
894 purge_fragmented_blocks(cpu);
895}
896
897static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
898{
899 struct vmap_block_queue *vbq;
900 struct vmap_block *vb;
901 unsigned long addr = 0;
902 unsigned int order;
903
904 BUG_ON(size & ~PAGE_MASK);
905 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
906 if (WARN_ON(size == 0)) {
907
908
909
910
911
912 return NULL;
913 }
914 order = get_order(size);
915
916again:
917 rcu_read_lock();
918 vbq = &get_cpu_var(vmap_block_queue);
919 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
920 int i;
921
922 spin_lock(&vb->lock);
923 if (vb->free < 1UL << order)
924 goto next;
925
926 i = VMAP_BBMAP_BITS - vb->free;
927 addr = vb->va->va_start + (i << PAGE_SHIFT);
928 BUG_ON(addr_to_vb_idx(addr) !=
929 addr_to_vb_idx(vb->va->va_start));
930 vb->free -= 1UL << order;
931 if (vb->free == 0) {
932 spin_lock(&vbq->lock);
933 list_del_rcu(&vb->free_list);
934 spin_unlock(&vbq->lock);
935 }
936 spin_unlock(&vb->lock);
937 break;
938next:
939 spin_unlock(&vb->lock);
940 }
941
942 put_cpu_var(vmap_block_queue);
943 rcu_read_unlock();
944
945 if (!addr) {
946 vb = new_vmap_block(gfp_mask);
947 if (IS_ERR(vb))
948 return vb;
949 goto again;
950 }
951
952 return (void *)addr;
953}
954
955static void vb_free(const void *addr, unsigned long size)
956{
957 unsigned long offset;
958 unsigned long vb_idx;
959 unsigned int order;
960 struct vmap_block *vb;
961
962 BUG_ON(size & ~PAGE_MASK);
963 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
964
965 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
966
967 order = get_order(size);
968
969 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
970
971 vb_idx = addr_to_vb_idx((unsigned long)addr);
972 rcu_read_lock();
973 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
974 rcu_read_unlock();
975 BUG_ON(!vb);
976
977 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
978
979 spin_lock(&vb->lock);
980 BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
981
982 vb->dirty += 1UL << order;
983 if (vb->dirty == VMAP_BBMAP_BITS) {
984 BUG_ON(vb->free);
985 spin_unlock(&vb->lock);
986 free_vmap_block(vb);
987 } else
988 spin_unlock(&vb->lock);
989}
990
/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
1004void vm_unmap_aliases(void)
1005{
1006 unsigned long start = ULONG_MAX, end = 0;
1007 int cpu;
1008 int flush = 0;
1009
1010 if (unlikely(!vmap_initialized))
1011 return;
1012
1013 for_each_possible_cpu(cpu) {
1014 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1015 struct vmap_block *vb;
1016
1017 rcu_read_lock();
1018 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1019 int i, j;
1020
1021 spin_lock(&vb->lock);
1022 i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
1023 if (i < VMAP_BBMAP_BITS) {
1024 unsigned long s, e;
1025
1026 j = find_last_bit(vb->dirty_map,
1027 VMAP_BBMAP_BITS);
1028 j = j + 1;
1029
1030 s = vb->va->va_start + (i << PAGE_SHIFT);
1031 e = vb->va->va_start + (j << PAGE_SHIFT);
1032 flush = 1;
1033
1034 if (s < start)
1035 start = s;
1036 if (e > end)
1037 end = e;
1038 }
1039 spin_unlock(&vb->lock);
1040 }
1041 rcu_read_unlock();
1042 }
1043
1044 __purge_vmap_area_lazy(&start, &end, 1, flush);
1045}
1046EXPORT_SYMBOL_GPL(vm_unmap_aliases);
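/*
 * Illustrative use (sketch, not taken from this file): a caller that is about
 * to hand pages it owns to an entity that must not observe stale kernel TLB
 * aliases (for example before changing their cache attributes) flushes the
 * lazy vmap aliases first:
 *
 *	vm_unmap_aliases();
 *
 * After the call returns, no lazily-kept vmap alias of those pages remains
 * in any TLB.
 */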
1047
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
1053void vm_unmap_ram(const void *mem, unsigned int count)
1054{
1055 unsigned long size = count << PAGE_SHIFT;
1056 unsigned long addr = (unsigned long)mem;
1057
1058 BUG_ON(!addr);
1059 BUG_ON(addr < VMALLOC_START);
1060 BUG_ON(addr > VMALLOC_END);
1061 BUG_ON(addr & (PAGE_SIZE-1));
1062
1063 debug_check_no_locks_freed(mem, size);
1064 vmap_debug_free_range(addr, addr+size);
1065
1066 if (likely(count <= VMAP_MAX_ALLOC))
1067 vb_free(mem, size);
1068 else
1069 free_unmap_vmap_area_addr(addr);
1070}
1071EXPORT_SYMBOL(vm_unmap_ram);
1072
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
1082void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1083{
1084 unsigned long size = count << PAGE_SHIFT;
1085 unsigned long addr;
1086 void *mem;
1087
1088 if (likely(count <= VMAP_MAX_ALLOC)) {
1089 mem = vb_alloc(size, GFP_KERNEL);
1090 if (IS_ERR(mem))
1091 return NULL;
1092 addr = (unsigned long)mem;
1093 } else {
1094 struct vmap_area *va;
1095 va = alloc_vmap_area(size, PAGE_SIZE,
1096 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1097 if (IS_ERR(va))
1098 return NULL;
1099
1100 addr = va->va_start;
1101 mem = (void *)addr;
1102 }
1103 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1104 vm_unmap_ram(mem, count);
1105 return NULL;
1106 }
1107 return mem;
1108}
1109EXPORT_SYMBOL(vm_map_ram);
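/*
 * Example usage (illustrative sketch, not part of this file): a driver that
 * already holds an array of pages can map them transiently; the count passed
 * to vm_unmap_ram() must match the one given to vm_map_ram(). Names below
 * are invented for the example.
 *
 *	static void example_copy_out(struct page **pages, unsigned int count,
 *				     void *dst)
 *	{
 *		void *va = vm_map_ram(pages, count, NUMA_NO_NODE, PAGE_KERNEL);
 *
 *		if (!va)
 *			return;
 *		memcpy(dst, va, count * PAGE_SIZE);
 *		vm_unmap_ram(va, count);
 *	}
 */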
1110
1111static struct vm_struct *vmlist __initdata;
1112
/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values on entry and other fields should be
 * zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
1122void __init vm_area_add_early(struct vm_struct *vm)
1123{
1124 struct vm_struct *tmp, **p;
1125
1126 BUG_ON(vmap_initialized);
1127 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1128 if (tmp->addr >= vm->addr) {
1129 BUG_ON(tmp->addr < vm->addr + vm->size);
1130 break;
1131 } else
1132 BUG_ON(tmp->addr + tmp->size > vm->addr);
1133 }
1134 vm->next = *p;
1135 *p = vm;
1136}
1137
/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
1150void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1151{
1152 static size_t vm_init_off __initdata;
1153 unsigned long addr;
1154
1155 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1156 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1157
1158 vm->addr = (void *)addr;
1159
1160 vm_area_add_early(vm);
1161}
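/*
 * Illustrative sketch (hypothetical, not from this file): early boot code
 * that needs a fixed chunk of vmalloc space before vmalloc_init() runs can
 * reserve it like this; the static vm_struct, the size and the names are
 * invented. On return, example_early_vm.addr holds the reserved address.
 *
 *	static struct vm_struct example_early_vm;
 *
 *	void __init example_reserve_early(void)
 *	{
 *		example_early_vm.flags = VM_ALLOC;
 *		example_early_vm.size = 4 * PAGE_SIZE;
 *		vm_area_register_early(&example_early_vm, PAGE_SIZE);
 *	}
 */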
1162
1163void __init vmalloc_init(void)
1164{
1165 struct vmap_area *va;
1166 struct vm_struct *tmp;
1167 int i;
1168
1169 for_each_possible_cpu(i) {
1170 struct vmap_block_queue *vbq;
1171 struct vfree_deferred *p;
1172
1173 vbq = &per_cpu(vmap_block_queue, i);
1174 spin_lock_init(&vbq->lock);
1175 INIT_LIST_HEAD(&vbq->free);
1176 p = &per_cpu(vfree_deferred, i);
1177 init_llist_head(&p->list);
1178 INIT_WORK(&p->wq, free_work);
1179 }
1180
1181
1182 for (tmp = vmlist; tmp; tmp = tmp->next) {
1183 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
1184 va->flags = VM_VM_AREA;
1185 va->va_start = (unsigned long)tmp->addr;
1186 va->va_end = va->va_start + tmp->size;
1187 va->vm = tmp;
1188 __insert_vmap_area(va);
1189 }
1190
1191 vmap_area_pcpu_hole = VMALLOC_END;
1192
1193 vmap_initialized = true;
1194}
1195
/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
1215int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1216 pgprot_t prot, struct page **pages)
1217{
1218 return vmap_page_range_noflush(addr, addr + size, prot, pages);
1219}
1220
/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-mapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
1235void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1236{
1237 vunmap_page_range(addr, addr + size);
1238}
1239EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
1240
/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
1249void unmap_kernel_range(unsigned long addr, unsigned long size)
1250{
1251 unsigned long end = addr + size;
1252
1253 flush_cache_vunmap(addr, end);
1254 vunmap_page_range(addr, end);
1255 flush_tlb_kernel_range(addr, end);
1256}
1257
1258int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
1259{
1260 unsigned long addr = (unsigned long)area->addr;
1261 unsigned long end = addr + get_vm_area_size(area);
1262 int err;
1263
1264 err = vmap_page_range(addr, end, prot, *pages);
1265 if (err > 0) {
1266 *pages += err;
1267 err = 0;
1268 }
1269
1270 return err;
1271}
1272EXPORT_SYMBOL_GPL(map_vm_area);
1273
1274static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1275 unsigned long flags, const void *caller)
1276{
1277 spin_lock(&vmap_area_lock);
1278 vm->flags = flags;
1279 vm->addr = (void *)va->va_start;
1280 vm->size = va->va_end - va->va_start;
1281 vm->caller = caller;
1282 va->vm = vm;
1283 va->flags |= VM_VM_AREA;
1284 spin_unlock(&vmap_area_lock);
1285}
1286
1287static void clear_vm_uninitialized_flag(struct vm_struct *vm)
1288{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
1294 smp_wmb();
1295 vm->flags &= ~VM_UNINITIALIZED;
1296}
1297
1298static struct vm_struct *__get_vm_area_node(unsigned long size,
1299 unsigned long align, unsigned long flags, unsigned long start,
1300 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
1301{
1302 struct vmap_area *va;
1303 struct vm_struct *area;
1304
1305 BUG_ON(in_interrupt());
1306 if (flags & VM_IOREMAP)
1307 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
1308
1309 size = PAGE_ALIGN(size);
1310 if (unlikely(!size))
1311 return NULL;
1312
1313 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1314 if (unlikely(!area))
1315 return NULL;
1316
	/*
	 * We always allocate a guard page.
	 */
1320 size += PAGE_SIZE;
1321
1322 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1323 if (IS_ERR(va)) {
1324 kfree(area);
1325 return NULL;
1326 }
1327
1328 setup_vmalloc_vm(area, va, flags, caller);
1329
1330 return area;
1331}
1332
1333struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1334 unsigned long start, unsigned long end)
1335{
1336 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1337 GFP_KERNEL, __builtin_return_address(0));
1338}
1339EXPORT_SYMBOL_GPL(__get_vm_area);
1340
1341struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1342 unsigned long start, unsigned long end,
1343 const void *caller)
1344{
1345 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1346 GFP_KERNEL, caller);
1347}
1348
/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
1358struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1359{
1360 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1361 NUMA_NO_NODE, GFP_KERNEL,
1362 __builtin_return_address(0));
1363}
1364
1365struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1366 const void *caller)
1367{
1368 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1369 NUMA_NO_NODE, GFP_KERNEL, caller);
1370}
1371
/**
 *	find_vm_area  -  find a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and return it.
 *	It is up to the caller to do all required locking to keep the
 *	returned pointer valid.
 */
1380struct vm_struct *find_vm_area(const void *addr)
1381{
1382 struct vmap_area *va;
1383
1384 va = find_vmap_area((unsigned long)addr);
1385 if (va && va->flags & VM_VM_AREA)
1386 return va->vm;
1387
1388 return NULL;
1389}
1390
/**
 *	remove_vm_area  -  find and remove a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
1399struct vm_struct *remove_vm_area(const void *addr)
1400{
1401 struct vmap_area *va;
1402
1403 va = find_vmap_area((unsigned long)addr);
1404 if (va && va->flags & VM_VM_AREA) {
1405 struct vm_struct *vm = va->vm;
1406
1407 spin_lock(&vmap_area_lock);
1408 va->vm = NULL;
1409 va->flags &= ~VM_VM_AREA;
1410 spin_unlock(&vmap_area_lock);
1411
1412 vmap_debug_free_range(va->va_start, va->va_end);
1413 free_unmap_vmap_area(va);
1414 vm->size -= PAGE_SIZE;
1415
1416 return vm;
1417 }
1418 return NULL;
1419}
1420
1421static void __vunmap(const void *addr, int deallocate_pages)
1422{
1423 struct vm_struct *area;
1424
1425 if (!addr)
1426 return;
1427
1428 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
1429 addr))
1430 return;
1431
1432 area = remove_vm_area(addr);
1433 if (unlikely(!area)) {
1434 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1435 addr);
1436 return;
1437 }
1438
1439 debug_check_no_locks_freed(addr, area->size);
1440 debug_check_no_obj_freed(addr, area->size);
1441
1442 if (deallocate_pages) {
1443 int i;
1444
1445 for (i = 0; i < area->nr_pages; i++) {
1446 struct page *page = area->pages[i];
1447
1448 BUG_ON(!page);
1449 __free_page(page);
1450 }
1451
1452 if (area->flags & VM_VPAGES)
1453 vfree(area->pages);
1454 else
1455 kfree(area->pages);
1456 }
1457
1458 kfree(area);
1459 return;
1460}
1461
/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually continuous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in NMI context (strictly speaking, only if we don't
 *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 *	conventions for vfree() arch-dependent would be really messy).
 *
 *	NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
1476void vfree(const void *addr)
1477{
1478 BUG_ON(in_nmi());
1479
1480 kmemleak_free(addr);
1481
1482 if (!addr)
1483 return;
1484 if (unlikely(in_interrupt())) {
1485 struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
1486 if (llist_add((struct llist_node *)addr, &p->list))
1487 schedule_work(&p->wq);
1488 } else
1489 __vunmap(addr, 1);
1490}
1491EXPORT_SYMBOL(vfree);
1492
/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
1502void vunmap(const void *addr)
1503{
1504 BUG_ON(in_interrupt());
1505 might_sleep();
1506 if (addr)
1507 __vunmap(addr, 0);
1508}
1509EXPORT_SYMBOL(vunmap);
1510
/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
1521void *vmap(struct page **pages, unsigned int count,
1522 unsigned long flags, pgprot_t prot)
1523{
1524 struct vm_struct *area;
1525
1526 might_sleep();
1527
1528 if (count > totalram_pages)
1529 return NULL;
1530
1531 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
1532 __builtin_return_address(0));
1533 if (!area)
1534 return NULL;
1535
1536 if (map_vm_area(area, prot, &pages)) {
1537 vunmap(area->addr);
1538 return NULL;
1539 }
1540
1541 return area->addr;
1542}
1543EXPORT_SYMBOL(vmap);
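/*
 * Example (illustrative sketch, not part of this file): map two freshly
 * allocated pages into one virtually contiguous region. Note that a later
 * vunmap() only removes the mapping; the pages themselves must still be
 * freed by the caller. All names are invented for the example.
 *
 *	static void *example_vmap_two_pages(struct page *pages[2])
 *	{
 *		void *va;
 *
 *		pages[0] = alloc_page(GFP_KERNEL);
 *		pages[1] = alloc_page(GFP_KERNEL);
 *		if (!pages[0] || !pages[1])
 *			goto err;
 *		va = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *		if (!va)
 *			goto err;
 *		return va;
 *	err:
 *		if (pages[0])
 *			__free_page(pages[0]);
 *		if (pages[1])
 *			__free_page(pages[1]);
 *		return NULL;
 *	}
 */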
1544
1545static void *__vmalloc_node(unsigned long size, unsigned long align,
1546 gfp_t gfp_mask, pgprot_t prot,
1547 int node, const void *caller);
1548static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1549 pgprot_t prot, int node, const void *caller)
1550{
1551 const int order = 0;
1552 struct page **pages;
1553 unsigned int nr_pages, array_size, i;
1554 gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1555
1556 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
1557 array_size = (nr_pages * sizeof(struct page *));
1558
1559 area->nr_pages = nr_pages;
1560
1561 if (array_size > PAGE_SIZE) {
1562 pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
1563 PAGE_KERNEL, node, caller);
1564 area->flags |= VM_VPAGES;
1565 } else {
1566 pages = kmalloc_node(array_size, nested_gfp, node);
1567 }
1568 area->pages = pages;
1569 area->caller = caller;
1570 if (!area->pages) {
1571 remove_vm_area(area->addr);
1572 kfree(area);
1573 return NULL;
1574 }
1575
1576 for (i = 0; i < area->nr_pages; i++) {
1577 struct page *page;
1578 gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
1579
1580 if (node < 0)
1581 page = alloc_page(tmp_mask);
1582 else
1583 page = alloc_pages_node(node, tmp_mask, order);
1584
1585 if (unlikely(!page)) {
1586
1587 area->nr_pages = i;
1588 goto fail;
1589 }
1590 area->pages[i] = page;
1591 }
1592
1593 if (map_vm_area(area, prot, &pages))
1594 goto fail;
1595 return area->addr;
1596
1597fail:
1598 warn_alloc_failed(gfp_mask, order,
1599 "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
1600 (area->nr_pages*PAGE_SIZE), area->size);
1601 vfree(area->addr);
1602 return NULL;
1603}
1604
/**
 *	__vmalloc_node_range  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@start:		vm area range start
 *	@end:		vm area range end
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
1620void *__vmalloc_node_range(unsigned long size, unsigned long align,
1621 unsigned long start, unsigned long end, gfp_t gfp_mask,
1622 pgprot_t prot, int node, const void *caller)
1623{
1624 struct vm_struct *area;
1625 void *addr;
1626 unsigned long real_size = size;
1627
1628 size = PAGE_ALIGN(size);
1629 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
1630 goto fail;
1631
1632 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
1633 start, end, node, gfp_mask, caller);
1634 if (!area)
1635 goto fail;
1636
1637 addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
1638 if (!addr)
1639 goto fail;
1640
	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
1646 clear_vm_uninitialized_flag(area);
1647
	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
1653 kmemleak_alloc(addr, real_size, 3, gfp_mask);
1654
1655 return addr;
1656
1657fail:
1658 warn_alloc_failed(gfp_mask, 0,
1659 "vmalloc: allocation failure: %lu bytes\n",
1660 real_size);
1661 return NULL;
1662}
1663
/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
1677static void *__vmalloc_node(unsigned long size, unsigned long align,
1678 gfp_t gfp_mask, pgprot_t prot,
1679 int node, const void *caller)
1680{
1681 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
1682 gfp_mask, prot, node, caller);
1683}
1684
1685void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1686{
1687 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
1688 __builtin_return_address(0));
1689}
1690EXPORT_SYMBOL(__vmalloc);
1691
1692static inline void *__vmalloc_node_flags(unsigned long size,
1693 int node, gfp_t flags)
1694{
1695 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
1696 node, __builtin_return_address(0));
1697}
1698
/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
1708void *vmalloc(unsigned long size)
1709{
1710 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1711 GFP_KERNEL | __GFP_HIGHMEM);
1712}
1713EXPORT_SYMBOL(vmalloc);
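/*
 * Example usage (illustrative, not part of this file): allocate a large,
 * virtually contiguous scratch buffer that does not need to be physically
 * contiguous, use it, and free it again. The 2MB size and the names are
 * invented for the example.
 *
 *	static int example_use_scratch(void)
 *	{
 *		char *scratch = vmalloc(2 * 1024 * 1024);
 *
 *		if (!scratch)
 *			return -ENOMEM;
 *		memset(scratch, 0, 2 * 1024 * 1024);
 *		vfree(scratch);
 *		return 0;
 *	}
 */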
1714
/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *	@size:	allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
1725void *vzalloc(unsigned long size)
1726{
1727 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1728 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1729}
1730EXPORT_SYMBOL(vzalloc);
1731
/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
1739void *vmalloc_user(unsigned long size)
1740{
1741 struct vm_struct *area;
1742 void *ret;
1743
1744 ret = __vmalloc_node(size, SHMLBA,
1745 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1746 PAGE_KERNEL, NUMA_NO_NODE,
1747 __builtin_return_address(0));
1748 if (ret) {
1749 area = find_vm_area(ret);
1750 area->flags |= VM_USERMAP;
1751 }
1752 return ret;
1753}
1754EXPORT_SYMBOL(vmalloc_user);
1755
/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
1767void *vmalloc_node(unsigned long size, int node)
1768{
1769 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1770 node, __builtin_return_address(0));
1771}
1772EXPORT_SYMBOL(vmalloc_node);
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786void *vzalloc_node(unsigned long size, int node)
1787{
1788 return __vmalloc_node_flags(size, node,
1789 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1790}
1791EXPORT_SYMBOL(vzalloc_node);
1792
1793#ifndef PAGE_KERNEL_EXEC
1794# define PAGE_KERNEL_EXEC PAGE_KERNEL
1795#endif
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809void *vmalloc_exec(unsigned long size)
1810{
1811 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
1812 NUMA_NO_NODE, __builtin_return_address(0));
1813}
1814
1815#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
1816#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1817#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1818#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1819#else
1820#define GFP_VMALLOC32 GFP_KERNEL
1821#endif
1822
/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
1830void *vmalloc_32(unsigned long size)
1831{
1832 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
1833 NUMA_NO_NODE, __builtin_return_address(0));
1834}
1835EXPORT_SYMBOL(vmalloc_32);
1836
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
1844void *vmalloc_32_user(unsigned long size)
1845{
1846 struct vm_struct *area;
1847 void *ret;
1848
1849 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
1850 NUMA_NO_NODE, __builtin_return_address(0));
1851 if (ret) {
1852 area = find_vm_area(ret);
1853 area->flags |= VM_USERMAP;
1854 }
1855 return ret;
1856}
1857EXPORT_SYMBOL(vmalloc_32_user);
1858
/*
 * Small helper routine: copy contents to buf from addr.
 * If the page is not present, fill with zeroes.
 */
1864static int aligned_vread(char *buf, char *addr, unsigned long count)
1865{
1866 struct page *p;
1867 int copied = 0;
1868
1869 while (count) {
1870 unsigned long offset, length;
1871
1872 offset = (unsigned long)addr & ~PAGE_MASK;
1873 length = PAGE_SIZE - offset;
1874 if (length > count)
1875 length = count;
1876 p = vmalloc_to_page(addr);
1877
1878
1879
1880
1881
1882
1883
1884 if (p) {
1885
1886
1887
1888
1889 void *map = kmap_atomic(p);
1890 memcpy(buf, map + offset, length);
1891 kunmap_atomic(map);
1892 } else
1893 memset(buf, 0, length);
1894
1895 addr += length;
1896 buf += length;
1897 copied += length;
1898 count -= length;
1899 }
1900 return copied;
1901}
1902
1903static int aligned_vwrite(char *buf, char *addr, unsigned long count)
1904{
1905 struct page *p;
1906 int copied = 0;
1907
1908 while (count) {
1909 unsigned long offset, length;
1910
1911 offset = (unsigned long)addr & ~PAGE_MASK;
1912 length = PAGE_SIZE - offset;
1913 if (length > count)
1914 length = count;
1915 p = vmalloc_to_page(addr);
1916
1917
1918
1919
1920
1921
1922
1923 if (p) {
1924
1925
1926
1927
1928 void *map = kmap_atomic(p);
1929 memcpy(map + offset, buf, length);
1930 kunmap_atomic(map);
1931 }
1932 addr += length;
1933 buf += length;
1934 copied += length;
1935 count -= length;
1936 }
1937 return copied;
1938}
1939
/**
 *	vread() - read vmalloc area in a safe way.
 *	@buf:		buffer for reading data
 *	@addr:		vm address.
 *	@count:		number of bytes to be read.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copies data from that area to a given buffer. If the given memory
 *	range of [addr...addr+count) includes some valid address, data is
 *	copied to the proper area of @buf. If there are memory holes, they
 *	are zero-filled. IOREMAP areas are treated as memory holes and no
 *	copy is done.
 *
 *	Returns the number of bytes by which addr and buf should be
 *	increased (same value as @count). Returns 0 if [addr...addr+count)
 *	does not intersect any live vmalloc area; @buf should be a kernel
 *	buffer.
 *
 *	Note: in normal operation vread() is never necessary because the
 *	caller should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access the vmalloc area without
 *	any extra information, such as /dev/kmem.
 */
1966long vread(char *buf, char *addr, unsigned long count)
1967{
1968 struct vmap_area *va;
1969 struct vm_struct *vm;
1970 char *vaddr, *buf_start = buf;
1971 unsigned long buflen = count;
1972 unsigned long n;
1973
1974
1975 if ((unsigned long) addr + count < count)
1976 count = -(unsigned long) addr;
1977
1978 spin_lock(&vmap_area_lock);
1979 list_for_each_entry(va, &vmap_area_list, list) {
1980 if (!count)
1981 break;
1982
1983 if (!(va->flags & VM_VM_AREA))
1984 continue;
1985
1986 vm = va->vm;
1987 vaddr = (char *) vm->addr;
1988 if (addr >= vaddr + get_vm_area_size(vm))
1989 continue;
1990 while (addr < vaddr) {
1991 if (count == 0)
1992 goto finished;
1993 *buf = '\0';
1994 buf++;
1995 addr++;
1996 count--;
1997 }
1998 n = vaddr + get_vm_area_size(vm) - addr;
1999 if (n > count)
2000 n = count;
2001 if (!(vm->flags & VM_IOREMAP))
2002 aligned_vread(buf, addr, n);
2003 else
2004 memset(buf, 0, n);
2005 buf += n;
2006 addr += n;
2007 count -= n;
2008 }
2009finished:
2010 spin_unlock(&vmap_area_lock);
2011
2012 if (buf == buf_start)
2013 return 0;
2014
2015 if (buf != buf_start + buflen)
2016 memset(buf, 0, buflen - (buf - buf_start));
2017
2018 return buflen;
2019}
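/*
 * Example (illustrative sketch, not part of this file): a debugging
 * interface that wants to peek at a vmalloc address without risking a fault
 * on unmapped holes can go through vread() instead of memcpy(). The helper
 * name is invented. A return value of 0 means no part of the range
 * intersects a live vmalloc area; holes within the range are zero-filled.
 *
 *	static long example_peek(void *dst, void *vmalloc_addr,
 *				 unsigned long len)
 *	{
 *		return vread(dst, vmalloc_addr, len);
 *	}
 */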
2020
/**
 *	vwrite() - write vmalloc area in a safe way.
 *	@buf:		buffer for source data
 *	@addr:		vm address.
 *	@count:		number of bytes to be written.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copies data from the buffer to that area. If the given memory range
 *	of [addr...addr+count) includes some valid address, data is copied
 *	from the proper area of @buf. Memory holes are skipped, and IOREMAP
 *	areas are treated as memory holes: no copy is done there.
 *
 *	Returns the number of bytes by which addr and buf should be
 *	increased (same value as @count). Returns 0 if [addr...addr+count)
 *	does not intersect any live vmalloc area; @buf should be a kernel
 *	buffer.
 *
 *	Note: in normal operation vwrite() is never necessary because the
 *	caller should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access the vmalloc area without
 *	any extra information, such as /dev/kmem.
 */
2047long vwrite(char *buf, char *addr, unsigned long count)
2048{
2049 struct vmap_area *va;
2050 struct vm_struct *vm;
2051 char *vaddr;
2052 unsigned long n, buflen;
2053 int copied = 0;
2054
2055
2056 if ((unsigned long) addr + count < count)
2057 count = -(unsigned long) addr;
2058 buflen = count;
2059
2060 spin_lock(&vmap_area_lock);
2061 list_for_each_entry(va, &vmap_area_list, list) {
2062 if (!count)
2063 break;
2064
2065 if (!(va->flags & VM_VM_AREA))
2066 continue;
2067
2068 vm = va->vm;
2069 vaddr = (char *) vm->addr;
2070 if (addr >= vaddr + get_vm_area_size(vm))
2071 continue;
2072 while (addr < vaddr) {
2073 if (count == 0)
2074 goto finished;
2075 buf++;
2076 addr++;
2077 count--;
2078 }
2079 n = vaddr + get_vm_area_size(vm) - addr;
2080 if (n > count)
2081 n = count;
2082 if (!(vm->flags & VM_IOREMAP)) {
2083 aligned_vwrite(buf, addr, n);
2084 copied++;
2085 }
2086 buf += n;
2087 addr += n;
2088 count -= n;
2089 }
2090finished:
2091 spin_unlock(&vmap_area_lock);
2092 if (!copied)
2093 return 0;
2094 return buflen;
2095}
2096
/**
 *	remap_vmalloc_range_partial - map vmalloc pages to userspace
 *	@vma:		vma to cover
 *	@uaddr:		target user address to start at
 *	@kaddr:		virtual address of vmalloc kernel memory
 *	@size:		size of map area
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that @kaddr is a valid vmalloc'ed area,
 *	and that it is big enough to cover the range starting at
 *	@uaddr in @vma. Will return failure if that criteria isn't
 *	met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
2113int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2114 void *kaddr, unsigned long size)
2115{
2116 struct vm_struct *area;
2117
2118 size = PAGE_ALIGN(size);
2119
2120 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2121 return -EINVAL;
2122
2123 area = find_vm_area(kaddr);
2124 if (!area)
2125 return -EINVAL;
2126
2127 if (!(area->flags & VM_USERMAP))
2128 return -EINVAL;
2129
2130 if (kaddr + size > area->addr + area->size)
2131 return -EINVAL;
2132
2133 do {
2134 struct page *page = vmalloc_to_page(kaddr);
2135 int ret;
2136
2137 ret = vm_insert_page(vma, uaddr, page);
2138 if (ret)
2139 return ret;
2140
2141 uaddr += PAGE_SIZE;
2142 kaddr += PAGE_SIZE;
2143 size -= PAGE_SIZE;
2144 } while (size > 0);
2145
2146 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2147
2148 return 0;
2149}
2150EXPORT_SYMBOL(remap_vmalloc_range_partial);
2151
/**
 *	remap_vmalloc_range - map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criteria isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
2166int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2167 unsigned long pgoff)
2168{
2169 return remap_vmalloc_range_partial(vma, vma->vm_start,
2170 addr + (pgoff << PAGE_SHIFT),
2171 vma->vm_end - vma->vm_start);
2172}
2173EXPORT_SYMBOL(remap_vmalloc_range);
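/*
 * Example (illustrative sketch, not part of this file): a typical ->mmap
 * handler exposing a buffer that was allocated with vmalloc_user(), which
 * sets VM_USERMAP and zeroes the memory so it is safe to map to userspace.
 * All names are invented; example_buf is assumed to have been allocated
 * elsewhere with vmalloc_user().
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, example_buf, 0);
 *	}
 */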
2174
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
2179void __attribute__((weak)) vmalloc_sync_all(void)
2180{
2181}
2182
2183
2184static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
2185{
2186 pte_t ***p = data;
2187
2188 if (p) {
2189 *(*p) = pte;
2190 (*p)++;
2191 }
2192 return 0;
2193}
2194
/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@ptes:		returns the PTEs for the address space
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.
 *
 *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 *	allocated for the VM area are returned.
 */
2209struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
2210{
2211 struct vm_struct *area;
2212
2213 area = get_vm_area_caller(size, VM_IOREMAP,
2214 __builtin_return_address(0));
2215 if (area == NULL)
2216 return NULL;
2217
	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
2222 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2223 size, f, ptes ? &ptes : NULL)) {
2224 free_vm_area(area);
2225 return NULL;
2226 }
2227
2228 return area;
2229}
2230EXPORT_SYMBOL_GPL(alloc_vm_area);
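/*
 * Example (illustrative sketch, not part of this file): reserving two pages
 * of kernel address space together with their PTE pointers, as a hypervisor
 * backend might do before pointing the PTEs at foreign pages. No mappings
 * exist on return; example_ptes[] merely gives direct access to the page
 * table entries covering the reserved range. All names are invented.
 *
 *	static struct vm_struct *example_area;
 *	static pte_t *example_ptes[2];
 *
 *	static int example_reserve(void)
 *	{
 *		example_area = alloc_vm_area(2 * PAGE_SIZE, example_ptes);
 *		if (!example_area)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void example_release(void)
 *	{
 *		free_vm_area(example_area);
 *	}
 */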
2231
2232void free_vm_area(struct vm_struct *area)
2233{
2234 struct vm_struct *ret;
2235 ret = remove_vm_area(area->addr);
2236 BUG_ON(ret != area);
2237 kfree(area);
2238}
2239EXPORT_SYMBOL_GPL(free_vm_area);
2240
2241#ifdef CONFIG_SMP
2242static struct vmap_area *node_to_va(struct rb_node *n)
2243{
2244 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
2245}
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259static bool pvm_find_next_prev(unsigned long end,
2260 struct vmap_area **pnext,
2261 struct vmap_area **pprev)
2262{
2263 struct rb_node *n = vmap_area_root.rb_node;
2264 struct vmap_area *va = NULL;
2265
2266 while (n) {
2267 va = rb_entry(n, struct vmap_area, rb_node);
2268 if (end < va->va_end)
2269 n = n->rb_left;
2270 else if (end > va->va_end)
2271 n = n->rb_right;
2272 else
2273 break;
2274 }
2275
2276 if (!va)
2277 return false;
2278
2279 if (va->va_end > end) {
2280 *pnext = va;
2281 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2282 } else {
2283 *pprev = va;
2284 *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
2285 }
2286 return true;
2287}
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305static unsigned long pvm_determine_end(struct vmap_area **pnext,
2306 struct vmap_area **pprev,
2307 unsigned long align)
2308{
2309 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2310 unsigned long addr;
2311
2312 if (*pnext)
2313 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2314 else
2315 addr = vmalloc_end;
2316
2317 while (*pprev && (*pprev)->va_end > addr) {
2318 *pnext = *pprev;
2319 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2320 }
2321
2322 return addr;
2323}
2324
/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * and sets up vmalloc areas for it.  Areas are allocated from the top
 * of the vmalloc area, below vmap_area_pcpu_hole, so that they do not
 * fragment the regular bottom-up vmalloc allocations.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for a
 * matching slot.  While scanning, if any of the areas overlaps with an
 * existing vmap_area, the base address is pulled down to fit the area.
 * Scanning is repeated until all the areas fit and then all necessary
 * data structures are inserted.
 */
2349struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2350 const size_t *sizes, int nr_vms,
2351 size_t align)
2352{
2353 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2354 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2355 struct vmap_area **vas, *prev, *next;
2356 struct vm_struct **vms;
2357 int area, area2, last_area, term_area;
2358 unsigned long base, start, end, last_end;
2359 bool purged = false;
2360
2361
2362 BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
2363 for (last_area = 0, area = 0; area < nr_vms; area++) {
2364 start = offsets[area];
2365 end = start + sizes[area];
2366
2367
2368 BUG_ON(!IS_ALIGNED(offsets[area], align));
2369 BUG_ON(!IS_ALIGNED(sizes[area], align));
2370
2371
2372 if (start > offsets[last_area])
2373 last_area = area;
2374
2375 for (area2 = 0; area2 < nr_vms; area2++) {
2376 unsigned long start2 = offsets[area2];
2377 unsigned long end2 = start2 + sizes[area2];
2378
2379 if (area2 == area)
2380 continue;
2381
2382 BUG_ON(start2 >= start && start2 < end);
2383 BUG_ON(end2 <= end && end2 > start);
2384 }
2385 }
2386 last_end = offsets[last_area] + sizes[last_area];
2387
2388 if (vmalloc_end - vmalloc_start < last_end) {
2389 WARN_ON(true);
2390 return NULL;
2391 }
2392
2393 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
2394 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
2395 if (!vas || !vms)
2396 goto err_free2;
2397
2398 for (area = 0; area < nr_vms; area++) {
2399 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
2400 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
2401 if (!vas[area] || !vms[area])
2402 goto err_free;
2403 }
2404retry:
2405 spin_lock(&vmap_area_lock);
2406
2407
2408 area = term_area = last_area;
2409 start = offsets[area];
2410 end = start + sizes[area];
2411
2412 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2413 base = vmalloc_end - last_end;
2414 goto found;
2415 }
2416 base = pvm_determine_end(&next, &prev, align) - end;
2417
2418 while (true) {
2419 BUG_ON(next && next->va_end <= base + end);
2420 BUG_ON(prev && prev->va_end > base + end);
2421
2422
2423
2424
2425
2426 if (base + last_end < vmalloc_start + last_end) {
2427 spin_unlock(&vmap_area_lock);
2428 if (!purged) {
2429 purge_vmap_area_lazy();
2430 purged = true;
2431 goto retry;
2432 }
2433 goto err_free;
2434 }
2435
2436
2437
2438
2439
2440 if (next && next->va_start < base + end) {
2441 base = pvm_determine_end(&next, &prev, align) - end;
2442 term_area = area;
2443 continue;
2444 }
2445
2446
2447
2448
2449
2450
2451 if (prev && prev->va_end > base + start) {
2452 next = prev;
2453 prev = node_to_va(rb_prev(&next->rb_node));
2454 base = pvm_determine_end(&next, &prev, align) - end;
2455 term_area = area;
2456 continue;
2457 }
2458
2459
2460
2461
2462
2463 area = (area + nr_vms - 1) % nr_vms;
2464 if (area == term_area)
2465 break;
2466 start = offsets[area];
2467 end = start + sizes[area];
2468 pvm_find_next_prev(base + end, &next, &prev);
2469 }
2470found:
2471
2472 for (area = 0; area < nr_vms; area++) {
2473 struct vmap_area *va = vas[area];
2474
2475 va->va_start = base + offsets[area];
2476 va->va_end = va->va_start + sizes[area];
2477 __insert_vmap_area(va);
2478 }
2479
2480 vmap_area_pcpu_hole = base + offsets[last_area];
2481
2482 spin_unlock(&vmap_area_lock);
2483
2484
2485 for (area = 0; area < nr_vms; area++)
2486 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2487 pcpu_get_vm_areas);
2488
2489 kfree(vas);
2490 return vms;
2491
2492err_free:
2493 for (area = 0; area < nr_vms; area++) {
2494 kfree(vas[area]);
2495 kfree(vms[area]);
2496 }
2497err_free2:
2498 kfree(vas);
2499 kfree(vms);
2500 return NULL;
2501}
2502
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
2510void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2511{
2512 int i;
2513
2514 for (i = 0; i < nr_vms; i++)
2515 free_vm_area(vms[i]);
2516 kfree(vms);
2517}
2518#endif
2519
2520#ifdef CONFIG_PROC_FS
2521static void *s_start(struct seq_file *m, loff_t *pos)
2522 __acquires(&vmap_area_lock)
2523{
2524 loff_t n = *pos;
2525 struct vmap_area *va;
2526
2527 spin_lock(&vmap_area_lock);
2528 va = list_entry((&vmap_area_list)->next, typeof(*va), list);
2529 while (n > 0 && &va->list != &vmap_area_list) {
2530 n--;
2531 va = list_entry(va->list.next, typeof(*va), list);
2532 }
2533 if (!n && &va->list != &vmap_area_list)
2534 return va;
2535
2536 return NULL;
2537
2538}
2539
2540static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2541{
2542 struct vmap_area *va = p, *next;
2543
2544 ++*pos;
2545 next = list_entry(va->list.next, typeof(*va), list);
2546 if (&next->list != &vmap_area_list)
2547 return next;
2548
2549 return NULL;
2550}
2551
2552static void s_stop(struct seq_file *m, void *p)
2553 __releases(&vmap_area_lock)
2554{
2555 spin_unlock(&vmap_area_lock);
2556}
2557
2558static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2559{
2560 if (IS_ENABLED(CONFIG_NUMA)) {
2561 unsigned int nr, *counters = m->private;
2562
2563 if (!counters)
2564 return;
2565
2566 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2567
2568 for (nr = 0; nr < v->nr_pages; nr++)
2569 counters[page_to_nid(v->pages[nr])]++;
2570
2571 for_each_node_state(nr, N_HIGH_MEMORY)
2572 if (counters[nr])
2573 seq_printf(m, " N%u=%u", nr, counters[nr]);
2574 }
2575}
2576
2577static int s_show(struct seq_file *m, void *p)
2578{
2579 struct vmap_area *va = p;
2580 struct vm_struct *v;
2581
2582 if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
2583 return 0;
2584
2585 if (!(va->flags & VM_VM_AREA)) {
2586 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
2587 (void *)va->va_start, (void *)va->va_end,
2588 va->va_end - va->va_start);
2589 return 0;
2590 }
2591
2592 v = va->vm;
2593
	/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
2595 smp_rmb();
2596 if (v->flags & VM_UNINITIALIZED)
2597 return 0;
2598
2599 seq_printf(m, "0x%pK-0x%pK %7ld",
2600 v->addr, v->addr + v->size, v->size);
2601
2602 if (v->caller)
2603 seq_printf(m, " %pS", v->caller);
2604
2605 if (v->nr_pages)
2606 seq_printf(m, " pages=%d", v->nr_pages);
2607
2608 if (v->phys_addr)
2609 seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);
2610
2611 if (v->flags & VM_IOREMAP)
2612 seq_printf(m, " ioremap");
2613
2614 if (v->flags & VM_ALLOC)
2615 seq_printf(m, " vmalloc");
2616
2617 if (v->flags & VM_MAP)
2618 seq_printf(m, " vmap");
2619
2620 if (v->flags & VM_USERMAP)
2621 seq_printf(m, " user");
2622
2623 if (v->flags & VM_VPAGES)
2624 seq_printf(m, " vpages");
2625
2626 show_numa_info(m, v);
2627 seq_putc(m, '\n');
2628 return 0;
2629}
2630
2631static const struct seq_operations vmalloc_op = {
2632 .start = s_start,
2633 .next = s_next,
2634 .stop = s_stop,
2635 .show = s_show,
2636};
2637
2638static int vmalloc_open(struct inode *inode, struct file *file)
2639{
2640 unsigned int *ptr = NULL;
2641 int ret;
2642
2643 if (IS_ENABLED(CONFIG_NUMA)) {
2644 ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
2645 if (ptr == NULL)
2646 return -ENOMEM;
2647 }
2648 ret = seq_open(file, &vmalloc_op);
2649 if (!ret) {
2650 struct seq_file *m = file->private_data;
2651 m->private = ptr;
2652 } else
2653 kfree(ptr);
2654 return ret;
2655}
2656
2657static const struct file_operations proc_vmalloc_operations = {
2658 .open = vmalloc_open,
2659 .read = seq_read,
2660 .llseek = seq_lseek,
2661 .release = seq_release_private,
2662};
2663
2664static int __init proc_vmalloc_init(void)
2665{
2666 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
2667 return 0;
2668}
2669module_init(proc_vmalloc_init);
2670
2671void get_vmalloc_info(struct vmalloc_info *vmi)
2672{
2673 struct vmap_area *va;
2674 unsigned long free_area_size;
2675 unsigned long prev_end;
2676
2677 vmi->used = 0;
2678 vmi->largest_chunk = 0;
2679
2680 prev_end = VMALLOC_START;
2681
2682 spin_lock(&vmap_area_lock);
2683
2684 if (list_empty(&vmap_area_list)) {
2685 vmi->largest_chunk = VMALLOC_TOTAL;
2686 goto out;
2687 }
2688
2689 list_for_each_entry(va, &vmap_area_list, list) {
2690 unsigned long addr = va->va_start;
2691
2692
2693
2694
2695 if (addr < VMALLOC_START)
2696 continue;
2697 if (addr >= VMALLOC_END)
2698 break;
2699
2700 if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
2701 continue;
2702
2703 vmi->used += (va->va_end - va->va_start);
2704
2705 free_area_size = addr - prev_end;
2706 if (vmi->largest_chunk < free_area_size)
2707 vmi->largest_chunk = free_area_size;
2708
2709 prev_end = va->va_end;
2710 }
2711
2712 if (VMALLOC_END - prev_end > vmi->largest_chunk)
2713 vmi->largest_chunk = VMALLOC_END - prev_end;
2714
2715out:
2716 spin_unlock(&vmap_area_lock);
2717}
2718#endif
2719
2720