/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tushar Gohad, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, April 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;
		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * include pages in [pages, pages + (end - addr) >> PAGE_SHIFT).
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N].
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
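
/*
 * Example (illustrative sketch, not part of this file): a debug helper that
 * walks the pages backing a vmalloc'ed buffer.  "buf" and "sz" are
 * hypothetical caller-owned names.
 *
 *	unsigned long off;
 *
 *	for (off = 0; off < sz; off += PAGE_SIZE) {
 *		struct page *page = vmalloc_to_page(buf + off);
 *
 *		pr_debug("va %p -> pfn %lx\n", buf + off, page_to_pfn(page));
 *	}
 */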

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_entry(first->list.next,
				struct vmap_area, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
	if (printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page
 * tables and take slightly longer to purge, but it will linearly reduce the
 * number of global TLB flushes that must be performed.  It would seem
 * natural to scale this number up linearly with the number of CPUs (because
 * vmapping activity could also scale linearly with the number of CPUs),
 * however it is likely that in practice, workloads might be constrained in
 * other ways that mean vmap activity will not scale linearly with CPUs.
 * Also, we want to be conservative and not introduce a big latency on huge
 * systems, so go with a less aggressive log scale.  It will still be an
 * improvement over the old code, and it will be simple to change the scale
 * factor if we find that it becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
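
/*
 * Worked example (assuming 4K pages): with 8 online CPUs, fls(8) == 4, so
 * lazy_max_pages() returns 4 * (32MB / 4KB) = 32768 pages.  Up to 128MB of
 * lazily freed kva is therefore gathered before one global TLB flush.
 */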

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */

/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024		/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
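
/*
 * Worked example (assuming a 64-bit kernel, 4K pages, NR_CPUS == 64):
 * VMALLOC_PAGES = 128GB / 4KB = 32M pages, and 32M / 64 / 16 = 32768 bits,
 * which VMAP_MIN() clamps to VMAP_BBMAP_BITS_MAX (1024).  Each vmap block
 * then covers 1024 * 4KB = 4MB of virtual address space.
 */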

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
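
/*
 * Example: with the 4MB VMAP_BLOCK_SIZE derived above, every address inside
 * the third block above the aligned VMALLOC_START (one byte in, one page in,
 * etc.) maps to the same index, 2, so vb_free() can find the owning block
 * for any address inside it.
 */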

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
	offset >>= PAGE_SHIFT;

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
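
/*
 * Example (illustrative sketch, not part of this file): mapping a small,
 * short-lived batch of caller-owned pages.  "pages" and "nr" are
 * hypothetical, with nr <= VMAP_MAX_ALLOC.
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *
 *	if (va) {
 *		memset(va, 0, nr * PAGE_SIZE);
 *		vm_unmap_ram(va, nr);
 *	}
 */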

static struct vm_struct *vmlist __initdata;

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU ARE NOBODY.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU ARE NOBODY.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-mapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);
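
/*
 * Example (illustrative sketch): a caller batching several noflush unmaps
 * can perform the cache and TLB maintenance itself, once, over a range
 * covering all of them ("a1"/"a2", "s1"/"s2", "lo"/"hi" are hypothetical):
 *
 *	flush_cache_vunmap(lo, hi);
 *	unmap_kernel_range_noflush(a1, s1);
 *	unmap_kernel_range_noflush(a2, s2);
 *	flush_tlb_kernel_range(lo, hi);
 */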

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + get_vm_area_size(area);
	int err;

	err = vmap_page_range(addr, end, prot, pages);

	return err > 0 ? 0 : err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, fls_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area, or %NULL
 *	on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 *	find_vm_area  -  find a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and return it.
 *	It is up to the caller to do all required locking to keep the returned
 *	pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 *	remove_vm_area  -  find and remove a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		kasan_free_shadow(vm);
		free_unmap_vmap_area(va);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, get_vm_area_size(area));
	debug_check_no_obj_freed(addr, get_vm_area_size(area));

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually continuous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in NMI context (strictly speaking, only if we don't
 *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 *	conventions for vfree() arch-dependent would be really bad.)
 *
 *	NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);
	} else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
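
/*
 * Example (illustrative sketch): because vfree() defers to free_work() above
 * when in_interrupt() is true, a hypothetical timer callback (which runs in
 * softirq context) may free a vmalloc'ed object directly:
 *
 *	static void my_timer_fn(unsigned long data)
 *	{
 *		vfree((void *)data);	// queued on vfree_deferred
 *	}
 */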

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
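
/*
 * Example (illustrative sketch, not part of this file): presenting two
 * independently allocated pages as one virtually contiguous buffer.  Error
 * handling is elided and all names are hypothetical.
 *
 *	struct page *pg[2] = { alloc_page(GFP_KERNEL), alloc_page(GFP_KERNEL) };
 *	void *va = vmap(pg, 2, VM_MAP, PAGE_KERNEL);
 *
 *	... use the 2 * PAGE_SIZE buffer at va ...
 *	vunmap(va);
 *	__free_page(pg[0]);
 *	__free_page(pg[1]);
 */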

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;

	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, area->caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node == NUMA_NO_NODE)
			page = alloc_page(alloc_mask);
		else
			page = alloc_pages_node(node, alloc_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
		if (gfpflags_allow_blocking(gfp_mask))
			cond_resched();
	}

	if (map_vm_area(area, prot, pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}

/**
 *	__vmalloc_node_range  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@start:		vm area range start
 *	@end:		vm area range end
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@vm_flags:	additional vm area flags (e.g. %VM_NO_GUARD)
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	/*
	 * A ref_count = 2 is needed because vm_struct allocated in
	 * __vmalloc_area_node() contains vm_struct and vmap_area pointers.
	 */
	kmemleak_alloc(addr, real_size, 2, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, 0, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
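
/*
 * Example (illustrative sketch): a large table that only needs to be
 * virtually contiguous is the typical vmalloc() user; "struct entry" and
 * "nent" are hypothetical.
 *
 *	struct entry *tbl = vmalloc(nent * sizeof(*tbl));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */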

/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *	@size:	allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, NUMA_NO_NODE,
			     __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator on @node and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      NUMA_NO_NODE, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     NUMA_NO_NODE, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine: copy contents to buf from addr.
 * If the page is not present, fill with zeroes.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But adding a lock here means adding vmalloc()/vfree()
		 * overhead for this rarely used _debug_ interface.  Instead
		 * we use an interface that only distinguishes whether the
		 * address is _mapped_ or not, and copy the data.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But adding a lock here means adding vmalloc()/vfree()
		 * overhead for this rarely used _debug_ interface.  Instead
		 * we use an interface that only distinguishes whether the
		 * address is _mapped_ or not, and copy the data.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 *	vread() -  read vmalloc area in a safe way.
 *	@buf:		buffer for reading data
 *	@addr:		vm address.
 *	@count:		number of bytes to be read.
 *
 *	Returns # of bytes which addr and buf should be increased
 *	(same number as @count).  Returns 0 if [addr...addr+count)
 *	doesn't include any intersection with an alive vmalloc area.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copies data from that area to a given buffer.  If the given memory
 *	range of [addr...addr+count) includes some valid address, data is
 *	copied to the proper area of @buf.  If there are memory holes, they
 *	are zero-filled.  An IOREMAP area is treated as a memory hole and
 *	no copy is done.
 *
 *	If [addr...addr+count) doesn't include any intersection with an
 *	alive vm_struct area, returns 0.  @buf should be a kernel buffer.
 *
 *	Note: in usual ops, vread() is never necessary because the caller
 *	should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access vmalloc areas without
 *	any information, such as /dev/kmem.
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
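
/*
 * Example (illustrative sketch): a /proc/kcore-style reader copies vmalloc
 * space through vread(), and unmapped holes come back zero-filled; "vaddr"
 * and "len" are hypothetical.
 *
 *	char *kbuf = kmalloc(len, GFP_KERNEL);
 *	long n = kbuf ? vread(kbuf, vaddr, len) : 0;	// n is 0 or len
 */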

/**
 *	vwrite() -  write vmalloc area in a safe way.
 *	@buf:		buffer for source data
 *	@addr:		vm address.
 *	@count:		number of bytes to be read.
 *
 *	Returns # of bytes which addr and buf should be increased
 *	(same number as @count).  Returns 0 if [addr...addr+count)
 *	doesn't include any intersection with an alive vmalloc area.
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	copies data from a buffer to the given address.  If the given memory
 *	range of [addr...addr+count) includes some valid address, data is
 *	copied from the proper area of @buf.  If there are memory holes,
 *	no copy is done for them (but the same number of bytes of @buf and
 *	@addr are still consumed).  An IOREMAP area is treated as a memory
 *	hole and no copy is done.
 *
 *	If [addr...addr+count) doesn't include any intersection with an
 *	alive vm_struct area, returns 0.  @buf should be a kernel buffer.
 *
 *	Note: in usual ops, vwrite() is never necessary because the caller
 *	should know the vmalloc() area is valid and can use memcpy().
 *	This is for routines which have to access vmalloc areas without
 *	any information, such as /dev/kmem.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 *	remap_vmalloc_range_partial  -  map vmalloc pages to userspace
 *	@vma:		vma to cover
 *	@uaddr:		target user address to start at
 *	@kaddr:		virtual address of vmalloc kernel memory
 *	@size:		size of map area
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that @kaddr is a valid vmalloc'ed area,
 *	and that it is big enough to cover the range starting at
 *	@uaddr in @vma. Will return failure if that criteria isn't
 *	met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (kaddr + size > area->addr + area->size)
		return -EINVAL;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criteria isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr + (pgoff << PAGE_SHIFT),
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
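
/*
 * Example (illustrative sketch, not part of this file): a driver mmap
 * handler exposing a buffer that was allocated with vmalloc_user() (which
 * sets VM_USERMAP).  "my_buf" is hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_buf, 0);
 *	}
 */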

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/* Callback for apply_to_page_range(): record each pte pointer for the caller */
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@ptes:		returns the PTEs for the address space
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  The allocated
 *	pagetables are freed when the area is freed.
 *
 *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 *	allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
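
/*
 * Example (illustrative sketch): Xen-style usage, where the caller wants a
 * kernel virtual range plus the pte pointers so it can install the actual
 * mappings itself later; "NR" is a hypothetical compile-time constant.
 *
 *	pte_t *ptes[NR];
 *	struct vm_struct *area = alloc_vm_area(NR * PAGE_SIZE, ptes);
 *
 *	if (area) {
 *		... install entries through ptes[0..NR-1] ...
 *		free_vm_area(area);
 *	}
 */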

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 *	    %false if no vmap_area exists
 *
 * Find vmap_areas end addresses of which enclose @end.  ie. if not
 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}

/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned
 * down address is between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside *@pnext vmap_area.  The caller takes responsibility to
 * check that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes.  To avoid interacting with regular vmalloc areas, these
 * areas are allocated from top.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for
 * matching slot.  While scanning, if any of the areas overlaps with
 * existing vmap_area, the base address is pulled down to fit the
 * area.  Scanning is repeated till all the areas fit and then all
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = 0; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			if (area2 == area)
				continue;

			BUG_ON(start2 >= start && start2 < end);
			BUG_ON(end2 <= end && end2 > start);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start) {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		kfree(vas[area]);
		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_area_lock)
{
	loff_t n = *pos;
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = list_entry((&vmap_area_list)->next, typeof(*va), list);
	while (n > 0 && &va->list != &vmap_area_list) {
		n--;
		va = list_entry(va->list.next, typeof(*va), list);
	}
	if (!n && &va->list != &vmap_area_list)
		return va;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vmap_area *va = p, *next;

	++*pos;
	next = list_entry(va->list.next, typeof(*va), list);
	if (&next->list != &vmap_area_list)
		return next;

	return NULL;
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
{
	spin_unlock(&vmap_area_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va = p;
	struct vm_struct *v;

	/*
	 * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
	 * behalf of vmap area is being tear down or vm_map_ram allocation.
	 */
	if (!(va->flags & VM_VM_AREA))
		return 0;

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_VPAGES)
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	if (IS_ENABLED(CONFIG_NUMA))
		return seq_open_private(file, &vmalloc_op,
					nr_node_ids * sizeof(unsigned int));
	else
		return seq_open(file, &vmalloc_op);
}

static const struct file_operations proc_vmalloc_operations = {
	.open = vmalloc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);

#endif	/* CONFIG_PROC_FS */