/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 */
11#include <linux/vmalloc.h>
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <linux/highmem.h>
15#include <linux/sched/signal.h>
16#include <linux/slab.h>
17#include <linux/spinlock.h>
18#include <linux/interrupt.h>
19#include <linux/proc_fs.h>
20#include <linux/seq_file.h>
21#include <linux/debugobjects.h>
22#include <linux/kallsyms.h>
23#include <linux/list.h>
24#include <linux/notifier.h>
25#include <linux/rbtree.h>
26#include <linux/radix-tree.h>
27#include <linux/rcupdate.h>
28#include <linux/pfn.h>
29#include <linux/kmemleak.h>
30#include <linux/atomic.h>
31#include <linux/compiler.h>
32#include <linux/llist.h>
33#include <linux/bitops.h>
34
35#include <linux/uaccess.h>
36#include <asm/tlbflush.h>
37#include <asm/shmparam.h>
38
39#include "internal.h"
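
/*
 * vfree() must not sleep when it is called from atomic context (see
 * vfree()/vfree_atomic() below).  Such requests are queued on a per-cpu
 * llist and the actual __vunmap() is done later, from process context,
 * by free_work().
 */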
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);

	while (llnode) {
		void *addr = llnode;

		llnode = llist_next(llnode);
		__vunmap(addr, 1);
	}
}

/*** Page table manipulation functions ***/

62static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
63{
64 pte_t *pte;
65
66 pte = pte_offset_kernel(pmd, addr);
67 do {
68 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
69 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
70 } while (pte++, addr += PAGE_SIZE, addr != end);
71}
72
73static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
74{
75 pmd_t *pmd;
76 unsigned long next;
77
78 pmd = pmd_offset(pud, addr);
79 do {
80 next = pmd_addr_end(addr, end);
81 if (pmd_clear_huge(pmd))
82 continue;
83 if (pmd_none_or_clear_bad(pmd))
84 continue;
85 vunmap_pte_range(pmd, addr, next);
86 } while (pmd++, addr = next, addr != end);
87}
88
89static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
90{
91 pud_t *pud;
92 unsigned long next;
93
94 pud = pud_offset(p4d, addr);
95 do {
96 next = pud_addr_end(addr, end);
97 if (pud_clear_huge(pud))
98 continue;
99 if (pud_none_or_clear_bad(pud))
100 continue;
101 vunmap_pmd_range(pud, addr, next);
102 } while (pud++, addr = next, addr != end);
103}
104
105static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
106{
107 p4d_t *p4d;
108 unsigned long next;
109
110 p4d = p4d_offset(pgd, addr);
111 do {
112 next = p4d_addr_end(addr, end);
113 if (p4d_clear_huge(p4d))
114 continue;
115 if (p4d_none_or_clear_bad(p4d))
116 continue;
117 vunmap_pud_range(p4d, addr, next);
118 } while (p4d++, addr = next, addr != end);
119}
120
121static void vunmap_page_range(unsigned long addr, unsigned long end)
122{
123 pgd_t *pgd;
124 unsigned long next;
125
126 BUG_ON(addr >= end);
127 pgd = pgd_offset_k(addr);
128 do {
129 next = pgd_addr_end(addr, end);
130 if (pgd_none_or_clear_bad(pgd))
131 continue;
132 vunmap_p4d_range(pgd, addr, next);
133 } while (pgd++, addr = next, addr != end);
134}
135
136static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
137 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
138{
139 pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */
146 pte = pte_alloc_kernel(pmd, addr);
147 if (!pte)
148 return -ENOMEM;
149 do {
150 struct page *page = pages[*nr];
151
152 if (WARN_ON(!pte_none(*pte)))
153 return -EBUSY;
154 if (WARN_ON(!page))
155 return -ENOMEM;
156 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
157 (*nr)++;
158 } while (pte++, addr += PAGE_SIZE, addr != end);
159 return 0;
160}
161
162static int vmap_pmd_range(pud_t *pud, unsigned long addr,
163 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
164{
165 pmd_t *pmd;
166 unsigned long next;
167
168 pmd = pmd_alloc(&init_mm, pud, addr);
169 if (!pmd)
170 return -ENOMEM;
171 do {
172 next = pmd_addr_end(addr, end);
173 if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
174 return -ENOMEM;
175 } while (pmd++, addr = next, addr != end);
176 return 0;
177}
178
179static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
180 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
181{
182 pud_t *pud;
183 unsigned long next;
184
185 pud = pud_alloc(&init_mm, p4d, addr);
186 if (!pud)
187 return -ENOMEM;
188 do {
189 next = pud_addr_end(addr, end);
190 if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
191 return -ENOMEM;
192 } while (pud++, addr = next, addr != end);
193 return 0;
194}
195
196static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
197 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
198{
199 p4d_t *p4d;
200 unsigned long next;
201
202 p4d = p4d_alloc(&init_mm, pgd, addr);
203 if (!p4d)
204 return -ENOMEM;
205 do {
206 next = p4d_addr_end(addr, end);
207 if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
208 return -ENOMEM;
209 } while (p4d++, addr = next, addr != end);
210 return 0;
211}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
219static int vmap_page_range_noflush(unsigned long start, unsigned long end,
220 pgprot_t prot, struct page **pages)
221{
222 pgd_t *pgd;
223 unsigned long next;
224 unsigned long addr = start;
225 int err = 0;
226 int nr = 0;
227
228 BUG_ON(addr >= end);
229 pgd = pgd_offset_k(addr);
230 do {
231 next = pgd_addr_end(addr, end);
232 err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
233 if (err)
234 return err;
235 } while (pgd++, addr = next, addr != end);
236
237 return nr;
238}
239
240static int vmap_page_range(unsigned long start, unsigned long end,
241 pgprot_t prot, struct page **pages)
242{
243 int ret;
244
245 ret = vmap_page_range_noflush(start, end, prot, pages);
246 flush_cache_vmap(start, end);
247 return ret;
248}
249
int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk the kernel page tables and return the struct page mapped at a
 * vmalloc (or module) virtual address, or NULL if nothing is mapped there.
 */
268struct page *vmalloc_to_page(const void *vmalloc_addr)
269{
270 unsigned long addr = (unsigned long) vmalloc_addr;
271 struct page *page = NULL;
272 pgd_t *pgd = pgd_offset_k(addr);
273 p4d_t *p4d;
274 pud_t *pud;
275 pmd_t *pmd;
276 pte_t *ptep, pte;

	/*
	 * Make sure the address really is in vmalloc or module space
	 * before walking the kernel page tables.
	 */
282 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
283
284 if (pgd_none(*pgd))
285 return NULL;
286 p4d = p4d_offset(pgd, addr);
287 if (p4d_none(*p4d))
288 return NULL;
289 pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions cannot be
	 * resolved to a single struct page, so warn once and bail out
	 * instead of faulting on a bogus address.
	 */
299 WARN_ON_ONCE(pud_bad(*pud));
300 if (pud_none(*pud) || pud_bad(*pud))
301 return NULL;
302 pmd = pmd_offset(pud, addr);
303 WARN_ON_ONCE(pmd_bad(*pmd));
304 if (pmd_none(*pmd) || pmd_bad(*pmd))
305 return NULL;
306
307 ptep = pte_offset_map(pmd, addr);
308 pte = *ptep;
309 if (pte_present(pte))
310 page = pte_page(pte);
311 pte_unmap(ptep);
312 return page;
313}
314EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
319unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
320{
321 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
322}
323EXPORT_SYMBOL(vmalloc_to_pfn);
324

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;
344
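
/*
 * Look up the vmap_area that contains @addr in the address-sorted rbtree.
 * Caller must hold vmap_area_lock.
 */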
345static struct vmap_area *__find_vmap_area(unsigned long addr)
346{
347 struct rb_node *n = vmap_area_root.rb_node;
348
349 while (n) {
350 struct vmap_area *va;
351
352 va = rb_entry(n, struct vmap_area, rb_node);
353 if (addr < va->va_start)
354 n = n->rb_left;
355 else if (addr >= va->va_end)
356 n = n->rb_right;
357 else
358 return va;
359 }
360
361 return NULL;
362}
363
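
/*
 * Insert @va into the address-sorted rbtree and the RCU-protected
 * vmap_area_list.  Caller must hold vmap_area_lock.
 */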
364static void __insert_vmap_area(struct vmap_area *va)
365{
366 struct rb_node **p = &vmap_area_root.rb_node;
367 struct rb_node *parent = NULL;
368 struct rb_node *tmp;
369
370 while (*p) {
371 struct vmap_area *tmp_va;
372
373 parent = *p;
374 tmp_va = rb_entry(parent, struct vmap_area, rb_node);
375 if (va->va_start < tmp_va->va_end)
376 p = &(*p)->rb_left;
377 else if (va->va_end > tmp_va->va_start)
378 p = &(*p)->rb_right;
379 else
380 BUG();
381 }
382
383 rb_link_node(&va->rb_node, parent, p);
384 rb_insert_color(&va->rb_node, &vmap_area_root);
385
386
387 tmp = rb_prev(&va->rb_node);
388 if (tmp) {
389 struct vmap_area *prev;
390 prev = rb_entry(tmp, struct vmap_area, rb_node);
391 list_add_rcu(&va->list, &prev->list);
392 } else
393 list_add_rcu(&va->list, &vmap_area_list);
394}
395
396static void purge_vmap_area_lazy(void);
397
398static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
404static struct vmap_area *alloc_vmap_area(unsigned long size,
405 unsigned long align,
406 unsigned long vstart, unsigned long vend,
407 int node, gfp_t gfp_mask)
408{
409 struct vmap_area *va;
410 struct rb_node *n;
411 unsigned long addr;
412 int purged = 0;
413 struct vmap_area *first;
414
415 BUG_ON(!size);
416 BUG_ON(offset_in_page(size));
417 BUG_ON(!is_power_of_2(align));
418
419 might_sleep();
420
421 va = kmalloc_node(sizeof(struct vmap_area),
422 gfp_mask & GFP_RECLAIM_MASK, node);
423 if (unlikely(!va))
424 return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
430 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
431
432retry:
433 spin_lock(&vmap_area_lock);

	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
443 if (!free_vmap_cache ||
444 size < cached_hole_size ||
445 vstart < cached_vstart ||
446 align < cached_align) {
447nocache:
448 cached_hole_size = 0;
449 free_vmap_cache = NULL;
450 }
451
452 cached_vstart = vstart;
453 cached_align = align;
454
455
456 if (free_vmap_cache) {
457 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
458 addr = ALIGN(first->va_end, align);
459 if (addr < vstart)
460 goto nocache;
461 if (addr + size < addr)
462 goto overflow;
463
464 } else {
465 addr = ALIGN(vstart, align);
466 if (addr + size < addr)
467 goto overflow;
468
469 n = vmap_area_root.rb_node;
470 first = NULL;
471
472 while (n) {
473 struct vmap_area *tmp;
474 tmp = rb_entry(n, struct vmap_area, rb_node);
475 if (tmp->va_end >= addr) {
476 first = tmp;
477 if (tmp->va_start <= addr)
478 break;
479 n = n->rb_left;
480 } else
481 n = n->rb_right;
482 }
483
484 if (!first)
485 goto found;
486 }
487
488
489 while (addr + size > first->va_start && addr + size <= vend) {
490 if (addr + cached_hole_size < first->va_start)
491 cached_hole_size = first->va_start - addr;
492 addr = ALIGN(first->va_end, align);
493 if (addr + size < addr)
494 goto overflow;
495
496 if (list_is_last(&first->list, &vmap_area_list))
497 goto found;
498
499 first = list_next_entry(first, list);
500 }
501
502found:
503 if (addr + size > vend)
504 goto overflow;
505
506 va->va_start = addr;
507 va->va_end = addr + size;
508 va->flags = 0;
509 __insert_vmap_area(va);
510 free_vmap_cache = &va->rb_node;
511 spin_unlock(&vmap_area_lock);
512
513 BUG_ON(!IS_ALIGNED(va->va_start, align));
514 BUG_ON(va->va_start < vstart);
515 BUG_ON(va->va_end > vend);
516
517 return va;
518
519overflow:
520 spin_unlock(&vmap_area_lock);
521 if (!purged) {
522 purge_vmap_area_lazy();
523 purged = 1;
524 goto retry;
525 }
526
527 if (gfpflags_allow_blocking(gfp_mask)) {
528 unsigned long freed = 0;
529 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
530 if (freed > 0) {
531 purged = 0;
532 goto retry;
533 }
534 }
535
536 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
537 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
538 size);
539 kfree(va);
540 return ERR_PTR(-EBUSY);
541}
542
543int register_vmap_purge_notifier(struct notifier_block *nb)
544{
545 return blocking_notifier_chain_register(&vmap_notify_list, nb);
546}
547EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
548
549int unregister_vmap_purge_notifier(struct notifier_block *nb)
550{
551 return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
552}
553EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
554
555static void __free_vmap_area(struct vmap_area *va)
556{
557 BUG_ON(RB_EMPTY_NODE(&va->rb_node));
558
559 if (free_vmap_cache) {
560 if (va->va_end < cached_vstart) {
561 free_vmap_cache = NULL;
562 } else {
563 struct vmap_area *cache;
564 cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
565 if (va->va_start <= cache->va_start) {
566 free_vmap_cache = rb_prev(&va->rb_node);
567
568
569
570
571 }
572 }
573 }
574 rb_erase(&va->rb_node, &vmap_area_root);
575 RB_CLEAR_NODE(&va->rb_node);
576 list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of the vmalloc area can be returned
	 * here too, so consider only end addresses which fall inside
	 * the vmalloc area proper.
	 */
584 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
585 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
586
587 kfree_rcu(va, rcu_head);
588}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
593static void free_vmap_area(struct vmap_area *va)
594{
595 spin_lock(&vmap_area_lock);
596 __free_vmap_area(va);
597 spin_unlock(&vmap_area_lock);
598}

/*
 * Clear the pagetable entries of a given vmap_area
 */
603static void unmap_vmap_area(struct vmap_area *va)
604{
605 vunmap_page_range(va->va_start, va->va_end);
606}
607
608static void vmap_debug_free_range(unsigned long start, unsigned long end)
609{
	/*
	 * Unmap page tables and force a TLB flush immediately if pagealloc
	 * debugging is enabled.  This catches use-after-free bugs similarly
	 * to those in the linear kernel virtual address space after a page
	 * has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to minimise
	 * the intrusiveness of this debugging feature.
	 */
622 if (debug_pagealloc_enabled()) {
623 vunmap_page_range(start, end);
624 flush_tlb_kernel_range(start, end);
625 }
626}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gain by
 * not immediately TLB flushing and freeing kernel virtual mappings.  A larger
 * value covers more page tables per purge but linearly reduces the number of
 * global TLB flushes that must be performed.  Scale it with the log of the
 * number of online CPUs so that huge systems do not see big purge latencies.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Serialize vmap purging.  There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 */
677static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
678{
679 struct llist_node *valist;
680 struct vmap_area *va;
681 struct vmap_area *n_va;
682 bool do_free = false;
683
684 lockdep_assert_held(&vmap_purge_lock);
685
686 valist = llist_del_all(&vmap_purge_list);
687 llist_for_each_entry(va, valist, purge_list) {
688 if (va->va_start < start)
689 start = va->va_start;
690 if (va->va_end > end)
691 end = va->va_end;
692 do_free = true;
693 }
694
695 if (!do_free)
696 return false;
697
698 flush_tlb_kernel_range(start, end);
699
700 spin_lock(&vmap_area_lock);
701 llist_for_each_entry_safe(va, n_va, valist, purge_list) {
702 int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
703
704 __free_vmap_area(va);
705 atomic_sub(nr, &vmap_lazy_nr);
706 cond_resched_lock(&vmap_area_lock);
707 }
708 spin_unlock(&vmap_area_lock);
709 return true;
710}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * else is already purging.
 */
716static void try_purge_vmap_area_lazy(void)
717{
718 if (mutex_trylock(&vmap_purge_lock)) {
719 __purge_vmap_area_lazy(ULONG_MAX, 0);
720 mutex_unlock(&vmap_purge_lock);
721 }
722}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
727static void purge_vmap_area_lazy(void)
728{
729 mutex_lock(&vmap_purge_lock);
730 purge_fragmented_blocks_allcpus();
731 __purge_vmap_area_lazy(ULONG_MAX, 0);
732 mutex_unlock(&vmap_purge_lock);
733}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
740static void free_vmap_area_noflush(struct vmap_area *va)
741{
742 int nr_lazy;
743
744 nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
745 &vmap_lazy_nr);

	/* After this point, we may free va at any time */
748 llist_add(&va->purge_list, &vmap_purge_list);
749
750 if (unlikely(nr_lazy > lazy_max_pages()))
751 try_purge_vmap_area_lazy();
752}

/*
 * Free and unmap a vmap area
 */
757static void free_unmap_vmap_area(struct vmap_area *va)
758{
759 flush_cache_vunmap(va->va_start, va->va_end);
760 unmap_vmap_area(va);
761 free_vmap_area_noflush(va);
762}
763
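
/* Look up the vmap_area covering @addr, taking vmap_area_lock. */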
764static struct vmap_area *find_vmap_area(unsigned long addr)
765{
766 struct vmap_area *va;
767
768 spin_lock(&vmap_area_lock);
769 va = __find_vmap_area(addr);
770 spin_unlock(&vmap_area_lock);
771
772 return va;
773}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */

/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE as (VMALLOC_END - VMALLOC_START). Guess
 * instead (we just need a rough idea).
 */
786#if BITS_PER_LONG == 32
787#define VMALLOC_SPACE (128UL*1024*1024)
788#else
789#define VMALLOC_SPACE (128UL*1024*1024*1024)
790#endif
791
792#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
793#define VMAP_MAX_ALLOC BITS_PER_LONG
794#define VMAP_BBMAP_BITS_MAX 1024
795#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
796#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y))
797#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y))
798#define VMAP_BBMAP_BITS \
799 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
800 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
801 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
802
803#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
804
805static bool vmap_initialized __read_mostly = false;
806
807struct vmap_block_queue {
808 spinlock_t lock;
809 struct list_head free;
810};
811
812struct vmap_block {
813 spinlock_t lock;
814 struct vmap_area *va;
815 unsigned long free, dirty;
816 unsigned long dirty_min, dirty_max;
817 struct list_head free_list;
818 struct rcu_head rcu_head;
819 struct list_head purge;
820};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
823static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
830static DEFINE_SPINLOCK(vmap_block_tree_lock);
831static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * Index of the vmap block covering @addr: blocks are VMAP_BLOCK_SIZE bytes
 * and aligned within the vmalloc area.
 */
840static unsigned long addr_to_vb_idx(unsigned long addr)
841{
842 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
843 addr /= VMAP_BLOCK_SIZE;
844 return addr;
845}
846
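
/*
 * Kernel virtual address of the page at index @pages_off within the
 * vmap block that starts at @va_start.
 */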
847static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
848{
849 unsigned long addr;
850
851 addr = va_start + (pages_off << PAGE_SHIFT);
852 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
853 return (void *)addr;
854}

/**
 * new_vmap_block - allocate a new vmap block and reserve 2^order pages in it
 * @order:	how many 2^order pages should be occupied in the new block
 * @gfp_mask:	flags for the page level allocator
 *
 * Returns: the virtual address of the reserved region, or ERR_PTR(-errno)
 */
864static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
865{
866 struct vmap_block_queue *vbq;
867 struct vmap_block *vb;
868 struct vmap_area *va;
869 unsigned long vb_idx;
870 int node, err;
871 void *vaddr;
872
873 node = numa_node_id();
874
875 vb = kmalloc_node(sizeof(struct vmap_block),
876 gfp_mask & GFP_RECLAIM_MASK, node);
877 if (unlikely(!vb))
878 return ERR_PTR(-ENOMEM);
879
880 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
881 VMALLOC_START, VMALLOC_END,
882 node, gfp_mask);
883 if (IS_ERR(va)) {
884 kfree(vb);
885 return ERR_CAST(va);
886 }
887
888 err = radix_tree_preload(gfp_mask);
889 if (unlikely(err)) {
890 kfree(vb);
891 free_vmap_area(va);
892 return ERR_PTR(err);
893 }
894
895 vaddr = vmap_block_vaddr(va->va_start, 0);
896 spin_lock_init(&vb->lock);
897 vb->va = va;
898
899 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
900 vb->free = VMAP_BBMAP_BITS - (1UL << order);
901 vb->dirty = 0;
902 vb->dirty_min = VMAP_BBMAP_BITS;
903 vb->dirty_max = 0;
904 INIT_LIST_HEAD(&vb->free_list);
905
906 vb_idx = addr_to_vb_idx(va->va_start);
907 spin_lock(&vmap_block_tree_lock);
908 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
909 spin_unlock(&vmap_block_tree_lock);
910 BUG_ON(err);
911 radix_tree_preload_end();
912
913 vbq = &get_cpu_var(vmap_block_queue);
914 spin_lock(&vbq->lock);
915 list_add_tail_rcu(&vb->free_list, &vbq->free);
916 spin_unlock(&vbq->lock);
917 put_cpu_var(vmap_block_queue);
918
919 return vaddr;
920}
921
922static void free_vmap_block(struct vmap_block *vb)
923{
924 struct vmap_block *tmp;
925 unsigned long vb_idx;
926
927 vb_idx = addr_to_vb_idx(vb->va->va_start);
928 spin_lock(&vmap_block_tree_lock);
929 tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
930 spin_unlock(&vmap_block_tree_lock);
931 BUG_ON(tmp != vb);
932
933 free_vmap_area_noflush(vb->va);
934 kfree_rcu(vb, rcu_head);
935}
936
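
/*
 * Release vmap blocks on this CPU's free list that have no outstanding
 * allocations left (free + dirty covers the whole block) but still hold
 * unallocated space, so their virtual space can be reclaimed.
 */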
937static void purge_fragmented_blocks(int cpu)
938{
939 LIST_HEAD(purge);
940 struct vmap_block *vb;
941 struct vmap_block *n_vb;
942 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
943
944 rcu_read_lock();
945 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
946
947 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
948 continue;
949
950 spin_lock(&vb->lock);
951 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
952 vb->free = 0;
953 vb->dirty = VMAP_BBMAP_BITS;
954 vb->dirty_min = 0;
955 vb->dirty_max = VMAP_BBMAP_BITS;
956 spin_lock(&vbq->lock);
957 list_del_rcu(&vb->free_list);
958 spin_unlock(&vbq->lock);
959 spin_unlock(&vb->lock);
960 list_add_tail(&vb->purge, &purge);
961 } else
962 spin_unlock(&vb->lock);
963 }
964 rcu_read_unlock();
965
966 list_for_each_entry_safe(vb, n_vb, &purge, purge) {
967 list_del(&vb->purge);
968 free_vmap_block(vb);
969 }
970}
971
972static void purge_fragmented_blocks_allcpus(void)
973{
974 int cpu;
975
976 for_each_possible_cpu(cpu)
977 purge_fragmented_blocks(cpu);
978}
979
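
/*
 * Allocate @size bytes (at most VMAP_MAX_ALLOC pages, rounded up to a
 * power-of-two number of pages) of kernel virtual space out of a per-cpu
 * vmap block, creating a new block if no existing one has enough room.
 */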
980static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
981{
982 struct vmap_block_queue *vbq;
983 struct vmap_block *vb;
984 void *vaddr = NULL;
985 unsigned int order;
986
987 BUG_ON(offset_in_page(size));
988 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
989 if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what the caller wants, since
		 * get_order(0) returns a funny result. Just warn and
		 * terminate early.
		 */
995 return NULL;
996 }
997 order = get_order(size);
998
999 rcu_read_lock();
1000 vbq = &get_cpu_var(vmap_block_queue);
1001 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1002 unsigned long pages_off;
1003
1004 spin_lock(&vb->lock);
1005 if (vb->free < (1UL << order)) {
1006 spin_unlock(&vb->lock);
1007 continue;
1008 }
1009
1010 pages_off = VMAP_BBMAP_BITS - vb->free;
1011 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1012 vb->free -= 1UL << order;
1013 if (vb->free == 0) {
1014 spin_lock(&vbq->lock);
1015 list_del_rcu(&vb->free_list);
1016 spin_unlock(&vbq->lock);
1017 }
1018
1019 spin_unlock(&vb->lock);
1020 break;
1021 }
1022
1023 put_cpu_var(vmap_block_queue);
1024 rcu_read_unlock();

	/* Allocate new block if nothing was found */
1027 if (!vaddr)
1028 vaddr = new_vmap_block(order, gfp_mask);
1029
1030 return vaddr;
1031}
1032
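
/*
 * Free a range previously returned by vb_alloc(): unmap the pages and
 * mark the range dirty in its vmap block; the block itself is freed once
 * it is completely dirty.
 */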
1033static void vb_free(const void *addr, unsigned long size)
1034{
1035 unsigned long offset;
1036 unsigned long vb_idx;
1037 unsigned int order;
1038 struct vmap_block *vb;
1039
1040 BUG_ON(offset_in_page(size));
1041 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1042
1043 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
1044
1045 order = get_order(size);
1046
1047 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
1048 offset >>= PAGE_SHIFT;
1049
1050 vb_idx = addr_to_vb_idx((unsigned long)addr);
1051 rcu_read_lock();
1052 vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
1053 rcu_read_unlock();
1054 BUG_ON(!vb);
1055
1056 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
1057
1058 spin_lock(&vb->lock);

	/* Expand the dirty range covered by this block */
1061 vb->dirty_min = min(vb->dirty_min, offset);
1062 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1063
1064 vb->dirty += 1UL << order;
1065 if (vb->dirty == VMAP_BBMAP_BITS) {
1066 BUG_ON(vb->free);
1067 spin_unlock(&vb->lock);
1068 free_vmap_block(vb);
1069 } else
1070 spin_unlock(&vb->lock);
1071}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
1086void vm_unmap_aliases(void)
1087{
1088 unsigned long start = ULONG_MAX, end = 0;
1089 int cpu;
1090 int flush = 0;
1091
1092 if (unlikely(!vmap_initialized))
1093 return;
1094
1095 might_sleep();
1096
1097 for_each_possible_cpu(cpu) {
1098 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1099 struct vmap_block *vb;
1100
1101 rcu_read_lock();
1102 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1103 spin_lock(&vb->lock);
1104 if (vb->dirty) {
1105 unsigned long va_start = vb->va->va_start;
1106 unsigned long s, e;
1107
1108 s = va_start + (vb->dirty_min << PAGE_SHIFT);
1109 e = va_start + (vb->dirty_max << PAGE_SHIFT);
1110
1111 start = min(s, start);
1112 end = max(e, end);
1113
1114 flush = 1;
1115 }
1116 spin_unlock(&vb->lock);
1117 }
1118 rcu_read_unlock();
1119 }
1120
1121 mutex_lock(&vmap_purge_lock);
1122 purge_fragmented_blocks_allcpus();
1123 if (!__purge_vmap_area_lazy(start, end) && flush)
1124 flush_tlb_kernel_range(start, end);
1125 mutex_unlock(&vmap_purge_lock);
1126}
1127EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
1134void vm_unmap_ram(const void *mem, unsigned int count)
1135{
1136 unsigned long size = (unsigned long)count << PAGE_SHIFT;
1137 unsigned long addr = (unsigned long)mem;
1138 struct vmap_area *va;
1139
1140 might_sleep();
1141 BUG_ON(!addr);
1142 BUG_ON(addr < VMALLOC_START);
1143 BUG_ON(addr > VMALLOC_END);
1144 BUG_ON(!PAGE_ALIGNED(addr));
1145
1146 debug_check_no_locks_freed(mem, size);
1147 vmap_debug_free_range(addr, addr+size);
1148
1149 if (likely(count <= VMAP_MAX_ALLOC)) {
1150 vb_free(mem, size);
1151 return;
1152 }
1153
1154 va = find_vmap_area(addr);
1155 BUG_ON(!va);
1156 free_unmap_vmap_area(va);
1157}
1158EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  Please use this function
 * for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
1175void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1176{
1177 unsigned long size = (unsigned long)count << PAGE_SHIFT;
1178 unsigned long addr;
1179 void *mem;
1180
1181 if (likely(count <= VMAP_MAX_ALLOC)) {
1182 mem = vb_alloc(size, GFP_KERNEL);
1183 if (IS_ERR(mem))
1184 return NULL;
1185 addr = (unsigned long)mem;
1186 } else {
1187 struct vmap_area *va;
1188 va = alloc_vmap_area(size, PAGE_SIZE,
1189 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1190 if (IS_ERR(va))
1191 return NULL;
1192
1193 addr = va->va_start;
1194 mem = (void *)addr;
1195 }
1196 if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1197 vm_unmap_ram(mem, count);
1198 return NULL;
1199 }
1200 return mem;
1201}
1202EXPORT_SYMBOL(vm_map_ram);
1203
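
/*
 * vmlist is used only during early boot by vm_area_add_early() and
 * vm_area_register_early(); vmalloc_init() later imports its entries
 * into the vmap_area tree.
 */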
1204static struct vm_struct *vmlist __initdata;
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215void __init vm_area_add_early(struct vm_struct *vm)
1216{
1217 struct vm_struct *tmp, **p;
1218
1219 BUG_ON(vmap_initialized);
1220 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1221 if (tmp->addr >= vm->addr) {
1222 BUG_ON(tmp->addr < vm->addr + vm->size);
1223 break;
1224 } else
1225 BUG_ON(tmp->addr + tmp->size > vm->addr);
1226 }
1227 vm->next = *p;
1228 *p = vm;
1229}
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1244{
1245 static size_t vm_init_off __initdata;
1246 unsigned long addr;
1247
1248 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1249 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1250
1251 vm->addr = (void *)addr;
1252
1253 vm_area_add_early(vm);
1254}
1255
1256void __init vmalloc_init(void)
1257{
1258 struct vmap_area *va;
1259 struct vm_struct *tmp;
1260 int i;
1261
1262 for_each_possible_cpu(i) {
1263 struct vmap_block_queue *vbq;
1264 struct vfree_deferred *p;
1265
1266 vbq = &per_cpu(vmap_block_queue, i);
1267 spin_lock_init(&vbq->lock);
1268 INIT_LIST_HEAD(&vbq->free);
1269 p = &per_cpu(vfree_deferred, i);
1270 init_llist_head(&p->list);
1271 INIT_WORK(&p->wq, free_work);
1272 }
1273
1274
1275 for (tmp = vmlist; tmp; tmp = tmp->next) {
1276 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
1277 va->flags = VM_VM_AREA;
1278 va->va_start = (unsigned long)tmp->addr;
1279 va->va_end = va->va_start + tmp->size;
1280 va->vm = tmp;
1281 __insert_vmap_area(va);
1282 }
1283
1284 vmap_area_pcpu_hole = VMALLOC_END;
1285
1286 vmap_initialized = true;
1287}
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1309 pgprot_t prot, struct page **pages)
1310{
1311 return vmap_page_range_noflush(addr, addr + size, prot, pages);
1312}
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1329{
1330 vunmap_page_range(addr, addr + size);
1331}
1332EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342void unmap_kernel_range(unsigned long addr, unsigned long size)
1343{
1344 unsigned long end = addr + size;
1345
1346 flush_cache_vunmap(addr, end);
1347 vunmap_page_range(addr, end);
1348 flush_tlb_kernel_range(addr, end);
1349}
1350EXPORT_SYMBOL_GPL(unmap_kernel_range);
1351
1352int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
1353{
1354 unsigned long addr = (unsigned long)area->addr;
1355 unsigned long end = addr + get_vm_area_size(area);
1356 int err;
1357
1358 err = vmap_page_range(addr, end, prot, pages);
1359
1360 return err > 0 ? 0 : err;
1361}
1362EXPORT_SYMBOL_GPL(map_vm_area);
1363
1364static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1365 unsigned long flags, const void *caller)
1366{
1367 spin_lock(&vmap_area_lock);
1368 vm->flags = flags;
1369 vm->addr = (void *)va->va_start;
1370 vm->size = va->va_end - va->va_start;
1371 vm->caller = caller;
1372 va->vm = vm;
1373 va->flags |= VM_VM_AREA;
1374 spin_unlock(&vmap_area_lock);
1375}
1376
1377static void clear_vm_uninitialized_flag(struct vm_struct *vm)
1378{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
1384 smp_wmb();
1385 vm->flags &= ~VM_UNINITIALIZED;
1386}
1387
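
/*
 * Reserve a kernel virtual area in [start, end) and set up the vm_struct
 * describing it.  This is the common helper behind get_vm_area() and the
 * vmalloc() family.
 */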
1388static struct vm_struct *__get_vm_area_node(unsigned long size,
1389 unsigned long align, unsigned long flags, unsigned long start,
1390 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
1391{
1392 struct vmap_area *va;
1393 struct vm_struct *area;
1394
1395 BUG_ON(in_interrupt());
1396 size = PAGE_ALIGN(size);
1397 if (unlikely(!size))
1398 return NULL;
1399
1400 if (flags & VM_IOREMAP)
1401 align = 1ul << clamp_t(int, get_count_order_long(size),
1402 PAGE_SHIFT, IOREMAP_MAX_ORDER);
1403
1404 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1405 if (unlikely(!area))
1406 return NULL;
1407
1408 if (!(flags & VM_NO_GUARD))
1409 size += PAGE_SIZE;
1410
1411 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1412 if (IS_ERR(va)) {
1413 kfree(area);
1414 return NULL;
1415 }
1416
1417 setup_vmalloc_vm(area, va, flags, caller);
1418
1419 return area;
1420}
1421
1422struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1423 unsigned long start, unsigned long end)
1424{
1425 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1426 GFP_KERNEL, __builtin_return_address(0));
1427}
1428EXPORT_SYMBOL_GPL(__get_vm_area);
1429
1430struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1431 unsigned long start, unsigned long end,
1432 const void *caller)
1433{
1434 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1435 GFP_KERNEL, caller);
1436}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size:	size of the area
 * @flags:	%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
1447struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1448{
1449 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1450 NUMA_NO_NODE, GFP_KERNEL,
1451 __builtin_return_address(0));
1452}
1453
1454struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1455 const void *caller)
1456{
1457 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1458 NUMA_NO_NODE, GFP_KERNEL, caller);
1459}

/**
 * find_vm_area - find a continuous kernel virtual area
 * @addr:	base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 */
1469struct vm_struct *find_vm_area(const void *addr)
1470{
1471 struct vmap_area *va;
1472
1473 va = find_vmap_area((unsigned long)addr);
1474 if (va && va->flags & VM_VM_AREA)
1475 return va->vm;
1476
1477 return NULL;
1478}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr:	base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
1488struct vm_struct *remove_vm_area(const void *addr)
1489{
1490 struct vmap_area *va;
1491
1492 might_sleep();
1493
1494 va = find_vmap_area((unsigned long)addr);
1495 if (va && va->flags & VM_VM_AREA) {
1496 struct vm_struct *vm = va->vm;
1497
1498 spin_lock(&vmap_area_lock);
1499 va->vm = NULL;
1500 va->flags &= ~VM_VM_AREA;
1501 va->flags |= VM_LAZY_FREE;
1502 spin_unlock(&vmap_area_lock);
1503
1504 vmap_debug_free_range(va->va_start, va->va_end);
1505 kasan_free_shadow(vm);
1506 free_unmap_vmap_area(va);
1507
1508 return vm;
1509 }
1510 return NULL;
1511}
1512
1513static void __vunmap(const void *addr, int deallocate_pages)
1514{
1515 struct vm_struct *area;
1516
1517 if (!addr)
1518 return;
1519
1520 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
1521 addr))
1522 return;
1523
1524 area = remove_vm_area(addr);
1525 if (unlikely(!area)) {
1526 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1527 addr);
1528 return;
1529 }
1530
1531 debug_check_no_locks_freed(addr, get_vm_area_size(area));
1532 debug_check_no_obj_freed(addr, get_vm_area_size(area));
1533
1534 if (deallocate_pages) {
1535 int i;
1536
1537 for (i = 0; i < area->nr_pages; i++) {
1538 struct page *page = area->pages[i];
1539
1540 BUG_ON(!page);
1541 __free_pages(page, 0);
1542 }
1543
1544 kvfree(area->pages);
1545 }
1546
1547 kfree(area);
1548 return;
1549}
1550
1551static inline void __vfree_deferred(const void *addr)
1552{
	/*
	 * Use raw_cpu_ptr() because this can be called from preemptible
	 * context. Preemption is absolutely fine here, because the llist_add()
	 * implementation is lockless, so it works even if we are adding to
	 * another cpu's list. schedule_work() should be fine with this too.
	 */
1559 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
1560
1561 if (llist_add((struct llist_node *)addr, &p->list))
1562 schedule_work(&p->wq);
1563}

/**
 * vfree_atomic - release memory allocated by vmalloc()
 * @addr:	memory base address
 *
 * This one is just like vfree() but can be called in any atomic context
 * except NMIs.
 */
1572void vfree_atomic(const void *addr)
1573{
1574 BUG_ON(in_nmi());
1575
1576 kmemleak_free(addr);
1577
1578 if (!addr)
1579 return;
1580 __vfree_deferred(addr);
1581}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr:	memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in NMI context.  If called from interrupt context
 * the actual freeing is deferred to a workqueue (see __vfree_deferred()).
 *
 * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
 */
1597void vfree(const void *addr)
1598{
1599 BUG_ON(in_nmi());
1600
1601 kmemleak_free(addr);
1602
1603 if (!addr)
1604 return;
1605 if (unlikely(in_interrupt()))
1606 __vfree_deferred(addr);
1607 else
1608 __vunmap(addr, 1);
1609}
1610EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr:	memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
1621void vunmap(const void *addr)
1622{
1623 BUG_ON(in_interrupt());
1624 might_sleep();
1625 if (addr)
1626 __vunmap(addr, 0);
1627}
1628EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages:	array of page pointers
 * @count:	number of pages to map
 * @flags:	vm_area->flags
 * @prot:	page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual space.
 */
1640void *vmap(struct page **pages, unsigned int count,
1641 unsigned long flags, pgprot_t prot)
1642{
1643 struct vm_struct *area;
1644 unsigned long size;
1645
1646 might_sleep();
1647
1648 if (count > totalram_pages)
1649 return NULL;
1650
1651 size = (unsigned long)count << PAGE_SHIFT;
1652 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
1653 if (!area)
1654 return NULL;
1655
1656 if (map_vm_area(area, prot, pages)) {
1657 vunmap(area->addr);
1658 return NULL;
1659 }
1660
1661 return area->addr;
1662}
1663EXPORT_SYMBOL(vmap);
1664
1665static void *__vmalloc_node(unsigned long size, unsigned long align,
1666 gfp_t gfp_mask, pgprot_t prot,
1667 int node, const void *caller);
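
/*
 * Allocate the physical pages backing @area and map them with @prot.
 * The pages array itself is kmalloc'ed or, if it is too large for that,
 * allocated recursively with __vmalloc_node().
 */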
1668static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1669 pgprot_t prot, int node)
1670{
1671 struct page **pages;
1672 unsigned int nr_pages, array_size, i;
1673 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1674 const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
1675 const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
1676 0 :
1677 __GFP_HIGHMEM;
1678
1679 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
1680 array_size = (nr_pages * sizeof(struct page *));
1681
1682 area->nr_pages = nr_pages;
1683
1684 if (array_size > PAGE_SIZE) {
1685 pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
1686 PAGE_KERNEL, node, area->caller);
1687 } else {
1688 pages = kmalloc_node(array_size, nested_gfp, node);
1689 }
1690 area->pages = pages;
1691 if (!area->pages) {
1692 remove_vm_area(area->addr);
1693 kfree(area);
1694 return NULL;
1695 }
1696
1697 for (i = 0; i < area->nr_pages; i++) {
1698 struct page *page;
1699
1700 if (fatal_signal_pending(current)) {
1701 area->nr_pages = i;
1702 goto fail_no_warn;
1703 }
1704
1705 if (node == NUMA_NO_NODE)
1706 page = alloc_page(alloc_mask|highmem_mask);
1707 else
1708 page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
1709
1710 if (unlikely(!page)) {
1711
1712 area->nr_pages = i;
1713 goto fail;
1714 }
1715 area->pages[i] = page;
1716 if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
1717 cond_resched();
1718 }
1719
1720 if (map_vm_area(area, prot, pages))
1721 goto fail;
1722 return area->addr;
1723
1724fail:
1725 warn_alloc(gfp_mask, NULL,
1726 "vmalloc: allocation failure, allocated %ld of %ld bytes",
1727 (area->nr_pages*PAGE_SIZE), area->size);
1728fail_no_warn:
1729 vfree(area->addr);
1730 return NULL;
1731}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size:	allocation size
 * @align:	desired alignment
 * @start:	vm area range start
 * @end:	vm area range end
 * @gfp_mask:	flags for the page level allocator
 * @prot:	protection mask for the allocated pages
 * @vm_flags:	additional vm area flags (e.g. %VM_NO_GUARD)
 * @node:	node to use for allocation or NUMA_NO_NODE
 * @caller:	caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
1749void *__vmalloc_node_range(unsigned long size, unsigned long align,
1750 unsigned long start, unsigned long end, gfp_t gfp_mask,
1751 pgprot_t prot, unsigned long vm_flags, int node,
1752 const void *caller)
1753{
1754 struct vm_struct *area;
1755 void *addr;
1756 unsigned long real_size = size;
1757
1758 size = PAGE_ALIGN(size);
1759 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
1760 goto fail;
1761
1762 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
1763 vm_flags, start, end, node, gfp_mask, caller);
1764 if (!area)
1765 goto fail;
1766
1767 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
1768 if (!addr)
1769 return NULL;
1770
1771
1772
1773
1774
1775
1776 clear_vm_uninitialized_flag(area);
1777
1778 kmemleak_vmalloc(area, size, gfp_mask);
1779
1780 return addr;
1781
1782fail:
1783 warn_alloc(gfp_mask, NULL,
1784 "vmalloc: allocation failure: %lu bytes", real_size);
1785 return NULL;
1786}
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808static void *__vmalloc_node(unsigned long size, unsigned long align,
1809 gfp_t gfp_mask, pgprot_t prot,
1810 int node, const void *caller)
1811{
1812 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
1813 gfp_mask, prot, 0, node, caller);
1814}
1815
1816void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1817{
1818 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
1819 __builtin_return_address(0));
1820}
1821EXPORT_SYMBOL(__vmalloc);
1822
1823static inline void *__vmalloc_node_flags(unsigned long size,
1824 int node, gfp_t flags)
1825{
1826 return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
1827 node, __builtin_return_address(0));
1828}
1829
1830
1831void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
1832 void *caller)
1833{
1834 return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
1835}

/**
 * vmalloc - allocate virtually contiguous memory
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
1846void *vmalloc(unsigned long size)
1847{
1848 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1849 GFP_KERNEL);
1850}
1851EXPORT_SYMBOL(vmalloc);

/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
1863void *vzalloc(unsigned long size)
1864{
1865 return __vmalloc_node_flags(size, NUMA_NO_NODE,
1866 GFP_KERNEL | __GFP_ZERO);
1867}
1868EXPORT_SYMBOL(vzalloc);
1869
1870
1871
1872
1873
1874
1875
1876
1877void *vmalloc_user(unsigned long size)
1878{
1879 struct vm_struct *area;
1880 void *ret;
1881
1882 ret = __vmalloc_node(size, SHMLBA,
1883 GFP_KERNEL | __GFP_ZERO,
1884 PAGE_KERNEL, NUMA_NO_NODE,
1885 __builtin_return_address(0));
1886 if (ret) {
1887 area = find_vm_area(ret);
1888 area->flags |= VM_USERMAP;
1889 }
1890 return ret;
1891}
1892EXPORT_SYMBOL(vmalloc_user);
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905void *vmalloc_node(unsigned long size, int node)
1906{
1907 return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
1908 node, __builtin_return_address(0));
1909}
1910EXPORT_SYMBOL(vmalloc_node);
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924void *vzalloc_node(unsigned long size, int node)
1925{
1926 return __vmalloc_node_flags(size, node,
1927 GFP_KERNEL | __GFP_ZERO);
1928}
1929EXPORT_SYMBOL(vzalloc_node);
1930
1931#ifndef PAGE_KERNEL_EXEC
1932# define PAGE_KERNEL_EXEC PAGE_KERNEL
1933#endif
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947void *vmalloc_exec(unsigned long size)
1948{
1949 return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
1950 NUMA_NO_NODE, __builtin_return_address(0));
1951}
1952
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif
1960
1961
1962
1963
1964
1965
1966
1967
1968void *vmalloc_32(unsigned long size)
1969{
1970 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
1971 NUMA_NO_NODE, __builtin_return_address(0));
1972}
1973EXPORT_SYMBOL(vmalloc_32);
1974
1975
1976
1977
1978
1979
1980
1981
1982void *vmalloc_32_user(unsigned long size)
1983{
1984 struct vm_struct *area;
1985 void *ret;
1986
1987 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
1988 NUMA_NO_NODE, __builtin_return_address(0));
1989 if (ret) {
1990 area = find_vm_area(ret);
1991 area->flags |= VM_USERMAP;
1992 }
1993 return ret;
1994}
1995EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine: copy @count bytes from the vmalloc'ed region at
 * @addr into @buf.  Pages that are not currently mapped read as zeroes.
 */
2002static int aligned_vread(char *buf, char *addr, unsigned long count)
2003{
2004 struct page *p;
2005 int copied = 0;
2006
2007 while (count) {
2008 unsigned long offset, length;
2009
2010 offset = offset_in_page(addr);
2011 length = PAGE_SIZE - offset;
2012 if (length > count)
2013 length = count;
2014 p = vmalloc_to_page(addr);
2015
2016
2017
2018
2019
2020
2021
2022 if (p) {
2023
2024
2025
2026
2027 void *map = kmap_atomic(p);
2028 memcpy(buf, map + offset, length);
2029 kunmap_atomic(map);
2030 } else
2031 memset(buf, 0, length);
2032
2033 addr += length;
2034 buf += length;
2035 copied += length;
2036 count -= length;
2037 }
2038 return copied;
2039}
2040
2041static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2042{
2043 struct page *p;
2044 int copied = 0;
2045
2046 while (count) {
2047 unsigned long offset, length;
2048
2049 offset = offset_in_page(addr);
2050 length = PAGE_SIZE - offset;
2051 if (length > count)
2052 length = count;
2053 p = vmalloc_to_page(addr);
2054
2055
2056
2057
2058
2059
2060
2061 if (p) {
2062
2063
2064
2065
2066 void *map = kmap_atomic(p);
2067 memcpy(map + offset, buf, length);
2068 kunmap_atomic(map);
2069 }
2070 addr += length;
2071 buf += length;
2072 copied += length;
2073 count -= length;
2074 }
2075 return copied;
2076}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf:	buffer for reading data
 * @addr:	vm address.
 * @count:	number of bytes to be read.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from that area to the given buffer.  If the memory range
 * [addr...addr+count) includes some valid addresses, data is copied to
 * the proper area of @buf; memory holes are zero-filled and IOREMAP
 * areas are treated as holes (no copy is done).
 *
 * Returns the number of bytes by which @addr and @buf should be
 * increased (the same as @count), or 0 if [addr...addr+count) does not
 * intersect any valid vmalloc area.
 */
2104long vread(char *buf, char *addr, unsigned long count)
2105{
2106 struct vmap_area *va;
2107 struct vm_struct *vm;
2108 char *vaddr, *buf_start = buf;
2109 unsigned long buflen = count;
2110 unsigned long n;
2111
2112
2113 if ((unsigned long) addr + count < count)
2114 count = -(unsigned long) addr;
2115
2116 spin_lock(&vmap_area_lock);
2117 list_for_each_entry(va, &vmap_area_list, list) {
2118 if (!count)
2119 break;
2120
2121 if (!(va->flags & VM_VM_AREA))
2122 continue;
2123
2124 vm = va->vm;
2125 vaddr = (char *) vm->addr;
2126 if (addr >= vaddr + get_vm_area_size(vm))
2127 continue;
2128 while (addr < vaddr) {
2129 if (count == 0)
2130 goto finished;
2131 *buf = '\0';
2132 buf++;
2133 addr++;
2134 count--;
2135 }
2136 n = vaddr + get_vm_area_size(vm) - addr;
2137 if (n > count)
2138 n = count;
2139 if (!(vm->flags & VM_IOREMAP))
2140 aligned_vread(buf, addr, n);
2141 else
2142 memset(buf, 0, n);
2143 buf += n;
2144 addr += n;
2145 count -= n;
2146 }
2147finished:
2148 spin_unlock(&vmap_area_lock);
2149
2150 if (buf == buf_start)
2151 return 0;
2152
2153 if (buf != buf_start + buflen)
2154 memset(buf, 0, buflen - (buf - buf_start));
2155
2156 return buflen;
2157}

/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf:	buffer holding the source data
 * @addr:	vm address.
 * @count:	number of bytes to be written.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from the given buffer into that area.  Memory holes and
 * IOREMAP areas are skipped.
 *
 * Returns the number of bytes by which @addr and @buf should be
 * increased (the same as @count), or 0 if no valid vmalloc area was
 * touched.
 */
2185long vwrite(char *buf, char *addr, unsigned long count)
2186{
2187 struct vmap_area *va;
2188 struct vm_struct *vm;
2189 char *vaddr;
2190 unsigned long n, buflen;
2191 int copied = 0;
2192
2193
2194 if ((unsigned long) addr + count < count)
2195 count = -(unsigned long) addr;
2196 buflen = count;
2197
2198 spin_lock(&vmap_area_lock);
2199 list_for_each_entry(va, &vmap_area_list, list) {
2200 if (!count)
2201 break;
2202
2203 if (!(va->flags & VM_VM_AREA))
2204 continue;
2205
2206 vm = va->vm;
2207 vaddr = (char *) vm->addr;
2208 if (addr >= vaddr + get_vm_area_size(vm))
2209 continue;
2210 while (addr < vaddr) {
2211 if (count == 0)
2212 goto finished;
2213 buf++;
2214 addr++;
2215 count--;
2216 }
2217 n = vaddr + get_vm_area_size(vm) - addr;
2218 if (n > count)
2219 n = count;
2220 if (!(vm->flags & VM_IOREMAP)) {
2221 aligned_vwrite(buf, addr, n);
2222 copied++;
2223 }
2224 buf += n;
2225 addr += n;
2226 count -= n;
2227 }
2228finished:
2229 spin_unlock(&vmap_area_lock);
2230 if (!copied)
2231 return 0;
2232 return buflen;
2233}
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2252 void *kaddr, unsigned long size)
2253{
2254 struct vm_struct *area;
2255
2256 size = PAGE_ALIGN(size);
2257
2258 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2259 return -EINVAL;
2260
2261 area = find_vm_area(kaddr);
2262 if (!area)
2263 return -EINVAL;
2264
2265 if (!(area->flags & VM_USERMAP))
2266 return -EINVAL;
2267
2268 if (kaddr + size > area->addr + area->size)
2269 return -EINVAL;
2270
2271 do {
2272 struct page *page = vmalloc_to_page(kaddr);
2273 int ret;
2274
2275 ret = vm_insert_page(vma, uaddr, page);
2276 if (ret)
2277 return ret;
2278
2279 uaddr += PAGE_SIZE;
2280 kaddr += PAGE_SIZE;
2281 size -= PAGE_SIZE;
2282 } while (size > 0);
2283
2284 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2285
2286 return 0;
2287}
2288EXPORT_SYMBOL(remap_vmalloc_range_partial);
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2305 unsigned long pgoff)
2306{
2307 return remap_vmalloc_range_partial(vma, vma->vm_start,
2308 addr + (pgoff << PAGE_SHIFT),
2309 vma->vm_end - vma->vm_start);
2310}
2311EXPORT_SYMBOL(remap_vmalloc_range);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
2317void __weak vmalloc_sync_all(void)
2318{
2319}
2320
2321
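
/*
 * apply_to_page_range() callback used by alloc_vm_area(): record the
 * address of each pte that was set up in the caller-supplied array.
 */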
2322static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
2323{
2324 pte_t ***p = data;
2325
2326 if (p) {
2327 *(*p) = pte;
2328 (*p)++;
2329 }
2330 return 0;
2331}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size:	size of the area
 * @ptes:	returns the PTEs for the address space
 *
 * Returns:	NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space and
 * allocates pagetables to map that range.  No actual mappings
 * are created.
 *
 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 * allocated for the VM area are returned.
 */
2347struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
2348{
2349 struct vm_struct *area;
2350
2351 area = get_vm_area_caller(size, VM_IOREMAP,
2352 __builtin_return_address(0));
2353 if (area == NULL)
2354 return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
2360 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2361 size, f, ptes ? &ptes : NULL)) {
2362 free_vm_area(area);
2363 return NULL;
2364 }
2365
2366 return area;
2367}
2368EXPORT_SYMBOL_GPL(alloc_vm_area);
2369
2370void free_vm_area(struct vm_struct *area)
2371{
2372 struct vm_struct *ret;
2373 ret = remove_vm_area(area->addr);
2374 BUG_ON(ret != area);
2375 kfree(area);
2376}
2377EXPORT_SYMBOL_GPL(free_vm_area);
2378
2379#ifdef CONFIG_SMP
2380static struct vmap_area *node_to_va(struct rb_node *n)
2381{
2382 return rb_entry_safe(n, struct vmap_area, rb_node);
2383}
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397static bool pvm_find_next_prev(unsigned long end,
2398 struct vmap_area **pnext,
2399 struct vmap_area **pprev)
2400{
2401 struct rb_node *n = vmap_area_root.rb_node;
2402 struct vmap_area *va = NULL;
2403
2404 while (n) {
2405 va = rb_entry(n, struct vmap_area, rb_node);
2406 if (end < va->va_end)
2407 n = n->rb_left;
2408 else if (end > va->va_end)
2409 n = n->rb_right;
2410 else
2411 break;
2412 }
2413
2414 if (!va)
2415 return false;
2416
2417 if (va->va_end > end) {
2418 *pnext = va;
2419 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2420 } else {
2421 *pprev = va;
2422 *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
2423 }
2424 return true;
2425}
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443static unsigned long pvm_determine_end(struct vmap_area **pnext,
2444 struct vmap_area **pprev,
2445 unsigned long align)
2446{
2447 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2448 unsigned long addr;
2449
2450 if (*pnext)
2451 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2452 else
2453 addr = vmalloc_end;
2454
2455 while (*pprev && (*pprev)->va_end > addr) {
2456 *pnext = *pprev;
2457 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2458 }
2459
2460 return addr;
2461}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * The percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates such
 * congruent vmalloc areas, scanning top-down from the end of the vmalloc
 * space; if any existing vmap_area overlaps, the base address is pulled
 * down and the scan is retried until all areas fit.
 */
2487struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2488 const size_t *sizes, int nr_vms,
2489 size_t align)
2490{
2491 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2492 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2493 struct vmap_area **vas, *prev, *next;
2494 struct vm_struct **vms;
2495 int area, area2, last_area, term_area;
2496 unsigned long base, start, end, last_end;
2497 bool purged = false;
2498
2499
2500 BUG_ON(offset_in_page(align) || !is_power_of_2(align));
2501 for (last_area = 0, area = 0; area < nr_vms; area++) {
2502 start = offsets[area];
2503 end = start + sizes[area];
2504
2505
2506 BUG_ON(!IS_ALIGNED(offsets[area], align));
2507 BUG_ON(!IS_ALIGNED(sizes[area], align));
2508
2509
2510 if (start > offsets[last_area])
2511 last_area = area;
2512
2513 for (area2 = 0; area2 < nr_vms; area2++) {
2514 unsigned long start2 = offsets[area2];
2515 unsigned long end2 = start2 + sizes[area2];
2516
2517 if (area2 == area)
2518 continue;
2519
2520 BUG_ON(start2 >= start && start2 < end);
2521 BUG_ON(end2 <= end && end2 > start);
2522 }
2523 }
2524 last_end = offsets[last_area] + sizes[last_area];
2525
2526 if (vmalloc_end - vmalloc_start < last_end) {
2527 WARN_ON(true);
2528 return NULL;
2529 }
2530
2531 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
2532 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
2533 if (!vas || !vms)
2534 goto err_free2;
2535
2536 for (area = 0; area < nr_vms; area++) {
2537 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
2538 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
2539 if (!vas[area] || !vms[area])
2540 goto err_free;
2541 }
2542retry:
2543 spin_lock(&vmap_area_lock);
2544
2545
2546 area = term_area = last_area;
2547 start = offsets[area];
2548 end = start + sizes[area];
2549
2550 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2551 base = vmalloc_end - last_end;
2552 goto found;
2553 }
2554 base = pvm_determine_end(&next, &prev, align) - end;
2555
2556 while (true) {
2557 BUG_ON(next && next->va_end <= base + end);
2558 BUG_ON(prev && prev->va_end > base + end);
2559
2560
2561
2562
2563
2564 if (base + last_end < vmalloc_start + last_end) {
2565 spin_unlock(&vmap_area_lock);
2566 if (!purged) {
2567 purge_vmap_area_lazy();
2568 purged = true;
2569 goto retry;
2570 }
2571 goto err_free;
2572 }
2573
2574
2575
2576
2577
2578 if (next && next->va_start < base + end) {
2579 base = pvm_determine_end(&next, &prev, align) - end;
2580 term_area = area;
2581 continue;
2582 }
2583
2584
2585
2586
2587
2588
2589 if (prev && prev->va_end > base + start) {
2590 next = prev;
2591 prev = node_to_va(rb_prev(&next->rb_node));
2592 base = pvm_determine_end(&next, &prev, align) - end;
2593 term_area = area;
2594 continue;
2595 }
2596
2597
2598
2599
2600
2601 area = (area + nr_vms - 1) % nr_vms;
2602 if (area == term_area)
2603 break;
2604 start = offsets[area];
2605 end = start + sizes[area];
2606 pvm_find_next_prev(base + end, &next, &prev);
2607 }
2608found:
2609
2610 for (area = 0; area < nr_vms; area++) {
2611 struct vmap_area *va = vas[area];
2612
2613 va->va_start = base + offsets[area];
2614 va->va_end = va->va_start + sizes[area];
2615 __insert_vmap_area(va);
2616 }
2617
2618 vmap_area_pcpu_hole = base + offsets[last_area];
2619
2620 spin_unlock(&vmap_area_lock);
2621
2622
2623 for (area = 0; area < nr_vms; area++)
2624 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2625 pcpu_get_vm_areas);
2626
2627 kfree(vas);
2628 return vms;
2629
2630err_free:
2631 for (area = 0; area < nr_vms; area++) {
2632 kfree(vas[area]);
2633 kfree(vms[area]);
2634 }
2635err_free2:
2636 kfree(vas);
2637 kfree(vms);
2638 return NULL;
2639}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
2648void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2649{
2650 int i;
2651
2652 for (i = 0; i < nr_vms; i++)
2653 free_vm_area(vms[i]);
2654 kfree(vms);
2655}
2656#endif
2657
2658#ifdef CONFIG_PROC_FS
2659static void *s_start(struct seq_file *m, loff_t *pos)
2660 __acquires(&vmap_area_lock)
2661{
2662 spin_lock(&vmap_area_lock);
2663 return seq_list_start(&vmap_area_list, *pos);
2664}
2665
2666static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2667{
2668 return seq_list_next(p, &vmap_area_list, pos);
2669}
2670
2671static void s_stop(struct seq_file *m, void *p)
2672 __releases(&vmap_area_lock)
2673{
2674 spin_unlock(&vmap_area_lock);
2675}
2676
2677static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2678{
2679 if (IS_ENABLED(CONFIG_NUMA)) {
2680 unsigned int nr, *counters = m->private;
2681
2682 if (!counters)
2683 return;
2684
2685 if (v->flags & VM_UNINITIALIZED)
2686 return;

		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
2688 smp_rmb();
2689
2690 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2691
2692 for (nr = 0; nr < v->nr_pages; nr++)
2693 counters[page_to_nid(v->pages[nr])]++;
2694
2695 for_each_node_state(nr, N_HIGH_MEMORY)
2696 if (counters[nr])
2697 seq_printf(m, " N%u=%u", nr, counters[nr]);
2698 }
2699}
2700
2701static int s_show(struct seq_file *m, void *p)
2702{
2703 struct vmap_area *va;
2704 struct vm_struct *v;
2705
2706 va = list_entry(p, struct vmap_area, list);

	/*
	 * s_show can encounter a race with remove_vm_area(): !VM_VM_AREA
	 * means the vmap area is being torn down or was set up by
	 * vm_map_ram().
	 */
2712 if (!(va->flags & VM_VM_AREA)) {
2713 seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
2714 (void *)va->va_start, (void *)va->va_end,
2715 va->va_end - va->va_start,
2716 va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
2717
2718 return 0;
2719 }
2720
2721 v = va->vm;
2722
2723 seq_printf(m, "0x%pK-0x%pK %7ld",
2724 v->addr, v->addr + v->size, v->size);
2725
2726 if (v->caller)
2727 seq_printf(m, " %pS", v->caller);
2728
2729 if (v->nr_pages)
2730 seq_printf(m, " pages=%d", v->nr_pages);
2731
2732 if (v->phys_addr)
2733 seq_printf(m, " phys=%pa", &v->phys_addr);
2734
2735 if (v->flags & VM_IOREMAP)
2736 seq_puts(m, " ioremap");
2737
2738 if (v->flags & VM_ALLOC)
2739 seq_puts(m, " vmalloc");
2740
2741 if (v->flags & VM_MAP)
2742 seq_puts(m, " vmap");
2743
2744 if (v->flags & VM_USERMAP)
2745 seq_puts(m, " user");
2746
2747 if (is_vmalloc_addr(v->pages))
2748 seq_puts(m, " vpages");
2749
2750 show_numa_info(m, v);
2751 seq_putc(m, '\n');
2752 return 0;
2753}
2754
2755static const struct seq_operations vmalloc_op = {
2756 .start = s_start,
2757 .next = s_next,
2758 .stop = s_stop,
2759 .show = s_show,
2760};
2761
2762static int vmalloc_open(struct inode *inode, struct file *file)
2763{
2764 if (IS_ENABLED(CONFIG_NUMA))
2765 return seq_open_private(file, &vmalloc_op,
2766 nr_node_ids * sizeof(unsigned int));
2767 else
2768 return seq_open(file, &vmalloc_op);
2769}
2770
2771static const struct file_operations proc_vmalloc_operations = {
2772 .open = vmalloc_open,
2773 .read = seq_read,
2774 .llseek = seq_lseek,
2775 .release = seq_release_private,
2776};
2777
2778static int __init proc_vmalloc_init(void)
2779{
2780 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
2781 return 0;
2782}
2783module_init(proc_vmalloc_init);
2784
2785#endif