// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmtrunc by Christoph Hellwig and Andi Kleen, June 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, April 2005
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"
#include "pgalloc-track.h"

/* Is the address within the [VMALLOC_START, VMALLOC_END) arena? */
bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);
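
/*
 * Illustrative sketch (not part of this file; the helper name is
 * hypothetical): callers typically use is_vmalloc_addr() to pick the
 * right free routine for a buffer that may come from either kmalloc()
 * or vmalloc() -- which is essentially what kvfree() does:
 *
 *	static void free_buf_either(void *buf)
 *	{
 *		if (is_vmalloc_addr(buf))
 *			vfree(buf);
 *		else
 *			kfree(buf);
 *	}
 */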

/*
 * vfree() may be called from contexts that cannot sleep (e.g. interrupt
 * context). Such requests are queued on a per-CPU llist and drained
 * later from a workqueue; see __vfree_deferred() below.
 */
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;
	int cleared;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		cleared = p4d_clear_huge(p4d);
		if (cleared || p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (cleared)
			continue;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @start: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @start.  The range should have been
 * mapped with map_kernel_range_noflush() or a friend.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() before calling this
 * function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	unsigned long end = addr + size;
	unsigned long next;
	pgd_t *pgd;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}

int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages)
{
	int ret;

	ret = map_kernel_range_noflush(start, size, prot, pages);
	flush_cache_vmap(start, start + size);
	return ret;
}
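
/*
 * Minimal usage sketch (the "area" and "pages" names are hypothetical,
 * assuming a caller that already reserved a vmap area and filled a
 * pages[] array):
 *
 *	err = map_kernel_range((unsigned long)area->addr,
 *			       get_vm_area_size(area), PAGE_KERNEL, pages);
 *	if (err < 0)
 *		goto cleanup;
 *
 * map_kernel_range() is just map_kernel_range_noflush() plus the
 * flush_cache_vmap() that virtually-indexed caches require.
 */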

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others just
	 * put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.  Huge vmap mappings
 * have no unambiguous struct page, so they are warned on and refused.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space.
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
	WARN_ON_ONCE(pud_bad(*pud));
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	WARN_ON_ONCE(pmd_bad(*pmd));
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
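
/*
 * Sketch (hypothetical helper, not from this file): walking a vmalloc()
 * buffer page by page, e.g. to hand its backing pages to a scatterlist
 * builder:
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		struct page *pg = vmalloc_to_page(buf + i * PAGE_SIZE);
 *		// each iteration may return a physically discontiguous page
 *	}
 */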

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

/*
 * This kmem cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used in pair with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augment red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains a maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find a lowest match of free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

/*
 * Gets called when remove the node and rotate.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static unsigned long lazy_max_pages(void);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns back addresses of parent node
 * and its left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further
 * steps regarding inserting of conflicting overlap range
 * have to be skipped.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with parent rb_node and correct direction, i.e.
	 * left or right, for further insertion.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity check.
		 * Trigger the WARN() if there is full coverage or a
		 * partial overlap of the range.
		 */
		if (va->va_start < tmp_va->va_end &&
				va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_end > tmp_va->va_start &&
				va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (root == &free_vmap_area_root) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let augment_tree_propagate_from() put everything to
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (root == &free_vmap_area_root)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

#if DEBUG_AUGMENT_PROPAGATE_CHECK
static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from VA point. The propagation must be done
 * when VA size is modified by changing its va_start/va_end. Or
 * in case of newly inserting of VA to the tree.
 *
 * It means that __augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}

/*
 * Merge de-allocated chunk of VA memory with previous
 * and next free blocks. If coalesce is not done a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlap
 * ranges, followed by WARN() report. Despite it is a
 * buggy behaviour, a system can be alive and keep
 * working.
 */
static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "right" node first, i.e. "va" that
			 * was merged above, because the augmented tree is
			 * updated on removal.
			 */
			if (merged)
				unlink_va(va, root);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		link_va(va, root, parent, link, head);

	/*
	 * Last step is to check and update the tree.
	 */
	augment_tree_propagate_from(va);
	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block(lowest start address) in the tree,
 * that will accomplish the request corresponding to passing
 * parameters.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = free_vmap_area_root.rb_node;

	/* Adjust the search size for alignment overhead. */
	length = size + align - 1;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal or bigger to the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree,
			 * that will satisfy the search criteria. It can happen
			 * only once due to "vstart" restriction.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(unsigned long size)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(size, 1, vstart);
	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}
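
/*
 * The four fit types above, pictured against a free vmap_area (VA) and
 * the requested block (R):
 *
 *	FL_FIT_TYPE:  |RRRRRRRR|   request covers VA completely
 *	LE_FIT_TYPE:  |RRRR....|   request flush with the left edge
 *	RE_FIT_TYPE:  |....RRRR|   request flush with the right edge
 *	NE_FIT_TYPE:  |..RRRR..|   request strictly inside; VA must split
 *
 * Only NE_FIT_TYPE needs a second vmap_area object, which is why that
 * case is the one served by the ne_fit_preload_node per-CPU cache.
 */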

static __always_inline int
adjust_va_to_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size,
	enum fit_type type)
{
	struct vmap_area *lva = NULL;

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va(va, &free_vmap_area_root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For percpu allocator we do not do any pre-allocation
			 * and leave it as it is. The reason is it most likely
			 * never ends up with NE_FIT_TYPE splitting. In percpu
			 * allocator we do not need to take into account one
			 * extra split per allocation, because it is a rare
			 * case anyway.
			 *
			 * GFP_NOWAIT is used here instead of GFP_ATOMIC so
			 * that we do not dip into the emergency reserves;
			 * if even that fails there is no way to proceed.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node,
				&free_vmap_area_root, &free_vmap_area_list);
	}

	return 0;
}

/*
 * Returns a start address of the newly allocated area, if success.
 * Otherwise a vend is returned that indicates failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	unsigned long nva_start_addr;
	struct vmap_area *va;
	enum fit_type type;
	int ret;

	va = find_vmap_lowest_match(size, align, vstart);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Classify what we have found. */
	type = classify_va_fit_type(va, nva_start_addr, size);
	if (WARN_ON_ONCE(type == NOTHING_FIT))
		return vend;

	/* Update the free tree; if this fails, we allocate nothing. */
	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
	if (ret)
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(size);
#endif

	return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area().
 */
static void free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va, *pva;
	unsigned long addr;
	int purged = 0;
	int ret;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();
	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);

retry:
	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when fit type of free area is NE_FIT_TYPE. Please note, it
	 * does not guarantee that an allocation occurs on a CPU that
	 * is preloaded, instead we minimize the case when it is not.
	 * It can happen because of cpu migration, because there is a
	 * race until the below spinlock is taken.
	 *
	 * The preload is done in non-atomic context, thus it allows us
	 * to use more permissive allocation masks to be more stable
	 * under low memory condition and high memory pressure.
	 *
	 * Set "pva" to NULL here, because of "retry" path.
	 */
	pva = NULL;

	if (!this_cpu_read(ne_fit_preload_node))
		/*
		 * Even if it fails we do not really care about that.
		 * Just proceed as it is. If needed "overflow" path
		 * will refill the cache we allocate from.
		 */
		pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(&free_vmap_area_lock);

	if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
		kmem_cache_free(vmap_area_cachep, pva);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	addr = __alloc_vmap_area(size, align, vstart, vend);
	spin_unlock(&free_vmap_area_lock);

	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;

	spin_lock(&vmap_area_lock);
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	if (gfpflags_allow_blocking(gfp_mask)) {
		unsigned long freed = 0;
		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
		if (freed > 0) {
			purged = 0;
			goto retry;
		}
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
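
/*
 * Sketch of a purge-notifier user (callback and helper names are
 * hypothetical): a subsystem that can shed vmalloc space when an
 * allocation failure triggers the notifier chain in alloc_vmap_area().
 * Reporting a non-zero *freed makes the allocator retry.
 *
 *	static int my_shrink(struct notifier_block *nb,
 *			     unsigned long unused, void *freed)
 *	{
 *		*(unsigned long *)freed += my_cache_drop_pages();
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_shrink };
 *	...
 *	register_vmap_purge_notifier(&my_nb);
 */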

/*
 * lazy_max_pages is the maximum amount of virtual address space we gain by not
 * immediately freeing a lazily freed vmap area. This space is logically leaked
 * in the sense that it cannot be reused until it is actually purged, but it
 * makes a big difference: unmapping is much cheaper when TLB flushes can be
 * batched over many areas instead of being issued per vunmap.
 *
 * The value is chosen to be large enough to amortize the flushes without
 * hogging an unreasonable amount of address space, and scales with the
 * logarithm of the number of online CPUs.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
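
/*
 * Worked example: with 8 online CPUs, fls(8) == 4, so up to
 * 4 * 32MB = 128MB of lazily-freed vmap space (32768 pages with 4K
 * pages) may sit unmapped-but-unreleased before a purge is forced.
 */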

static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging.  There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct llist_node *valist;
	struct vmap_area *va;
	struct vmap_area *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	valist = llist_del_all(&vmap_purge_list);
	if (unlikely(valist == NULL))
		return false;

	/*
	 * TODO: to calculate a flush range without looping.
	 * The list can be up to lazy_max_pages() elements.
	 */
	llist_for_each_entry(va, valist, purge_list) {
		if (va->va_start < start)
			start = va->va_start;
		if (va->va_end > end)
			end = va->va_end;
	}

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&free_vmap_area_lock);
	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
		unsigned long orig_start = va->va_start;
		unsigned long orig_end = va->va_end;

		/*
		 * Finally insert or merge lazily-freed area. It is
		 * detached and there is no need to "unlink" it from
		 * anything.
		 */
		va = merge_or_add_vmap_area(va, &free_vmap_area_root,
					    &free_vmap_area_list);

		if (!va)
			continue;

		if (is_vmalloc_or_module_addr((void *)orig_start))
			kasan_release_vmalloc(orig_start, orig_end,
					      va->va_start, va->va_end);

		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&free_vmap_area_lock);
	}
	spin_unlock(&free_vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * else is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy;

	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/* After this point, we may free va at any time */
	llist_add(&va->purge_list, &vmap_purge_list);

	if (unlikely(nr_lazy > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there
 * is room for at least 16 percpu vmap blocks per CPU.
 *
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be
 * able to #define VMALLOC_SPACE as (VMALLOC_END - VMALLOC_START). Guess
 * instead (we just need a rough idea).
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * XArray of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_XARRAY(vmap_blocks);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */
static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
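
/*
 * Worked example of the index math above, assuming VMAP_BLOCK_SIZE is
 * 4MB: after subtracting VMALLOC_START rounded down to block size, an
 * address 9MB into the arena lands in block index 9MB / 4MB = 2.
 */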

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
 * @order:    how many 2^order pages should be occupied in newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
	if (err) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;

	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and
		 * terminate early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(unsigned long addr, unsigned long size)
{
	unsigned long offset;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(offset_in_page(size));
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap(addr, addr + size);

	order = get_order(size);
	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));

	unmap_kernel_range_noflush(addr, size);

	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(addr, addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
	int cpu;

	if (unlikely(!vmap_initialized))
		return;

	might_sleep();

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	if (!__purge_vmap_area_lazy(start, end) && flush)
		flush_tlb_kernel_range(start, end);
	mutex_unlock(&vmap_purge_lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush = 0;

	_vm_unmap_aliases(start, end, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;
	struct vmap_area *va;

	might_sleep();
	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(!PAGE_ALIGNED(addr));

	kasan_poison_vmalloc(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC)) {
		debug_check_no_locks_freed(mem, size);
		vb_free(addr, size);
		return;
	}

	va = find_vmap_area(addr);
	BUG_ON(!va);
	debug_check_no_locks_freed((void *)va->va_start,
				    (va->va_end - va->va_start));
	free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}

	kasan_unpoison_vmalloc(mem, size);

	if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
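
/*
 * Usage sketch (the buffer names are hypothetical): a transient mapping
 * pairs vm_map_ram() with vm_unmap_ram() using the same page count:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(va, nr);	// count must match the vm_map_ram() call
 */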

static struct vm_struct *vmlist __initdata;

/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

static void vmap_init_free_space(void)
{
	unsigned long vmap_start = 1;
	const unsigned long vmap_end = ULONG_MAX;
	struct vmap_area *busy, *free;

	/*
	 *     B     F     B     B     B     F
	 * -|-----|.....|-----|-----|-----|.....|-
	 *  |           The KVA space           |
	 *  |<--------------------------------->|
	 */
	list_for_each_entry(busy, &vmap_area_list, list) {
		if (busy->va_start - vmap_start > 0) {
			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
			if (!WARN_ON_ONCE(!free)) {
				free->va_start = vmap_start;
				free->va_end = busy->va_start;

				insert_vmap_area_augment(free, NULL,
					&free_vmap_area_root,
						&free_vmap_area_list);
			}
		}

		vmap_start = busy->va_end;
	}

	if (vmap_end - vmap_start > 0) {
		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (!WARN_ON_ONCE(!free)) {
			free->va_start = vmap_start;
			free->va_end = vmap_end;

			insert_vmap_area_augment(free, NULL,
				&free_vmap_area_root,
					&free_vmap_area_list);
		}
	}
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	/*
	 * Create the cache for vmap_area objects.
	 */
	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
		if (WARN_ON_ONCE(!va))
			continue;

		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	}

	/*
	 * Now we can initialize a free vmap space.
	 */
	vmap_init_free_space();
	vmap_initialized = true;
}

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, end);
}

static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
	struct vmap_area *va, unsigned long flags, const void *caller)
{
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
}

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	setup_vmalloc_vm_locked(vm, va, flags, caller);
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	unsigned long requested_size = size;

	BUG_ON(in_interrupt());
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, get_count_order_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size:	 size of the area
 * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * find_vm_area - find a continuous kernel virtual area
 * @addr:	  base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (!va)
		return NULL;

	return va->vm;
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr:	    base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	might_sleep();

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area((unsigned long)addr);
	if (va && va->vm) {
		struct vm_struct *vm = va->vm;

		va->vm = NULL;
		spin_unlock(&vmap_area_lock);

		kasan_free_shadow(vm);
		free_unmap_vmap_area(va);

		return vm;
	}

	spin_unlock(&vmap_area_lock);
	return NULL;
}

static inline void set_area_direct_map(const struct vm_struct *area,
				       int (*set_direct_map)(struct page *page))
{
	int i;

	for (i = 0; i < area->nr_pages; i++)
		if (page_address(area->pages[i]))
			set_direct_map(area->pages[i]);
}

/* Handle removing and resetting vm mappings related to the vm_struct. */
static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
{
	unsigned long start = ULONG_MAX, end = 0;
	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
	int flush_dmap = 0;
	int i;

	remove_vm_area(area->addr);

	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
	if (!flush_reset)
		return;

	/*
	 * If not deallocating pages, just do the flush of the VM area and
	 * return.
	 */
	if (!deallocate_pages) {
		vm_unmap_aliases();
		return;
	}

	/*
	 * If execution gets here, flush the vm mapping and reset the direct
	 * map. Find the start and end range of the direct mappings to make
	 * sure the vm_unmap_aliases() flush includes the direct map.
	 */
	for (i = 0; i < area->nr_pages; i++) {
		unsigned long addr = (unsigned long)page_address(area->pages[i]);
		if (addr) {
			start = min(addr, start);
			end = max(addr + PAGE_SIZE, end);
			flush_dmap = 1;
		}
	}

	/*
	 * Set direct map to something invalid so that it won't be cached if
	 * there are any accesses after the TLB flush, then flush the TLB and
	 * reset the direct map permissions to the default.
	 */
	set_area_direct_map(area, set_direct_map_invalid_noflush);
	_vm_unmap_aliases(start, end, flush_dmap);
	set_area_direct_map(area, set_direct_map_default_noflush);
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = find_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));

	kasan_poison_vmalloc(area->addr, area->size);

	vm_remove_mappings(area, deallocate_pages);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_pages(page, 0);
		}
		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);

		kvfree(area->pages);
	}

	kfree(area);
}

static inline void __vfree_deferred(const void *addr)
{
	/*
	 * Use raw_cpu_ptr() because this can be called from preemptible
	 * context. Preemption is absolutely fine here, because the llist_add()
	 * implementation is lockless, so it works even if we are adding to
	 * another cpu's list. schedule_work() should be fine with this too.
	 */
	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);

	if (llist_add((struct llist_node *)addr, &p->list))
		schedule_work(&p->wq);
}

/**
 * vfree_atomic - release memory allocated by vmalloc()
 * @addr:	  memory base address
 *
 * This one is just like vfree() but can be called in any atomic context
 * except NMIs.
 */
void vfree_atomic(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	__vfree_deferred(addr);
}

static void __vfree(const void *addr)
{
	if (unlikely(in_interrupt()))
		__vfree_deferred(addr);
	else
		__vunmap(addr, 1);
}

/**
 * vfree - Release memory allocated by vmalloc()
 * @addr:  Memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as obtained
 * from one of the vmalloc() family of APIs.  This will usually also free the
 * physical memory underlying the virtual allocation, but that memory is
 * reference counted, so it will not be freed until the last user goes away.
 *
 * If @addr is NULL, no operation is performed.
 *
 * Context:
 * May sleep if called *not* from interrupt context.
 * Must not be called in NMI context (strictly speaking, it could be
 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea).
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	might_sleep_if(!in_interrupt());

	if (!addr)
		return;

	__vfree(addr);
}
EXPORT_SYMBOL(vfree);
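
/*
 * Canonical pairing, sketched (buffer name hypothetical): a large,
 * virtually contiguous scratch buffer that need not be physically
 * contiguous.
 *
 *	buf = vmalloc(nbytes);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 *
 * Note vfree(NULL) is a no-op, mirroring kfree().
 */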

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr:   memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual space.
 *
 * Return: the address of the area or %NULL on failure
 */
void *vmap(struct page **pages, unsigned int count,
	   unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long size;		/* In bytes */

	might_sleep();

	if (count > totalram_pages())
		return NULL;

	size = (unsigned long)count << PAGE_SHIFT;
	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
			pages) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
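
/*
 * Sketch (names hypothetical): stitching an array of physically
 * discontiguous pages into one virtually contiguous kernel mapping.
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);	// releases the mapping only; pages are not freed
 */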

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
					0 :
					__GFP_HIGHMEM;

	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
				node, area->caller);
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}

	if (!pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	area->pages = pages;
	area->nr_pages = nr_pages;

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node == NUMA_NO_NODE)
			page = alloc_page(alloc_mask|highmem_mask);
		else
			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vfree() */
			area->nr_pages = i;
			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
			goto fail;
		}
		area->pages[i] = page;
		if (gfpflags_allow_blocking(gfp_mask))
			cond_resched();
	}
	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);

	if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
			prot, pages) < 0)
		goto fail;

	return area->addr;

fail:
	warn_alloc(gfp_mask, NULL,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes",
			  (area->nr_pages*PAGE_SIZE), area->size);
	__vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size:		  allocation size
 * @align:		  desired alignment
 * @start:		  vm area range start
 * @end:		  vm area range end
 * @gfp_mask:		  flags for the page level allocator
 * @prot:		  protection mask for the allocated pages
 * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
 * @node:		  node to use for allocation or NUMA_NO_NODE
 * @caller:		  caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 *
 * Return: the address of the area or %NULL on failure
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
		goto fail;

	area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	kmemleak_vmalloc(area, size, gfp_mask);

	return addr;

fail:
	warn_alloc(gfp_mask, NULL,
			  "vmalloc: allocation failure: %lu bytes", real_size);
	return NULL;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size:	    allocation size
 * @align:	    desired alignment
 * @gfp_mask:	    flags for the page level allocator
 * @node:	    node to use for allocation or NUMA_NO_NODE
 * @caller:	    caller's return address
 *
 * Allocate enough pages to cover @size from the page level allocator with
 * @gfp_mask flags.  Map them into contiguous kernel virtual space.
 *
 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
 * and __GFP_NOFAIL are not supported.
 *
 * Any use of gfp flags outside of GFP_KERNEL should be consulted
 * with mm people.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, PAGE_KERNEL, 0, node, caller);
}
/*
 * This is only for performance analysis of vmalloc and stress purpose.
 * It is required by vmalloc test module, therefore do not use it other
 * than that.
 */
#ifdef CONFIG_TEST_VMALLOC_MODULE
EXPORT_SYMBOL_GPL(__vmalloc_node);
#endif

void *__vmalloc(unsigned long size, gfp_t gfp_mask)
{
	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);

/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size:    allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_user(unsigned long size)
{
	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	  allocation size
 * @node:	  numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(vzalloc_node);

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
/*
 * 64b systems should always have either DMA or DMA32 zones. For others
 * GFP_DMA32 should do the right thing and use the normal zone.
 * (Parenthesized for safety; the expansion is only used standalone here.)
 */
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	     allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
void *vmalloc_32_user(unsigned long size)
{
	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
				    VM_USERMAP, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * small helper routine, copy contents to buf from addr.
 * If the page is not present, fill zero.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = offset_in_page(addr);
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need
		 * lock. But adding lock here means that we need to add
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf:     buffer for reading data
 * @addr:    vm address.
 * @count:   number of bytes to be read.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copies data from that area to a given buffer. If the given memory range
 * of [addr...addr+count) includes some valid address, data is copied to
 * proper area of @buf. If there are memory holes, they'll be zero-filled.
 * IOREMAP area is treated as memory hole and no copy is done.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access vmalloc area without
 * any information, as /dev/kmem.
 *
 * Return: number of bytes for which addr and buf should be increased
 * (same number as @count) or %0 if [addr...addr+count) doesn't
 * include any intersection with valid vmalloc area
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!va->vm)
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
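
/*
 * Design note: vread() is what /proc/kcore-style readers sit on. The walk
 * happens under vmap_area_lock, so it is safe against concurrent
 * vmalloc/vfree, and holes or IOREMAP ranges read back as zeroes rather
 * than faulting. @buf must be a kernel buffer; any copy_to_user() is the
 * caller's job.
 */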
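
/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf:	buffer for source data
 * @addr:	vm address.
 * @count:	number of bytes to be written.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from the given buffer to that area. If the given memory
 * range of [addr...addr+count) includes some valid address, data is
 * copied from the proper area of @buf. If there are memory holes, no
 * copy is done for those areas. IOREMAP areas are treated as memory
 * holes and no copy is done.
 *
 * If [addr...addr+count) doesn't intersect any live vm_struct area,
 * returns 0. @buf should be a kernel buffer.
 *
 * Return: number of bytes for which @addr and @buf should be
 * increased (same number as @count), or %0 if [addr...addr+count)
 * doesn't intersect any valid vmalloc area.
 */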
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!va->vm)
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}
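
/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma:		vma to cover
 * @uaddr:		target user address to start at
 * @kaddr:		virtual address of vmalloc kernel memory
 * @pgoff:		offset from @kaddr to start at
 * @size:		size of map area
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Will return failure if that criteria isn't
 * met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */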
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long pgoff,
				unsigned long size)
{
	struct vm_struct *area;
	unsigned long off;
	unsigned long end_index;

	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
		return -EINVAL;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
		return -EINVAL;

	if (check_add_overflow(size, off, &end_index) ||
	    end_index > get_vm_area_size(area))
		return -EINVAL;
	kaddr += off;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);
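
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma:		vma to cover (map full range of vma)
 * @addr:		vmalloc memory
 * @pgoff:		number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */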
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr, pgoff,
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
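
/*
 * apply_to_page_range() callback: record the address of each PTE backing
 * the area into the array supplied via @data (if one was given).
 */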
static int f(pte_t *pte, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}
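
/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@ptes:		returns the PTEs for the address space
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.
 *
 *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 *	allocated for the VM area are returned.
 */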
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return rb_entry_safe(n, struct vmap_area, rb_node);
}
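
/**
 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
 * @addr: target address
 *
 * Returns: vmap_area if it is found. If there is no such area
 *   the first highest (reverse order) vmap_area is returned
 *   i.e. va->va_start < addr && va->va_end < addr, or NULL
 *   if there are no areas before @addr.
 */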
static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)
{
	struct vmap_area *va, *tmp;
	struct rb_node *n;

	n = free_vmap_area_root.rb_node;
	va = NULL;

	while (n) {
		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_start <= addr) {
			va = tmp;
			if (tmp->va_end >= addr)
				break;

			n = n->rb_right;
		} else {
			n = n->rb_left;
		}
	}

	return va;
}
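
/**
 * pvm_determine_end_from_reverse - find the highest aligned address
 * of a free block below VMALLOC_END
 * @va:
 *   in - the VA we start the search from (reverse order);
 *   out - the VA with the highest aligned end address.
 * @align: alignment for the end address
 *
 * Returns: determined end address within vmap_area, or 0 if none fits
 */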
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}
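
/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans free blocks from the end looking
 * for matching base. While scanning, if any of the areas do not fit the
 * base address is pulled down to fit the area. Scanning is repeated till
 * all the areas fit and then all necessary data structures are inserted
 * and the result is returned.
 */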
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end, orig_start, orig_end;
	bool purged = false;
	enum fit_type type;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = area + 1; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			BUG_ON(start2 < end && start < end2);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&free_vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	va = pvm_find_va_enclose_addr(vmalloc_end);
	base = pvm_determine_end_from_reverse(&va, align) - end;

	while (true) {
		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * Fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If required width exceeds current VA block, move
		 * base downwards and then recheck.
		 */
		if (base + end > va->va_end) {
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		type = classify_va_fit_type(va, start, size);
		if (WARN_ON_ONCE(type == NOTHING_FIT))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = adjust_va_to_fit_type(va, start, size, type);
		if (unlikely(ret))
			goto recovery;

		/* Allocated area. */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;
	}

	spin_unlock(&free_vmap_area_lock);

	/* populate the KASAN shadow space */
	for (area = 0; area < nr_vms; area++) {
		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
			goto err_free_shadow;

		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
				       sizes[area]);
	}

	/* insert all vm's */
	spin_lock(&vmap_area_lock);
	for (area = 0; area < nr_vms; area++) {
		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);

		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);
	}
	spin_unlock(&vmap_area_lock);

	kfree(vas);
	return vms;

recovery:
	/*
	 * Remove previously allocated areas. There is no
	 * need in removing these areas from the busy tree,
	 * because they are inserted only on the final step
	 * and when pcpu_get_vm_areas() is success.
	 */
	while (area--) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
					    &free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&free_vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = true;

		/* Before "retry", check if we recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;

err_free_shadow:
	spin_lock(&free_vmap_area_lock);
	/*
	 * We release all the vmalloc shadows, even the ones for regions that
	 * hadn't been successfully added. This relies on kasan_release_vmalloc
	 * being able to tolerate this case.
	 */
	for (area = 0; area < nr_vms; area++) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
					    &free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
		kfree(vms[area]);
	}
	spin_unlock(&free_vmap_area_lock);
	kfree(vas);
	kfree(vms);
	return NULL;
}
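
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */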
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_purge_lock)
	__acquires(&vmap_area_lock)
{
	mutex_lock(&vmap_purge_lock);
	spin_lock(&vmap_area_lock);

	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
	__releases(&vmap_purge_lock)
{
	/* release the locks in reverse of the acquisition order */
	spin_unlock(&vmap_area_lock);
	mutex_unlock(&vmap_purge_lock);
}
3458
3459static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3460{
3461 if (IS_ENABLED(CONFIG_NUMA)) {
3462 unsigned int nr, *counters = m->private;
3463
3464 if (!counters)
3465 return;
3466
3467 if (v->flags & VM_UNINITIALIZED)
3468 return;
3469
3470 smp_rmb();
3471
3472 memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3473
3474 for (nr = 0; nr < v->nr_pages; nr++)
3475 counters[page_to_nid(v->pages[nr])]++;
3476
3477 for_each_node_state(nr, N_HIGH_MEMORY)
3478 if (counters[nr])
3479 seq_printf(m, " N%u=%u", nr, counters[nr]);
3480 }
3481}
3482
3483static void show_purge_info(struct seq_file *m)
3484{
3485 struct llist_node *head;
3486 struct vmap_area *va;
3487
3488 head = READ_ONCE(vmap_purge_list.first);
3489 if (head == NULL)
3490 return;
3491
3492 llist_for_each_entry(va, head, purge_list) {
3493 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3494 (void *)va->va_start, (void *)va->va_end,
3495 va->va_end - va->va_start);
3496 }
3497}
3498
3499static int s_show(struct seq_file *m, void *p)
3500{
3501 struct vmap_area *va;
3502 struct vm_struct *v;
3503
3504 va = list_entry(p, struct vmap_area, list);
3505
3506
3507
3508
3509
3510 if (!va->vm) {
3511 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3512 (void *)va->va_start, (void *)va->va_end,
3513 va->va_end - va->va_start);
3514
3515 return 0;
3516 }
3517
3518 v = va->vm;
3519
3520 seq_printf(m, "0x%pK-0x%pK %7ld",
3521 v->addr, v->addr + v->size, v->size);
3522
3523 if (v->caller)
3524 seq_printf(m, " %pS", v->caller);
3525
3526 if (v->nr_pages)
3527 seq_printf(m, " pages=%d", v->nr_pages);
3528
3529 if (v->phys_addr)
3530 seq_printf(m, " phys=%pa", &v->phys_addr);
3531
3532 if (v->flags & VM_IOREMAP)
3533 seq_puts(m, " ioremap");
3534
3535 if (v->flags & VM_ALLOC)
3536 seq_puts(m, " vmalloc");
3537
3538 if (v->flags & VM_MAP)
3539 seq_puts(m, " vmap");
3540
3541 if (v->flags & VM_USERMAP)
3542 seq_puts(m, " user");
3543
3544 if (v->flags & VM_DMA_COHERENT)
3545 seq_puts(m, " dma-coherent");
3546
3547 if (is_vmalloc_addr(v->pages))
3548 seq_puts(m, " vpages");
3549
3550 show_numa_info(m, v);
3551 seq_putc(m, '\n');
3552
3553
3554
3555
3556
3557
3558
3559 if (list_is_last(&va->list, &vmap_area_list))
3560 show_purge_info(m);
3561
3562 return 0;
3563}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);

#endif	/* CONFIG_PROC_FS */