// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, August 2002
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>

#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"
#include "pgalloc-track.h"

/* Report whether an address falls inside the [VMALLOC_START, VMALLOC_END) range. */
bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

/* vfree() work deferred to process context, one queue per CPU. */
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

/* Drain the per-CPU deferred-vfree list from workqueue context. */
static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/

72static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
73 pgtbl_mod_mask *mask)
74{
75 pte_t *pte;
76
77 pte = pte_offset_kernel(pmd, addr);
78 do {
79 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80 WARN_ON(!pte_none(ptent) && !pte_present(ptent));
81 } while (pte++, addr += PAGE_SIZE, addr != end);
82 *mask |= PGTBL_PTE_MODIFIED;
83}
84
85static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
86 pgtbl_mod_mask *mask)
87{
88 pmd_t *pmd;
89 unsigned long next;
90 int cleared;
91
92 pmd = pmd_offset(pud, addr);
93 do {
94 next = pmd_addr_end(addr, end);
95
96 cleared = pmd_clear_huge(pmd);
97 if (cleared || pmd_bad(*pmd))
98 *mask |= PGTBL_PMD_MODIFIED;
99
100 if (cleared)
101 continue;
102 if (pmd_none_or_clear_bad(pmd))
103 continue;
104 vunmap_pte_range(pmd, addr, next, mask);
105
106 cond_resched();
107 } while (pmd++, addr = next, addr != end);
108}
109
110static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
111 pgtbl_mod_mask *mask)
112{
113 pud_t *pud;
114 unsigned long next;
115 int cleared;
116
117 pud = pud_offset(p4d, addr);
118 do {
119 next = pud_addr_end(addr, end);
120
121 cleared = pud_clear_huge(pud);
122 if (cleared || pud_bad(*pud))
123 *mask |= PGTBL_PUD_MODIFIED;
124
125 if (cleared)
126 continue;
127 if (pud_none_or_clear_bad(pud))
128 continue;
129 vunmap_pmd_range(pud, addr, next, mask);
130 } while (pud++, addr = next, addr != end);
131}
132
133static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
134 pgtbl_mod_mask *mask)
135{
136 p4d_t *p4d;
137 unsigned long next;
138 int cleared;
139
140 p4d = p4d_offset(pgd, addr);
141 do {
142 next = p4d_addr_end(addr, end);
143
144 cleared = p4d_clear_huge(p4d);
145 if (cleared || p4d_bad(*p4d))
146 *mask |= PGTBL_P4D_MODIFIED;
147
148 if (cleared)
149 continue;
150 if (p4d_none_or_clear_bad(p4d))
151 continue;
152 vunmap_pud_range(p4d, addr, next, mask);
153 } while (p4d++, addr = next, addr != end);
154}
/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @start: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @start.  The VM area @start and @size
 * specify should have been allocated using get_vm_area() and its friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is responsible
 * for calling flush_cache_vunmap() on to-be-unmapped areas before calling
 * this function and flush_tlb_kernel_range() after.
 */
169void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
170{
171 unsigned long end = start + size;
172 unsigned long next;
173 pgd_t *pgd;
174 unsigned long addr = start;
175 pgtbl_mod_mask mask = 0;
176
177 BUG_ON(addr >= end);
178 pgd = pgd_offset_k(addr);
179 do {
180 next = pgd_addr_end(addr, end);
181 if (pgd_bad(*pgd))
182 mask |= PGTBL_PGD_MODIFIED;
183 if (pgd_none_or_clear_bad(pgd))
184 continue;
185 vunmap_p4d_range(pgd, addr, next, &mask);
186 } while (pgd++, addr = next, addr != end);
187
188 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
189 arch_sync_kernel_mappings(start, end);
190}
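
/*
 * Illustrative call sequence (a sketch of the caller's responsibility, not a
 * helper defined here): because the *_noflush variant performs no cache or
 * TLB maintenance, a caller tearing down a mapping brackets it the way
 * unmap_kernel_range() further down does:
 *
 *	flush_cache_vunmap(addr, addr + size);
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 */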
191
192static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
193 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
194 pgtbl_mod_mask *mask)
195{
196 pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */
203 pte = pte_alloc_kernel_track(pmd, addr, mask);
204 if (!pte)
205 return -ENOMEM;
206 do {
207 struct page *page = pages[*nr];
208
209 if (WARN_ON(!pte_none(*pte)))
210 return -EBUSY;
211 if (WARN_ON(!page))
212 return -ENOMEM;
213 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
214 (*nr)++;
215 } while (pte++, addr += PAGE_SIZE, addr != end);
216 *mask |= PGTBL_PTE_MODIFIED;
217 return 0;
218}
219
220static int vmap_pmd_range(pud_t *pud, unsigned long addr,
221 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
222 pgtbl_mod_mask *mask)
223{
224 pmd_t *pmd;
225 unsigned long next;
226
227 pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
228 if (!pmd)
229 return -ENOMEM;
230 do {
231 next = pmd_addr_end(addr, end);
232 if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
233 return -ENOMEM;
234 } while (pmd++, addr = next, addr != end);
235 return 0;
236}
237
238static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
239 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
240 pgtbl_mod_mask *mask)
241{
242 pud_t *pud;
243 unsigned long next;
244
245 pud = pud_alloc_track(&init_mm, p4d, addr, mask);
246 if (!pud)
247 return -ENOMEM;
248 do {
249 next = pud_addr_end(addr, end);
250 if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
251 return -ENOMEM;
252 } while (pud++, addr = next, addr != end);
253 return 0;
254}
255
256static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
257 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
258 pgtbl_mod_mask *mask)
259{
260 p4d_t *p4d;
261 unsigned long next;
262
263 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
264 if (!p4d)
265 return -ENOMEM;
266 do {
267 next = p4d_addr_end(addr, end);
268 if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
269 return -ENOMEM;
270 } while (p4d++, addr = next, addr != end);
271 return 0;
272}
273
/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size specify
 * should have been allocated using get_vm_area() and its friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is responsible
 * for calling flush_cache_vmap() on to-be-mapped areas before calling this
 * function.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
292int map_kernel_range_noflush(unsigned long addr, unsigned long size,
293 pgprot_t prot, struct page **pages)
294{
295 unsigned long start = addr;
296 unsigned long end = addr + size;
297 unsigned long next;
298 pgd_t *pgd;
299 int err = 0;
300 int nr = 0;
301 pgtbl_mod_mask mask = 0;
302
303 BUG_ON(addr >= end);
304 pgd = pgd_offset_k(addr);
305 do {
306 next = pgd_addr_end(addr, end);
307 if (pgd_bad(*pgd))
308 mask |= PGTBL_PGD_MODIFIED;
309 err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
310 if (err)
311 return err;
312 } while (pgd++, addr = next, addr != end);
313
314 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
315 arch_sync_kernel_mappings(start, end);
316
317 return 0;
318}
319
320int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
321 struct page **pages)
322{
323 int ret;
324
325 ret = map_kernel_range_noflush(start, size, prot, pages);
326 flush_cache_vmap(start, start + size);
327 return ret;
328}
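
/*
 * Minimal usage sketch (illustrative only; assumes the caller already has a
 * filled 'pages' array of 'count' pages, error handling trimmed). It roughly
 * mirrors what vmap() further down does:
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area((unsigned long)count << PAGE_SHIFT, VM_MAP);
 *	if (area &&
 *	    map_kernel_range((unsigned long)area->addr,
 *			     (unsigned long)count << PAGE_SHIFT,
 *			     PAGE_KERNEL, pages) < 0) {
 *		vunmap(area->addr);
 *		area = NULL;
 *	}
 */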
329
330int is_vmalloc_or_module_addr(const void *x)
331{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
337#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
338 unsigned long addr = (unsigned long)x;
339 if (addr >= MODULES_VADDR && addr < MODULES_END)
340 return 1;
341#endif
342 return is_vmalloc_addr(x);
343}
344
/*
 * Walk a vmalloc address to the struct page it maps.
 */
348struct page *vmalloc_to_page(const void *vmalloc_addr)
349{
350 unsigned long addr = (unsigned long) vmalloc_addr;
351 struct page *page = NULL;
352 pgd_t *pgd = pgd_offset_k(addr);
353 p4d_t *p4d;
354 pud_t *pud;
355 pmd_t *pmd;
356 pte_t *ptep, pte;
357
358
359
360
361
362 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
363
364 if (pgd_none(*pgd))
365 return NULL;
366 p4d = p4d_offset(pgd, addr);
367 if (p4d_none(*p4d))
368 return NULL;
369 pud = pud_offset(p4d, addr);

	/*
	 * Don't dereference bad PUD or PMD (below) entries. This will also
	 * identify huge mappings, which we may encounter on architectures
	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
	 * not [unambiguously] associated with a struct page, so there is
	 * no correct value to return for them.
	 */
379 WARN_ON_ONCE(pud_bad(*pud));
380 if (pud_none(*pud) || pud_bad(*pud))
381 return NULL;
382 pmd = pmd_offset(pud, addr);
383 WARN_ON_ONCE(pmd_bad(*pmd));
384 if (pmd_none(*pmd) || pmd_bad(*pmd))
385 return NULL;
386
387 ptep = pte_offset_map(pmd, addr);
388 pte = *ptep;
389 if (pte_present(pte))
390 page = pte_page(pte);
391 pte_unmap(ptep);
392 return page;
393}
394EXPORT_SYMBOL(vmalloc_to_page);
395
396
397
398
399unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
400{
401 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
402}
403EXPORT_SYMBOL(vmalloc_to_pfn);
404

/*** Global kva allocator ***/

408#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
409#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
410
411
412static DEFINE_SPINLOCK(vmap_area_lock);
413static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
415LIST_HEAD(vmap_area_list);
416static LLIST_HEAD(vmap_purge_list);
417static struct rb_root vmap_area_root = RB_ROOT;
418static bool vmap_initialized __read_mostly;
419
420
421
422
423
424
425
426static struct kmem_cache *vmap_area_cachep;
427
428
429
430
431
432static LIST_HEAD(free_vmap_area_list);
433
434
435
436
437
438
439
440
441
442
443
444static struct rb_root free_vmap_area_root = RB_ROOT;
445
446
447
448
449
450
451static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
452
453static __always_inline unsigned long
454va_size(struct vmap_area *va)
455{
456 return (va->va_end - va->va_start);
457}
458
459static __always_inline unsigned long
460get_subtree_max_size(struct rb_node *node)
461{
462 struct vmap_area *va;
463
464 va = rb_entry_safe(node, struct vmap_area, rb_node);
465 return va ? va->subtree_max_size : 0;
466}
467
468
469
470
471static __always_inline unsigned long
472compute_subtree_max_size(struct vmap_area *va)
473{
474 return max3(va_size(va),
475 get_subtree_max_size(va->rb_node.rb_left),
476 get_subtree_max_size(va->rb_node.rb_right));
477}
478
479RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
480 struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
481
482static void purge_vmap_area_lazy(void);
483static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
484static unsigned long lazy_max_pages(void);
485
486static atomic_long_t nr_vmalloc_pages;
487
488unsigned long vmalloc_nr_pages(void)
489{
490 return atomic_long_read(&nr_vmalloc_pages);
491}
492
493static struct vmap_area *__find_vmap_area(unsigned long addr)
494{
495 struct rb_node *n = vmap_area_root.rb_node;
496
497 while (n) {
498 struct vmap_area *va;
499
500 va = rb_entry(n, struct vmap_area, rb_node);
501 if (addr < va->va_start)
502 n = n->rb_left;
503 else if (addr >= va->va_end)
504 n = n->rb_right;
505 else
506 return va;
507 }
508
509 return NULL;
510}
511
512
513
514
515
516
517
518
519
520static __always_inline struct rb_node **
521find_va_links(struct vmap_area *va,
522 struct rb_root *root, struct rb_node *from,
523 struct rb_node **parent)
524{
525 struct vmap_area *tmp_va;
526 struct rb_node **link;
527
528 if (root) {
529 link = &root->rb_node;
530 if (unlikely(!*link)) {
531 *parent = NULL;
532 return link;
533 }
534 } else {
535 link = &from;
536 }
537
538
539
540
541
542
543 do {
544 tmp_va = rb_entry(*link, struct vmap_area, rb_node);
545
546
547
548
549
550
551 if (va->va_start < tmp_va->va_end &&
552 va->va_end <= tmp_va->va_start)
553 link = &(*link)->rb_left;
554 else if (va->va_end > tmp_va->va_start &&
555 va->va_start >= tmp_va->va_end)
556 link = &(*link)->rb_right;
557 else {
558 WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
559 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
560
561 return NULL;
562 }
563 } while (*link);
564
565 *parent = &tmp_va->rb_node;
566 return link;
567}
568
569static __always_inline struct list_head *
570get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
571{
572 struct list_head *list;
573
574 if (unlikely(!parent))
575
576
577
578
579
580
581 return NULL;
582
583 list = &rb_entry(parent, struct vmap_area, rb_node)->list;
584 return (&parent->rb_right == link ? list->next : list);
585}
586
587static __always_inline void
588link_va(struct vmap_area *va, struct rb_root *root,
589 struct rb_node *parent, struct rb_node **link, struct list_head *head)
590{
591
592
593
594
595 if (likely(parent)) {
596 head = &rb_entry(parent, struct vmap_area, rb_node)->list;
597 if (&parent->rb_right != link)
598 head = head->prev;
599 }
600
601
602 rb_link_node(&va->rb_node, parent, link);
603 if (root == &free_vmap_area_root) {
604
605
606
607
608
609
610
611
612
613
614
615 rb_insert_augmented(&va->rb_node,
616 root, &free_vmap_area_rb_augment_cb);
617 va->subtree_max_size = 0;
618 } else {
619 rb_insert_color(&va->rb_node, root);
620 }
621
622
623 list_add(&va->list, head);
624}
625
626static __always_inline void
627unlink_va(struct vmap_area *va, struct rb_root *root)
628{
629 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
630 return;
631
632 if (root == &free_vmap_area_root)
633 rb_erase_augmented(&va->rb_node,
634 root, &free_vmap_area_rb_augment_cb);
635 else
636 rb_erase(&va->rb_node, root);
637
638 list_del(&va->list);
639 RB_CLEAR_NODE(&va->rb_node);
640}
641
642#if DEBUG_AUGMENT_PROPAGATE_CHECK
643static void
644augment_tree_propagate_check(void)
645{
646 struct vmap_area *va;
647 unsigned long computed_size;
648
649 list_for_each_entry(va, &free_vmap_area_list, list) {
650 computed_size = compute_subtree_max_size(va);
651 if (computed_size != va->subtree_max_size)
652 pr_emerg("tree is corrupted: %lu, %lu\n",
653 va_size(va), va->subtree_max_size);
654 }
655}
656#endif
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685static __always_inline void
686augment_tree_propagate_from(struct vmap_area *va)
687{
688
689
690
691
692
693 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
694
695#if DEBUG_AUGMENT_PROPAGATE_CHECK
696 augment_tree_propagate_check();
697#endif
698}
699
700static void
701insert_vmap_area(struct vmap_area *va,
702 struct rb_root *root, struct list_head *head)
703{
704 struct rb_node **link;
705 struct rb_node *parent;
706
707 link = find_va_links(va, root, NULL, &parent);
708 if (link)
709 link_va(va, root, parent, link, head);
710}
711
712static void
713insert_vmap_area_augment(struct vmap_area *va,
714 struct rb_node *from, struct rb_root *root,
715 struct list_head *head)
716{
717 struct rb_node **link;
718 struct rb_node *parent;
719
720 if (from)
721 link = find_va_links(va, NULL, from, &parent);
722 else
723 link = find_va_links(va, root, NULL, &parent);
724
725 if (link) {
726 link_va(va, root, parent, link, head);
727 augment_tree_propagate_from(va);
728 }
729}
730
731
732
733
734
735
736
737
738
739
740
741
742static __always_inline struct vmap_area *
743merge_or_add_vmap_area(struct vmap_area *va,
744 struct rb_root *root, struct list_head *head)
745{
746 struct vmap_area *sibling;
747 struct list_head *next;
748 struct rb_node **link;
749 struct rb_node *parent;
750 bool merged = false;
751
752
753
754
755
756 link = find_va_links(va, root, NULL, &parent);
757 if (!link)
758 return NULL;
759
760
761
762
763 next = get_va_next_sibling(parent, link);
764 if (unlikely(next == NULL))
765 goto insert;
766
767
768
769
770
771
772
773
774 if (next != head) {
775 sibling = list_entry(next, struct vmap_area, list);
776 if (sibling->va_start == va->va_end) {
777 sibling->va_start = va->va_start;
778
779
780 kmem_cache_free(vmap_area_cachep, va);
781
782
783 va = sibling;
784 merged = true;
785 }
786 }
787
788
789
790
791
792
793
794
795 if (next->prev != head) {
796 sibling = list_entry(next->prev, struct vmap_area, list);
797 if (sibling->va_end == va->va_start) {
798
799
800
801
802
803
804
805 if (merged)
806 unlink_va(va, root);
807
808 sibling->va_end = va->va_end;
809
810
811 kmem_cache_free(vmap_area_cachep, va);
812
813
814 va = sibling;
815 merged = true;
816 }
817 }
818
819insert:
820 if (!merged)
821 link_va(va, root, parent, link, head);
822
823
824
825
826 augment_tree_propagate_from(va);
827 return va;
828}
829
830static __always_inline bool
831is_within_this_va(struct vmap_area *va, unsigned long size,
832 unsigned long align, unsigned long vstart)
833{
834 unsigned long nva_start_addr;
835
836 if (va->va_start > vstart)
837 nva_start_addr = ALIGN(va->va_start, align);
838 else
839 nva_start_addr = ALIGN(vstart, align);
840
841
842 if (nva_start_addr + size < nva_start_addr ||
843 nva_start_addr < vstart)
844 return false;
845
846 return (nva_start_addr + size <= va->va_end);
847}
848
849
850
851
852
853
854static __always_inline struct vmap_area *
855find_vmap_lowest_match(unsigned long size,
856 unsigned long align, unsigned long vstart)
857{
858 struct vmap_area *va;
859 struct rb_node *node;
860 unsigned long length;
861
862
863 node = free_vmap_area_root.rb_node;
864
865
866 length = size + align - 1;
867
868 while (node) {
869 va = rb_entry(node, struct vmap_area, rb_node);
870
871 if (get_subtree_max_size(node->rb_left) >= length &&
872 vstart < va->va_start) {
873 node = node->rb_left;
874 } else {
875 if (is_within_this_va(va, size, align, vstart))
876 return va;
877
878
879
880
881
882
883 if (get_subtree_max_size(node->rb_right) >= length) {
884 node = node->rb_right;
885 continue;
886 }
887
888
889
890
891
892
893 while ((node = rb_parent(node))) {
894 va = rb_entry(node, struct vmap_area, rb_node);
895 if (is_within_this_va(va, size, align, vstart))
896 return va;
897
898 if (get_subtree_max_size(node->rb_right) >= length &&
899 vstart <= va->va_start) {
900 node = node->rb_right;
901 break;
902 }
903 }
904 }
905 }
906
907 return NULL;
908}
909
910#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
911#include <linux/random.h>
912
913static struct vmap_area *
914find_vmap_lowest_linear_match(unsigned long size,
915 unsigned long align, unsigned long vstart)
916{
917 struct vmap_area *va;
918
919 list_for_each_entry(va, &free_vmap_area_list, list) {
920 if (!is_within_this_va(va, size, align, vstart))
921 continue;
922
923 return va;
924 }
925
926 return NULL;
927}
928
929static void
930find_vmap_lowest_match_check(unsigned long size)
931{
932 struct vmap_area *va_1, *va_2;
933 unsigned long vstart;
934 unsigned int rnd;
935
936 get_random_bytes(&rnd, sizeof(rnd));
937 vstart = VMALLOC_START + rnd;
938
939 va_1 = find_vmap_lowest_match(size, 1, vstart);
940 va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
941
942 if (va_1 != va_2)
943 pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
944 va_1, va_2, vstart);
945}
946#endif
947
948enum fit_type {
949 NOTHING_FIT = 0,
950 FL_FIT_TYPE = 1,
951 LE_FIT_TYPE = 2,
952 RE_FIT_TYPE = 3,
953 NE_FIT_TYPE = 4
954};
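
/*
 * How a new allocation |xxx| can sit inside a free vmap_area |----|
 * (nva_start_addr/size versus va_start/va_end):
 *
 *	FL_FIT_TYPE:  |xxxxxxxxxx|   request occupies the whole free block
 *	LE_FIT_TYPE:  |xxxx------|   request starts at va_start, tail remains
 *	RE_FIT_TYPE:  |------xxxx|   request ends at va_end, head remains
 *	NE_FIT_TYPE:  |---xxxx---|   request sits in the middle, block is split
 */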
955
956static __always_inline enum fit_type
957classify_va_fit_type(struct vmap_area *va,
958 unsigned long nva_start_addr, unsigned long size)
959{
960 enum fit_type type;
961
962
963 if (nva_start_addr < va->va_start ||
964 nva_start_addr + size > va->va_end)
965 return NOTHING_FIT;
966
967
968 if (va->va_start == nva_start_addr) {
969 if (va->va_end == nva_start_addr + size)
970 type = FL_FIT_TYPE;
971 else
972 type = LE_FIT_TYPE;
973 } else if (va->va_end == nva_start_addr + size) {
974 type = RE_FIT_TYPE;
975 } else {
976 type = NE_FIT_TYPE;
977 }
978
979 return type;
980}
981
982static __always_inline int
983adjust_va_to_fit_type(struct vmap_area *va,
984 unsigned long nva_start_addr, unsigned long size,
985 enum fit_type type)
986{
987 struct vmap_area *lva = NULL;
988
989 if (type == FL_FIT_TYPE) {
990
991
992
993
994
995
996
997 unlink_va(va, &free_vmap_area_root);
998 kmem_cache_free(vmap_area_cachep, va);
999 } else if (type == LE_FIT_TYPE) {
1000
1001
1002
1003
1004
1005
1006
1007 va->va_start += size;
1008 } else if (type == RE_FIT_TYPE) {
1009
1010
1011
1012
1013
1014
1015
1016 va->va_end = nva_start_addr;
1017 } else if (type == NE_FIT_TYPE) {
1018
1019
1020
1021
1022
1023
1024
1025 lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1026 if (unlikely(!lva)) {
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052 lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1053 if (!lva)
1054 return -1;
1055 }
1056
1057
1058
1059
1060 lva->va_start = va->va_start;
1061 lva->va_end = nva_start_addr;
1062
1063
1064
1065
1066 va->va_start = nva_start_addr + size;
1067 } else {
1068 return -1;
1069 }
1070
1071 if (type != FL_FIT_TYPE) {
1072 augment_tree_propagate_from(va);
1073
1074 if (lva)
1075 insert_vmap_area_augment(lva, &va->rb_node,
1076 &free_vmap_area_root, &free_vmap_area_list);
1077 }
1078
1079 return 0;
1080}
1081
1082
1083
1084
1085
1086static __always_inline unsigned long
1087__alloc_vmap_area(unsigned long size, unsigned long align,
1088 unsigned long vstart, unsigned long vend)
1089{
1090 unsigned long nva_start_addr;
1091 struct vmap_area *va;
1092 enum fit_type type;
1093 int ret;
1094
1095 va = find_vmap_lowest_match(size, align, vstart);
1096 if (unlikely(!va))
1097 return vend;
1098
1099 if (va->va_start > vstart)
1100 nva_start_addr = ALIGN(va->va_start, align);
1101 else
1102 nva_start_addr = ALIGN(vstart, align);
1103
1104
1105 if (nva_start_addr + size > vend)
1106 return vend;
1107
1108
1109 type = classify_va_fit_type(va, nva_start_addr, size);
1110 if (WARN_ON_ONCE(type == NOTHING_FIT))
1111 return vend;
1112
1113
1114 ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1115 if (ret)
1116 return vend;
1117
1118#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1119 find_vmap_lowest_match_check(size);
1120#endif
1121
1122 return nva_start_addr;
1123}
1124
1125
1126
1127
1128static void free_vmap_area(struct vmap_area *va)
1129{
1130
1131
1132
1133 spin_lock(&vmap_area_lock);
1134 unlink_va(va, &vmap_area_root);
1135 spin_unlock(&vmap_area_lock);
1136
1137
1138
1139
1140 spin_lock(&free_vmap_area_lock);
1141 merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
1142 spin_unlock(&free_vmap_area_lock);
1143}
1144
1145
1146
1147
1148
1149static struct vmap_area *alloc_vmap_area(unsigned long size,
1150 unsigned long align,
1151 unsigned long vstart, unsigned long vend,
1152 int node, gfp_t gfp_mask)
1153{
1154 struct vmap_area *va, *pva;
1155 unsigned long addr;
1156 int purged = 0;
1157 int ret;
1158
1159 BUG_ON(!size);
1160 BUG_ON(offset_in_page(size));
1161 BUG_ON(!is_power_of_2(align));
1162
1163 if (unlikely(!vmap_initialized))
1164 return ERR_PTR(-EBUSY);
1165
1166 might_sleep();
1167 gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1168
1169 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1170 if (unlikely(!va))
1171 return ERR_PTR(-ENOMEM);
1172
1173
1174
1175
1176
1177 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1178
1179retry:
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195 pva = NULL;
1196
1197 if (!this_cpu_read(ne_fit_preload_node))
1198
1199
1200
1201
1202
1203 pva = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1204
1205 spin_lock(&free_vmap_area_lock);
1206
1207 if (pva && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva))
1208 kmem_cache_free(vmap_area_cachep, pva);
1209
1210
1211
1212
1213
1214 addr = __alloc_vmap_area(size, align, vstart, vend);
1215 spin_unlock(&free_vmap_area_lock);
1216
1217 if (unlikely(addr == vend))
1218 goto overflow;
1219
1220 va->va_start = addr;
1221 va->va_end = addr + size;
1222 va->vm = NULL;
1223
1224
1225 spin_lock(&vmap_area_lock);
1226 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1227 spin_unlock(&vmap_area_lock);
1228
1229 BUG_ON(!IS_ALIGNED(va->va_start, align));
1230 BUG_ON(va->va_start < vstart);
1231 BUG_ON(va->va_end > vend);
1232
1233 ret = kasan_populate_vmalloc(addr, size);
1234 if (ret) {
1235 free_vmap_area(va);
1236 return ERR_PTR(ret);
1237 }
1238
1239 return va;
1240
1241overflow:
1242 if (!purged) {
1243 purge_vmap_area_lazy();
1244 purged = 1;
1245 goto retry;
1246 }
1247
1248 if (gfpflags_allow_blocking(gfp_mask)) {
1249 unsigned long freed = 0;
1250 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1251 if (freed > 0) {
1252 purged = 0;
1253 goto retry;
1254 }
1255 }
1256
1257 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1258 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1259 size);
1260
1261 kmem_cache_free(vmap_area_cachep, va);
1262 return ERR_PTR(-EBUSY);
1263}
1264
1265int register_vmap_purge_notifier(struct notifier_block *nb)
1266{
1267 return blocking_notifier_chain_register(&vmap_notify_list, nb);
1268}
1269EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1270
1271int unregister_vmap_purge_notifier(struct notifier_block *nb)
1272{
1273 return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1274}
1275EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
1276
/*
 * lazy_max_pages() bounds how much virtual address space may sit in lazily
 * freed vmap areas before a purge is forced.  Unmapped areas are only queued
 * (see free_vmap_area_noflush()) so that one global TLB flush can cover many
 * vunmaps instead of flushing on every single one.
 *
 * There is a tradeoff here: a larger threshold amortizes the TLB flush
 * better, but ties up more vmalloc address space and makes the eventual
 * purge walk longer.  The value scales logarithmically with the number of
 * online CPUs, 32MB worth of pages per log2 step.
 */
1293static unsigned long lazy_max_pages(void)
1294{
1295 unsigned int log;
1296
1297 log = fls(num_online_cpus());
1298
1299 return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1300}
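
/*
 * Worked example (illustrative): with 16 CPUs online, fls(16) == 5, so
 * lazy_max_pages() allows roughly 5 * 32MB == 160MB worth of lazily
 * unmapped address space (expressed in pages) to accumulate before a
 * purge is triggered.
 */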
1301
1302static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1303
1304
1305
1306
1307
1308
1309static DEFINE_MUTEX(vmap_purge_lock);
1310
1311
1312static void purge_fragmented_blocks_allcpus(void);
1313
1314
1315
1316
1317
1318void set_iounmap_nonlazy(void)
1319{
1320 atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1321}
1322
1323
1324
1325
1326static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1327{
1328 unsigned long resched_threshold;
1329 struct llist_node *valist;
1330 struct vmap_area *va;
1331 struct vmap_area *n_va;
1332
1333 lockdep_assert_held(&vmap_purge_lock);
1334
1335 valist = llist_del_all(&vmap_purge_list);
1336 if (unlikely(valist == NULL))
1337 return false;
1338
1339
1340
1341
1342
1343 llist_for_each_entry(va, valist, purge_list) {
1344 if (va->va_start < start)
1345 start = va->va_start;
1346 if (va->va_end > end)
1347 end = va->va_end;
1348 }
1349
1350 flush_tlb_kernel_range(start, end);
1351 resched_threshold = lazy_max_pages() << 1;
1352
1353 spin_lock(&free_vmap_area_lock);
1354 llist_for_each_entry_safe(va, n_va, valist, purge_list) {
1355 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1356 unsigned long orig_start = va->va_start;
1357 unsigned long orig_end = va->va_end;
1358
1359
1360
1361
1362
1363
1364 va = merge_or_add_vmap_area(va, &free_vmap_area_root,
1365 &free_vmap_area_list);
1366
1367 if (!va)
1368 continue;
1369
1370 if (is_vmalloc_or_module_addr((void *)orig_start))
1371 kasan_release_vmalloc(orig_start, orig_end,
1372 va->va_start, va->va_end);
1373
1374 atomic_long_sub(nr, &vmap_lazy_nr);
1375
1376 if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1377 cond_resched_lock(&free_vmap_area_lock);
1378 }
1379 spin_unlock(&free_vmap_area_lock);
1380 return true;
1381}
1382
1383
1384
1385
1386
1387static void try_purge_vmap_area_lazy(void)
1388{
1389 if (mutex_trylock(&vmap_purge_lock)) {
1390 __purge_vmap_area_lazy(ULONG_MAX, 0);
1391 mutex_unlock(&vmap_purge_lock);
1392 }
1393}
1394
1395
1396
1397
1398static void purge_vmap_area_lazy(void)
1399{
1400 mutex_lock(&vmap_purge_lock);
1401 purge_fragmented_blocks_allcpus();
1402 __purge_vmap_area_lazy(ULONG_MAX, 0);
1403 mutex_unlock(&vmap_purge_lock);
1404}
1405
1406
1407
1408
1409
1410
1411static void free_vmap_area_noflush(struct vmap_area *va)
1412{
1413 unsigned long nr_lazy;
1414
1415 spin_lock(&vmap_area_lock);
1416 unlink_va(va, &vmap_area_root);
1417 spin_unlock(&vmap_area_lock);
1418
1419 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1420 PAGE_SHIFT, &vmap_lazy_nr);
1421
1422
1423 llist_add(&va->purge_list, &vmap_purge_list);
1424
1425 if (unlikely(nr_lazy > lazy_max_pages()))
1426 try_purge_vmap_area_lazy();
1427}
1428
1429
1430
1431
1432static void free_unmap_vmap_area(struct vmap_area *va)
1433{
1434 flush_cache_vunmap(va->va_start, va->va_end);
1435 unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
1436 if (debug_pagealloc_enabled_static())
1437 flush_tlb_kernel_range(va->va_start, va->va_end);
1438
1439 free_vmap_area_noflush(va);
1440}
1441
1442static struct vmap_area *find_vmap_area(unsigned long addr)
1443{
1444 struct vmap_area *va;
1445
1446 spin_lock(&vmap_area_lock);
1447 va = __find_vmap_area(addr);
1448 spin_unlock(&vmap_area_lock);
1449
1450 return va;
1451}
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464#if BITS_PER_LONG == 32
1465#define VMALLOC_SPACE (128UL*1024*1024)
1466#else
1467#define VMALLOC_SPACE (128UL*1024*1024*1024)
1468#endif
1469
1470#define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
1471#define VMAP_MAX_ALLOC BITS_PER_LONG
1472#define VMAP_BBMAP_BITS_MAX 1024
1473#define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
1474#define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y))
1475#define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y))
1476#define VMAP_BBMAP_BITS \
1477 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
1478 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
1479 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1480
1481#define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
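
/*
 * Worked example of the sizing above (illustrative, 64-bit with 4K pages and
 * NR_CPUS == 64): VMALLOC_PAGES = 128GB / 4KB = 33554432; divided by 64 CPUs
 * and then by 16 gives 32768, which is clamped to VMAP_BBMAP_BITS_MAX, so
 * VMAP_BBMAP_BITS == 1024 and VMAP_BLOCK_SIZE == 4MB.
 */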
1482
1483struct vmap_block_queue {
1484 spinlock_t lock;
1485 struct list_head free;
1486};
1487
1488struct vmap_block {
1489 spinlock_t lock;
1490 struct vmap_area *va;
1491 unsigned long free, dirty;
1492 unsigned long dirty_min, dirty_max;
1493 struct list_head free_list;
1494 struct rcu_head rcu_head;
1495 struct list_head purge;
1496};
1497
1498
1499static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1500
1501
1502
1503
1504
1505
1506static DEFINE_XARRAY(vmap_blocks);
1507
1508
1509
1510
1511
1512
1513
1514
1515static unsigned long addr_to_vb_idx(unsigned long addr)
1516{
1517 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1518 addr /= VMAP_BLOCK_SIZE;
1519 return addr;
1520}
1521
1522static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1523{
1524 unsigned long addr;
1525
1526 addr = va_start + (pages_off << PAGE_SHIFT);
1527 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1528 return (void *)addr;
1529}
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1540{
1541 struct vmap_block_queue *vbq;
1542 struct vmap_block *vb;
1543 struct vmap_area *va;
1544 unsigned long vb_idx;
1545 int node, err;
1546 void *vaddr;
1547
1548 node = numa_node_id();
1549
1550 vb = kmalloc_node(sizeof(struct vmap_block),
1551 gfp_mask & GFP_RECLAIM_MASK, node);
1552 if (unlikely(!vb))
1553 return ERR_PTR(-ENOMEM);
1554
1555 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1556 VMALLOC_START, VMALLOC_END,
1557 node, gfp_mask);
1558 if (IS_ERR(va)) {
1559 kfree(vb);
1560 return ERR_CAST(va);
1561 }
1562
1563 vaddr = vmap_block_vaddr(va->va_start, 0);
1564 spin_lock_init(&vb->lock);
1565 vb->va = va;
1566
1567 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1568 vb->free = VMAP_BBMAP_BITS - (1UL << order);
1569 vb->dirty = 0;
1570 vb->dirty_min = VMAP_BBMAP_BITS;
1571 vb->dirty_max = 0;
1572 INIT_LIST_HEAD(&vb->free_list);
1573
1574 vb_idx = addr_to_vb_idx(va->va_start);
1575 err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
1576 if (err) {
1577 kfree(vb);
1578 free_vmap_area(va);
1579 return ERR_PTR(err);
1580 }
1581
1582 vbq = &get_cpu_var(vmap_block_queue);
1583 spin_lock(&vbq->lock);
1584 list_add_tail_rcu(&vb->free_list, &vbq->free);
1585 spin_unlock(&vbq->lock);
1586 put_cpu_var(vmap_block_queue);
1587
1588 return vaddr;
1589}
1590
1591static void free_vmap_block(struct vmap_block *vb)
1592{
1593 struct vmap_block *tmp;
1594
1595 tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1596 BUG_ON(tmp != vb);
1597
1598 free_vmap_area_noflush(vb->va);
1599 kfree_rcu(vb, rcu_head);
1600}
1601
1602static void purge_fragmented_blocks(int cpu)
1603{
1604 LIST_HEAD(purge);
1605 struct vmap_block *vb;
1606 struct vmap_block *n_vb;
1607 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1608
1609 rcu_read_lock();
1610 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1611
1612 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1613 continue;
1614
1615 spin_lock(&vb->lock);
1616 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1617 vb->free = 0;
1618 vb->dirty = VMAP_BBMAP_BITS;
1619 vb->dirty_min = 0;
1620 vb->dirty_max = VMAP_BBMAP_BITS;
1621 spin_lock(&vbq->lock);
1622 list_del_rcu(&vb->free_list);
1623 spin_unlock(&vbq->lock);
1624 spin_unlock(&vb->lock);
1625 list_add_tail(&vb->purge, &purge);
1626 } else
1627 spin_unlock(&vb->lock);
1628 }
1629 rcu_read_unlock();
1630
1631 list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1632 list_del(&vb->purge);
1633 free_vmap_block(vb);
1634 }
1635}
1636
1637static void purge_fragmented_blocks_allcpus(void)
1638{
1639 int cpu;
1640
1641 for_each_possible_cpu(cpu)
1642 purge_fragmented_blocks(cpu);
1643}
1644
1645static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1646{
1647 struct vmap_block_queue *vbq;
1648 struct vmap_block *vb;
1649 void *vaddr = NULL;
1650 unsigned int order;
1651
1652 BUG_ON(offset_in_page(size));
1653 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1654 if (WARN_ON(size == 0)) {
1655
1656
1657
1658
1659
1660 return NULL;
1661 }
1662 order = get_order(size);
1663
1664 rcu_read_lock();
1665 vbq = &get_cpu_var(vmap_block_queue);
1666 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1667 unsigned long pages_off;
1668
1669 spin_lock(&vb->lock);
1670 if (vb->free < (1UL << order)) {
1671 spin_unlock(&vb->lock);
1672 continue;
1673 }
1674
1675 pages_off = VMAP_BBMAP_BITS - vb->free;
1676 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1677 vb->free -= 1UL << order;
1678 if (vb->free == 0) {
1679 spin_lock(&vbq->lock);
1680 list_del_rcu(&vb->free_list);
1681 spin_unlock(&vbq->lock);
1682 }
1683
1684 spin_unlock(&vb->lock);
1685 break;
1686 }
1687
1688 put_cpu_var(vmap_block_queue);
1689 rcu_read_unlock();
1690
1691
1692 if (!vaddr)
1693 vaddr = new_vmap_block(order, gfp_mask);
1694
1695 return vaddr;
1696}
1697
1698static void vb_free(unsigned long addr, unsigned long size)
1699{
1700 unsigned long offset;
1701 unsigned int order;
1702 struct vmap_block *vb;
1703
1704 BUG_ON(offset_in_page(size));
1705 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1706
1707 flush_cache_vunmap(addr, addr + size);
1708
1709 order = get_order(size);
1710 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
1711 vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
1712
1713 unmap_kernel_range_noflush(addr, size);
1714
1715 if (debug_pagealloc_enabled_static())
1716 flush_tlb_kernel_range(addr, addr + size);
1717
1718 spin_lock(&vb->lock);
1719
1720
1721 vb->dirty_min = min(vb->dirty_min, offset);
1722 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1723
1724 vb->dirty += 1UL << order;
1725 if (vb->dirty == VMAP_BBMAP_BITS) {
1726 BUG_ON(vb->free);
1727 spin_unlock(&vb->lock);
1728 free_vmap_block(vb);
1729 } else
1730 spin_unlock(&vb->lock);
1731}
1732
1733static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
1734{
1735 int cpu;
1736
1737 if (unlikely(!vmap_initialized))
1738 return;
1739
1740 might_sleep();
1741
1742 for_each_possible_cpu(cpu) {
1743 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1744 struct vmap_block *vb;
1745
1746 rcu_read_lock();
1747 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1748 spin_lock(&vb->lock);
1749 if (vb->dirty) {
1750 unsigned long va_start = vb->va->va_start;
1751 unsigned long s, e;
1752
1753 s = va_start + (vb->dirty_min << PAGE_SHIFT);
1754 e = va_start + (vb->dirty_max << PAGE_SHIFT);
1755
1756 start = min(s, start);
1757 end = max(e, end);
1758
1759 flush = 1;
1760 }
1761 spin_unlock(&vb->lock);
1762 }
1763 rcu_read_unlock();
1764 }
1765
1766 mutex_lock(&vmap_purge_lock);
1767 purge_fragmented_blocks_allcpus();
1768 if (!__purge_vmap_area_lazy(start, end) && flush)
1769 flush_tlb_kernel_range(start, end);
1770 mutex_unlock(&vmap_purge_lock);
1771}
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786void vm_unmap_aliases(void)
1787{
1788 unsigned long start = ULONG_MAX, end = 0;
1789 int flush = 0;
1790
1791 _vm_unmap_aliases(start, end, flush);
1792}
1793EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1794
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
1800void vm_unmap_ram(const void *mem, unsigned int count)
1801{
1802 unsigned long size = (unsigned long)count << PAGE_SHIFT;
1803 unsigned long addr = (unsigned long)mem;
1804 struct vmap_area *va;
1805
1806 might_sleep();
1807 BUG_ON(!addr);
1808 BUG_ON(addr < VMALLOC_START);
1809 BUG_ON(addr > VMALLOC_END);
1810 BUG_ON(!PAGE_ALIGNED(addr));
1811
1812 kasan_poison_vmalloc(mem, size);
1813
1814 if (likely(count <= VMAP_MAX_ALLOC)) {
1815 debug_check_no_locks_freed(mem, size);
1816 vb_free(addr, size);
1817 return;
1818 }
1819
1820 va = find_vmap_area(addr);
1821 BUG_ON(!va);
1822 debug_check_no_locks_freed((void *)va->va_start,
1823 (va->va_end - va->va_start));
1824 free_unmap_vmap_area(va);
1825}
1826EXPORT_SYMBOL(vm_unmap_ram);
1827
/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
1842void *vm_map_ram(struct page **pages, unsigned int count, int node)
1843{
1844 unsigned long size = (unsigned long)count << PAGE_SHIFT;
1845 unsigned long addr;
1846 void *mem;
1847
1848 if (likely(count <= VMAP_MAX_ALLOC)) {
1849 mem = vb_alloc(size, GFP_KERNEL);
1850 if (IS_ERR(mem))
1851 return NULL;
1852 addr = (unsigned long)mem;
1853 } else {
1854 struct vmap_area *va;
1855 va = alloc_vmap_area(size, PAGE_SIZE,
1856 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1857 if (IS_ERR(va))
1858 return NULL;
1859
1860 addr = va->va_start;
1861 mem = (void *)addr;
1862 }
1863
1864 kasan_unpoison_vmalloc(mem, size);
1865
1866 if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
1867 vm_unmap_ram(mem, count);
1868 return NULL;
1869 }
1870 return mem;
1871}
1872EXPORT_SYMBOL(vm_map_ram);
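
/*
 * Usage sketch (illustrative only, not a helper from this file; 'pages',
 * 'nr_pages' and 'src' are assumed to come from the caller).  The count
 * passed to vm_unmap_ram() must match the one given to vm_map_ram():
 *
 *	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *	if (va) {
 *		memcpy(va, src, nr_pages * PAGE_SIZE);
 *		vm_unmap_ram(va, nr_pages);
 *	}
 */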
1873
1874static struct vm_struct *vmlist __initdata;
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886void __init vm_area_add_early(struct vm_struct *vm)
1887{
1888 struct vm_struct *tmp, **p;
1889
1890 BUG_ON(vmap_initialized);
1891 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1892 if (tmp->addr >= vm->addr) {
1893 BUG_ON(tmp->addr < vm->addr + vm->size);
1894 break;
1895 } else
1896 BUG_ON(tmp->addr + tmp->size > vm->addr);
1897 }
1898 vm->next = *p;
1899 *p = vm;
1900}
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1915{
1916 static size_t vm_init_off __initdata;
1917 unsigned long addr;
1918
1919 addr = ALIGN(VMALLOC_START + vm_init_off, align);
1920 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1921
1922 vm->addr = (void *)addr;
1923
1924 vm_area_add_early(vm);
1925}
1926
1927static void vmap_init_free_space(void)
1928{
1929 unsigned long vmap_start = 1;
1930 const unsigned long vmap_end = ULONG_MAX;
1931 struct vmap_area *busy, *free;
1932
1933
1934
1935
1936
1937
1938
1939 list_for_each_entry(busy, &vmap_area_list, list) {
1940 if (busy->va_start - vmap_start > 0) {
1941 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1942 if (!WARN_ON_ONCE(!free)) {
1943 free->va_start = vmap_start;
1944 free->va_end = busy->va_start;
1945
1946 insert_vmap_area_augment(free, NULL,
1947 &free_vmap_area_root,
1948 &free_vmap_area_list);
1949 }
1950 }
1951
1952 vmap_start = busy->va_end;
1953 }
1954
1955 if (vmap_end - vmap_start > 0) {
1956 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1957 if (!WARN_ON_ONCE(!free)) {
1958 free->va_start = vmap_start;
1959 free->va_end = vmap_end;
1960
1961 insert_vmap_area_augment(free, NULL,
1962 &free_vmap_area_root,
1963 &free_vmap_area_list);
1964 }
1965 }
1966}
1967
1968void __init vmalloc_init(void)
1969{
1970 struct vmap_area *va;
1971 struct vm_struct *tmp;
1972 int i;
1973
1974
1975
1976
1977 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
1978
1979 for_each_possible_cpu(i) {
1980 struct vmap_block_queue *vbq;
1981 struct vfree_deferred *p;
1982
1983 vbq = &per_cpu(vmap_block_queue, i);
1984 spin_lock_init(&vbq->lock);
1985 INIT_LIST_HEAD(&vbq->free);
1986 p = &per_cpu(vfree_deferred, i);
1987 init_llist_head(&p->list);
1988 INIT_WORK(&p->wq, free_work);
1989 }
1990
1991
1992 for (tmp = vmlist; tmp; tmp = tmp->next) {
1993 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1994 if (WARN_ON_ONCE(!va))
1995 continue;
1996
1997 va->va_start = (unsigned long)tmp->addr;
1998 va->va_end = va->va_start + tmp->size;
1999 va->vm = tmp;
2000 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2001 }
2002
2003
2004
2005
2006 vmap_init_free_space();
2007 vmap_initialized = true;
2008}
2009
/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
2018void unmap_kernel_range(unsigned long addr, unsigned long size)
2019{
2020 unsigned long end = addr + size;
2021
2022 flush_cache_vunmap(addr, end);
2023 unmap_kernel_range_noflush(addr, size);
2024 flush_tlb_kernel_range(addr, end);
2025}
2026
2027static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2028 struct vmap_area *va, unsigned long flags, const void *caller)
2029{
2030 vm->flags = flags;
2031 vm->addr = (void *)va->va_start;
2032 vm->size = va->va_end - va->va_start;
2033 vm->caller = caller;
2034 va->vm = vm;
2035}
2036
2037static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2038 unsigned long flags, const void *caller)
2039{
2040 spin_lock(&vmap_area_lock);
2041 setup_vmalloc_vm_locked(vm, va, flags, caller);
2042 spin_unlock(&vmap_area_lock);
2043}
2044
2045static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2046{
2047
2048
2049
2050
2051
2052 smp_wmb();
2053 vm->flags &= ~VM_UNINITIALIZED;
2054}
2055
2056static struct vm_struct *__get_vm_area_node(unsigned long size,
2057 unsigned long align, unsigned long flags, unsigned long start,
2058 unsigned long end, int node, gfp_t gfp_mask, const void *caller)
2059{
2060 struct vmap_area *va;
2061 struct vm_struct *area;
2062 unsigned long requested_size = size;
2063
2064 BUG_ON(in_interrupt());
2065 size = PAGE_ALIGN(size);
2066 if (unlikely(!size))
2067 return NULL;
2068
2069 if (flags & VM_IOREMAP)
2070 align = 1ul << clamp_t(int, get_count_order_long(size),
2071 PAGE_SHIFT, IOREMAP_MAX_ORDER);
2072
2073 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2074 if (unlikely(!area))
2075 return NULL;
2076
2077 if (!(flags & VM_NO_GUARD))
2078 size += PAGE_SIZE;
2079
2080 va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2081 if (IS_ERR(va)) {
2082 kfree(area);
2083 return NULL;
2084 }
2085
2086 kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2087
2088 setup_vmalloc_vm(area, va, flags, caller);
2089
2090 return area;
2091}
2092
2093struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2094 unsigned long start, unsigned long end,
2095 const void *caller)
2096{
2097 return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2098 GFP_KERNEL, caller);
2099}
2100
/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
2112struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2113{
2114 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2115 NUMA_NO_NODE, GFP_KERNEL,
2116 __builtin_return_address(0));
2117}
2118
2119struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2120 const void *caller)
2121{
2122 return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2123 NUMA_NO_NODE, GFP_KERNEL, caller);
2124}
2125
/**
 * find_vm_area - find a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
2136struct vm_struct *find_vm_area(const void *addr)
2137{
2138 struct vmap_area *va;
2139
2140 va = find_vmap_area((unsigned long)addr);
2141 if (!va)
2142 return NULL;
2143
2144 return va->vm;
2145}
2146
/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, unlink it from the
 * vmap tree and unmap it.  The struct vm_struct is returned to the caller,
 * which is responsible for freeing it (and its pages, if any).
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
2157struct vm_struct *remove_vm_area(const void *addr)
2158{
2159 struct vmap_area *va;
2160
2161 might_sleep();
2162
2163 spin_lock(&vmap_area_lock);
2164 va = __find_vmap_area((unsigned long)addr);
2165 if (va && va->vm) {
2166 struct vm_struct *vm = va->vm;
2167
2168 va->vm = NULL;
2169 spin_unlock(&vmap_area_lock);
2170
2171 kasan_free_shadow(vm);
2172 free_unmap_vmap_area(va);
2173
2174 return vm;
2175 }
2176
2177 spin_unlock(&vmap_area_lock);
2178 return NULL;
2179}
2180
2181static inline void set_area_direct_map(const struct vm_struct *area,
2182 int (*set_direct_map)(struct page *page))
2183{
2184 int i;
2185
2186 for (i = 0; i < area->nr_pages; i++)
2187 if (page_address(area->pages[i]))
2188 set_direct_map(area->pages[i]);
2189}
2190
2191
2192static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2193{
2194 unsigned long start = ULONG_MAX, end = 0;
2195 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2196 int flush_dmap = 0;
2197 int i;
2198
2199 remove_vm_area(area->addr);
2200
2201
2202 if (!flush_reset)
2203 return;
2204
2205
2206
2207
2208
2209 if (!deallocate_pages) {
2210 vm_unmap_aliases();
2211 return;
2212 }
2213
2214
2215
2216
2217
2218
2219 for (i = 0; i < area->nr_pages; i++) {
2220 unsigned long addr = (unsigned long)page_address(area->pages[i]);
2221 if (addr) {
2222 start = min(addr, start);
2223 end = max(addr + PAGE_SIZE, end);
2224 flush_dmap = 1;
2225 }
2226 }
2227
2228
2229
2230
2231
2232
2233 set_area_direct_map(area, set_direct_map_invalid_noflush);
2234 _vm_unmap_aliases(start, end, flush_dmap);
2235 set_area_direct_map(area, set_direct_map_default_noflush);
2236}
2237
2238static void __vunmap(const void *addr, int deallocate_pages)
2239{
2240 struct vm_struct *area;
2241
2242 if (!addr)
2243 return;
2244
2245 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2246 addr))
2247 return;
2248
2249 area = find_vm_area(addr);
2250 if (unlikely(!area)) {
2251 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2252 addr);
2253 return;
2254 }
2255
2256 debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2257 debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2258
2259 kasan_poison_vmalloc(area->addr, area->size);
2260
2261 vm_remove_mappings(area, deallocate_pages);
2262
2263 if (deallocate_pages) {
2264 int i;
2265
2266 for (i = 0; i < area->nr_pages; i++) {
2267 struct page *page = area->pages[i];
2268
2269 BUG_ON(!page);
2270 __free_pages(page, 0);
2271 }
2272 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2273
2274 kvfree(area->pages);
2275 }
2276
2277 kfree(area);
2278 return;
2279}
2280
2281static inline void __vfree_deferred(const void *addr)
2282{
2283
2284
2285
2286
2287
2288
2289 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2290
2291 if (llist_add((struct llist_node *)addr, &p->list))
2292 schedule_work(&p->wq);
2293}
2294
2295
2296
2297
2298
2299
2300
2301
2302void vfree_atomic(const void *addr)
2303{
2304 BUG_ON(in_nmi());
2305
2306 kmemleak_free(addr);
2307
2308 if (!addr)
2309 return;
2310 __vfree_deferred(addr);
2311}
2312
2313static void __vfree(const void *addr)
2314{
2315 if (unlikely(in_interrupt()))
2316 __vfree_deferred(addr);
2317 else
2318 __vunmap(addr, 1);
2319}
2320
/**
 * vfree - Release memory allocated by vmalloc()
 * @addr: Memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as obtained
 * from one of the vmalloc() family of APIs.  This will usually also free the
 * physical memory underlying the virtual allocation, but that memory is
 * reference counted, so it will not be freed until the last user goes away.
 *
 * If @addr is NULL, no operation is performed.
 *
 * Context:
 * May sleep if called *not* from interrupt context.
 * Must not be called in NMI context.
 */
2338void vfree(const void *addr)
2339{
2340 BUG_ON(in_nmi());
2341
2342 kmemleak_free(addr);
2343
2344 might_sleep_if(!in_interrupt());
2345
2346 if (!addr)
2347 return;
2348
2349 __vfree(addr);
2350}
2351EXPORT_SYMBOL(vfree);
2352
/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
2362void vunmap(const void *addr)
2363{
2364 BUG_ON(in_interrupt());
2365 might_sleep();
2366 if (addr)
2367 __vunmap(addr, 0);
2368}
2369EXPORT_SYMBOL(vunmap);
2370
/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual space.
 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array
 * itself is transferred to the mapping and it is dropped when vfree() is
 * called on the returned address.
 *
 * Return: the address of the area or %NULL on failure
 */
2386void *vmap(struct page **pages, unsigned int count,
2387 unsigned long flags, pgprot_t prot)
2388{
2389 struct vm_struct *area;
2390 unsigned long size;
2391
2392 might_sleep();
2393
2394 if (count > totalram_pages())
2395 return NULL;
2396
2397 size = (unsigned long)count << PAGE_SHIFT;
2398 area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2399 if (!area)
2400 return NULL;
2401
2402 if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
2403 pages) < 0) {
2404 vunmap(area->addr);
2405 return NULL;
2406 }
2407
2408 if (flags & VM_MAP_PUT_PAGES)
2409 area->pages = pages;
2410 return area->addr;
2411}
2412EXPORT_SYMBOL(vmap);
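
/*
 * Usage sketch (illustrative only; 'pages' and 'nr_pages' are assumed to be
 * provided by the caller): map a caller-allocated page array into a
 * contiguous kernel virtual range and tear it down again.  The pages stay
 * owned by the caller unless VM_MAP_PUT_PAGES is passed.
 *
 *	void *buf = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	if (buf) {
 *		... access nr_pages * PAGE_SIZE bytes at buf ...
 *		vunmap(buf);
 *	}
 */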
2413
2414#ifdef CONFIG_VMAP_PFN
2415struct vmap_pfn_data {
2416 unsigned long *pfns;
2417 pgprot_t prot;
2418 unsigned int idx;
2419};
2420
2421static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2422{
2423 struct vmap_pfn_data *data = private;
2424
2425 if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
2426 return -EINVAL;
2427 *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
2428 return 0;
2429}
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2441{
2442 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2443 struct vm_struct *area;
2444
2445 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2446 __builtin_return_address(0));
2447 if (!area)
2448 return NULL;
2449 if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2450 count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2451 free_vm_area(area);
2452 return NULL;
2453 }
2454 return area->addr;
2455}
2456EXPORT_SYMBOL_GPL(vmap_pfn);
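
/*
 * Usage sketch (illustrative only): vmap_pfn() is for PFNs that have no
 * struct page backing (the helper warns if pfn_valid() is true for any
 * entry), e.g. device memory exposed by a driver.  'pfns' and 'nr' are
 * assumed to come from the caller.
 *
 *	void *va = vmap_pfn(pfns, nr, pgprot_writecombine(PAGE_KERNEL));
 *	if (va) {
 *		... access the mapping ...
 *		vunmap(va);
 *	}
 */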
2457#endif
2458
2459static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2460 pgprot_t prot, int node)
2461{
2462 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2463 unsigned int nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
2464 unsigned int array_size = nr_pages * sizeof(struct page *), i;
2465 struct page **pages;
2466
2467 gfp_mask |= __GFP_NOWARN;
2468 if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
2469 gfp_mask |= __GFP_HIGHMEM;
2470
2471
2472 if (array_size > PAGE_SIZE) {
2473 pages = __vmalloc_node(array_size, 1, nested_gfp, node,
2474 area->caller);
2475 } else {
2476 pages = kmalloc_node(array_size, nested_gfp, node);
2477 }
2478
2479 if (!pages) {
2480 remove_vm_area(area->addr);
2481 kfree(area);
2482 return NULL;
2483 }
2484
2485 area->pages = pages;
2486 area->nr_pages = nr_pages;
2487
2488 for (i = 0; i < area->nr_pages; i++) {
2489 struct page *page;
2490
2491 if (node == NUMA_NO_NODE)
2492 page = alloc_page(gfp_mask);
2493 else
2494 page = alloc_pages_node(node, gfp_mask, 0);
2495
2496 if (unlikely(!page)) {
2497
2498 area->nr_pages = i;
2499 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2500 goto fail;
2501 }
2502 area->pages[i] = page;
2503 if (gfpflags_allow_blocking(gfp_mask))
2504 cond_resched();
2505 }
2506 atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2507
2508 if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area),
2509 prot, pages) < 0)
2510 goto fail;
2511
2512 return area->addr;
2513
2514fail:
2515 warn_alloc(gfp_mask, NULL,
2516 "vmalloc: allocation failure, allocated %ld of %ld bytes",
2517 (area->nr_pages*PAGE_SIZE), area->size);
2518 __vfree(area->addr);
2519 return NULL;
2520}
2521
/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @start: vm area range start
 * @end: vm area range end
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 *
 * Return: the address of the area or %NULL on failure
 */
2540void *__vmalloc_node_range(unsigned long size, unsigned long align,
2541 unsigned long start, unsigned long end, gfp_t gfp_mask,
2542 pgprot_t prot, unsigned long vm_flags, int node,
2543 const void *caller)
2544{
2545 struct vm_struct *area;
2546 void *addr;
2547 unsigned long real_size = size;
2548
2549 size = PAGE_ALIGN(size);
2550 if (!size || (size >> PAGE_SHIFT) > totalram_pages())
2551 goto fail;
2552
2553 area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
2554 vm_flags, start, end, node, gfp_mask, caller);
2555 if (!area)
2556 goto fail;
2557
2558 addr = __vmalloc_area_node(area, gfp_mask, prot, node);
2559 if (!addr)
2560 return NULL;
2561
2562
2563
2564
2565
2566
2567 clear_vm_uninitialized_flag(area);
2568
2569 kmemleak_vmalloc(area, size, gfp_mask);
2570
2571 return addr;
2572
2573fail:
2574 warn_alloc(gfp_mask, NULL,
2575 "vmalloc: allocation failure: %lu bytes", real_size);
2576 return NULL;
2577}
2578
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level allocator with
 * @gfp_mask flags.  Map them into contiguous kernel virtual space.
 *
 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_NOFAIL and
 * __GFP_RETRY_MAYFAIL are not supported.
 *
 * __GFP_NOWARN can be used to suppress failure messages.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
2598void *__vmalloc_node(unsigned long size, unsigned long align,
2599 gfp_t gfp_mask, int node, const void *caller)
2600{
2601 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
2602 gfp_mask, PAGE_KERNEL, 0, node, caller);
2603}
2604
2605
2606
2607
2608
2609#ifdef CONFIG_TEST_VMALLOC_MODULE
2610EXPORT_SYMBOL_GPL(__vmalloc_node);
2611#endif
2612
2613void *__vmalloc(unsigned long size, gfp_t gfp_mask)
2614{
2615 return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
2616 __builtin_return_address(0));
2617}
2618EXPORT_SYMBOL(__vmalloc);
2619
/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
2632void *vmalloc(unsigned long size)
2633{
2634 return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
2635 __builtin_return_address(0));
2636}
2637EXPORT_SYMBOL(vmalloc);
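
/*
 * Usage sketch (illustrative only; 'struct foo' and 'nents' are hypothetical
 * caller-side names): vmalloc() suits large buffers that must be virtually
 * but not physically contiguous, released with vfree().
 *
 *	struct foo *table = vmalloc(array_size(nents, sizeof(*table)));
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	vfree(table);
 */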
2638
/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
2652void *vzalloc(unsigned long size)
2653{
2654 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
2655 __builtin_return_address(0));
2656}
2657EXPORT_SYMBOL(vzalloc);
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668void *vmalloc_user(unsigned long size)
2669{
2670 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2671 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
2672 VM_USERMAP, NUMA_NO_NODE,
2673 __builtin_return_address(0));
2674}
2675EXPORT_SYMBOL(vmalloc_user);
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690void *vmalloc_node(unsigned long size, int node)
2691{
2692 return __vmalloc_node(size, 1, GFP_KERNEL, node,
2693 __builtin_return_address(0));
2694}
2695EXPORT_SYMBOL(vmalloc_node);
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708void *vzalloc_node(unsigned long size, int node)
2709{
2710 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
2711 __builtin_return_address(0));
2712}
2713EXPORT_SYMBOL(vzalloc_node);
2714
2715#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2716#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2717#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2718#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
2719#else
2720
2721
2722
2723
2724#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
2725#endif
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736void *vmalloc_32(unsigned long size)
2737{
2738 return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
2739 __builtin_return_address(0));
2740}
2741EXPORT_SYMBOL(vmalloc_32);
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752void *vmalloc_32_user(unsigned long size)
2753{
2754 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
2755 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2756 VM_USERMAP, NUMA_NO_NODE,
2757 __builtin_return_address(0));
2758}
2759EXPORT_SYMBOL(vmalloc_32_user);
2760
2761
2762
2763
2764
2765
2766static int aligned_vread(char *buf, char *addr, unsigned long count)
2767{
2768 struct page *p;
2769 int copied = 0;
2770
2771 while (count) {
2772 unsigned long offset, length;
2773
2774 offset = offset_in_page(addr);
2775 length = PAGE_SIZE - offset;
2776 if (length > count)
2777 length = count;
2778 p = vmalloc_to_page(addr);
2779
2780
2781
2782
2783
2784
2785
2786 if (p) {
2787
2788
2789
2790
2791 void *map = kmap_atomic(p);
2792 memcpy(buf, map + offset, length);
2793 kunmap_atomic(map);
2794 } else
2795 memset(buf, 0, length);
2796
2797 addr += length;
2798 buf += length;
2799 copied += length;
2800 count -= length;
2801 }
2802 return copied;
2803}
2804
2805static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2806{
2807 struct page *p;
2808 int copied = 0;
2809
2810 while (count) {
2811 unsigned long offset, length;
2812
2813 offset = offset_in_page(addr);
2814 length = PAGE_SIZE - offset;
2815 if (length > count)
2816 length = count;
2817 p = vmalloc_to_page(addr);
2818
2819
2820
2821
2822
2823
2824
2825 if (p) {
2826
2827
2828
2829
2830 void *map = kmap_atomic(p);
2831 memcpy(map + offset, buf, length);
2832 kunmap_atomic(map);
2833 }
2834 addr += length;
2835 buf += length;
2836 copied += length;
2837 count -= length;
2838 }
2839 return copied;
2840}
2841
/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * copies data from that area to the given buffer.  If the memory range
 * [addr...addr+count) includes some valid address, data is copied to
 * the proper area of @buf.  If there are memory holes, they'll be
 * zero-filled.  IOREMAP areas are treated as memory holes and no copy
 * is done.
 *
 * Note: in usual ops, vread() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, as /dev/kmem.
 *
 * Return: number of bytes for which @addr and @buf should be increased
 * (same number as @count) or %0 if [addr...addr+count) doesn't
 * include any intersection with a valid vmalloc area.
 */
2866long vread(char *buf, char *addr, unsigned long count)
2867{
2868 struct vmap_area *va;
2869 struct vm_struct *vm;
2870 char *vaddr, *buf_start = buf;
2871 unsigned long buflen = count;
2872 unsigned long n;
2873
2874
2875 if ((unsigned long) addr + count < count)
2876 count = -(unsigned long) addr;
2877
2878 spin_lock(&vmap_area_lock);
2879 list_for_each_entry(va, &vmap_area_list, list) {
2880 if (!count)
2881 break;
2882
2883 if (!va->vm)
2884 continue;
2885
2886 vm = va->vm;
2887 vaddr = (char *) vm->addr;
2888 if (addr >= vaddr + get_vm_area_size(vm))
2889 continue;
2890 while (addr < vaddr) {
2891 if (count == 0)
2892 goto finished;
2893 *buf = '\0';
2894 buf++;
2895 addr++;
2896 count--;
2897 }
2898 n = vaddr + get_vm_area_size(vm) - addr;
2899 if (n > count)
2900 n = count;
2901 if (!(vm->flags & VM_IOREMAP))
2902 aligned_vread(buf, addr, n);
2903 else
2904 memset(buf, 0, n);
2905 buf += n;
2906 addr += n;
2907 count -= n;
2908 }
2909finished:
2910 spin_unlock(&vmap_area_lock);
2911
2912 if (buf == buf_start)
2913 return 0;
2914
2915 if (buf != buf_start + buflen)
2916 memset(buf, 0, buflen - (buf - buf_start));
2917
2918 return buflen;
2919}
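
/*
 * Illustrative only (not part of this file): a sketch of how a
 * /proc-style reader might use vread() to copy from an arbitrary vmalloc
 * address into a bounce buffer before handing it to userspace; holes in
 * the range come back as zeroes. The name dump_vmalloc() and its
 * parameters are hypothetical.
 *
 *	static ssize_t dump_vmalloc(char __user *ubuf, char *kaddr, size_t len)
 *	{
 *		char *bounce;
 *		long copied;
 *		ssize_t ret = -EFAULT;
 *
 *		bounce = kzalloc(len, GFP_KERNEL);
 *		if (!bounce)
 *			return -ENOMEM;
 *
 *		copied = vread(bounce, kaddr, len);
 *		if (copied > 0 && !copy_to_user(ubuf, bounce, copied))
 *			ret = copied;
 *
 *		kfree(bounce);
 *		return ret;
 *	}
 */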

/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be written.
 *
 * This function checks that @addr is a valid vmalloc'ed area, and copies
 * data from the given buffer to that area. If the range
 * [addr...addr+count) includes some valid addresses, the data is copied
 * from the proper place in @buf. Memory holes are skipped, and IOREMAP
 * areas are treated as memory holes (no copy is done).
 *
 * If [addr...addr+count) doesn't intersect any live vm_struct area,
 * this returns 0. @buf must be a kernel buffer.
 *
 * Note: in normal operation vwrite() is never necessary because the
 * caller knows the vmalloc() area is valid and can use memcpy(). This is
 * for routines that have to access a vmalloc area without any other
 * information.
 *
 * Return: number of bytes for which @addr and @buf should be increased
 * (same number as @count), or %0 if [addr...addr+count) doesn't include
 * any intersection with a valid vmalloc area.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!va->vm)
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}

/**
 * remap_vmalloc_range_partial - map vmalloc pages to userspace
 * @vma:	vma to cover
 * @uaddr:	target user address to start at
 * @kaddr:	virtual address of vmalloc kernel memory
 * @pgoff:	offset from @kaddr to start at
 * @size:	size of map area
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @kaddr is a valid vmalloc'ed area,
 * and that it is big enough to cover the range starting at
 * @uaddr in @vma. Will return failure if that criteria isn't
 * met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long pgoff,
				unsigned long size)
{
	struct vm_struct *area;
	unsigned long off;
	unsigned long end_index;

	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
		return -EINVAL;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
		return -EINVAL;

	if (check_add_overflow(size, off, &end_index) ||
	    end_index > get_vm_area_size(area))
		return -EINVAL;
	kaddr += off;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);

/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma:	vma to cover (map full range of vma)
 * @addr:	vmalloc memory
 * @pgoff:	number of pages into addr before first page to map
 *
 * Returns:	0 for success, -Exxx on failure
 *
 * This function checks that @addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criteria isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr, pgoff,
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);
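
/*
 * Illustrative only (not part of this file): a minimal ->mmap() handler
 * for a hypothetical driver that exposes a buffer allocated with
 * vmalloc_user() (so the area carries VM_USERMAP) to userspace. "my_buf"
 * and "my_buf_size" are made-up names; remap_vmalloc_range_partial() also
 * validates that the requested range fits within the vmalloc area.
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long len = vma->vm_end - vma->vm_start;
 *
 *		if (len > PAGE_ALIGN(my_buf_size))
 *			return -EINVAL;
 *
 *		return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
 *	}
 */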

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return rb_entry_safe(n, struct vmap_area, rb_node);
}

/**
 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
 * @addr: target address
 *
 * Returns: the free vmap_area that encloses @addr if one is found.
 *   Otherwise the highest (reverse order) vmap_area that ends below
 *   @addr is returned, i.e. the closest free area below the address.
 *   NULL is returned if there is no such area either.
 */
static struct vmap_area *
pvm_find_va_enclose_addr(unsigned long addr)
{
	struct vmap_area *va, *tmp;
	struct rb_node *n;

	n = free_vmap_area_root.rb_node;
	va = NULL;

	while (n) {
		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_start <= addr) {
			va = tmp;
			if (tmp->va_end >= addr)
				break;

			n = n->rb_right;
		} else {
			n = n->rb_left;
		}
	}

	return va;
}

/**
 * pvm_determine_end_from_reverse - find the highest aligned address
 * of a free block, scanning downwards from an appropriate start
 * @va:
 *	in - the VA we start the search from (in reverse order);
 *	out - the VA whose end address was used.
 * @align: alignment for the returned address
 *
 * Returns: the determined end address within a free vmap_area, or 0 if
 * no suitable area is found.
 */
static unsigned long
pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
{
	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (likely(*va)) {
		list_for_each_entry_from_reverse((*va),
				&free_vmap_area_list, list) {
			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
			if ((*va)->va_start < addr)
				return addr;
		}
	}

	return 0;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far, distance between two areas easily going up
 * to gigabytes.  To avoid interacting with regular vmallocs, these
 * areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans free blocks from the end looking
 * for a matching base.  While scanning, if any of the areas do not fit,
 * the base address is pulled down to fit the area.  Scanning is repeated
 * till all the areas fit and then all necessary data structures are
 * inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *va;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, size, end, last_end, orig_start, orig_end;
	bool purged = false;
	enum fit_type type;

	/* verify parameters and allocate data structures */
	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = area + 1; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			BUG_ON(start2 < end && start < end2);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&free_vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	va = pvm_find_va_enclose_addr(vmalloc_end);
	base = pvm_determine_end_from_reverse(&va, align) - end;

	while (true) {
		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end)
			goto overflow;

		/*
		 * Fitting base has not been found.
		 */
		if (va == NULL)
			goto overflow;

		/*
		 * If required width exceeds current VA block, move
		 * base downwards and then recheck.
		 */
		if (base + end > va->va_end) {
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If this VA does not fit, move base downwards and recheck.
		 */
		if (base + start < va->va_start) {
			va = node_to_va(rb_prev(&va->rb_node));
			base = pvm_determine_end_from_reverse(&va, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;

		start = offsets[area];
		end = start + sizes[area];
		va = pvm_find_va_enclose_addr(base + end);
	}

	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		int ret;

		start = base + offsets[area];
		size = sizes[area];

		va = pvm_find_va_enclose_addr(start);
		if (WARN_ON_ONCE(va == NULL))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		type = classify_va_fit_type(va, start, size);
		if (WARN_ON_ONCE(type == NOTHING_FIT))
			/* It is a BUG(), but trigger recovery instead. */
			goto recovery;

		ret = adjust_va_to_fit_type(va, start, size, type);
		if (unlikely(ret))
			goto recovery;

		/* Allocated area. */
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;
	}

	spin_unlock(&free_vmap_area_lock);

	/* populate the kasan shadow space */
	for (area = 0; area < nr_vms; area++) {
		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
			goto err_free_shadow;

		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
				       sizes[area]);
	}

	/* insert all vm's */
	spin_lock(&vmap_area_lock);
	for (area = 0; area < nr_vms; area++) {
		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);

		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
					pcpu_get_vm_areas);
	}
	spin_unlock(&vmap_area_lock);

	kfree(vas);
	return vms;

recovery:
	/*
	 * Remove previously allocated areas. There is no
	 * need to remove these areas from the busy tree,
	 * because they are inserted only on the final step
	 * and only when pcpu_get_vm_areas() succeeds.
	 */
	while (area--) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
					    &free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&free_vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = true;

		/* Before "retry", check if we recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;

err_free_shadow:
	spin_lock(&free_vmap_area_lock);
	/*
	 * We release all the vmalloc shadows, even the ones for regions
	 * that hadn't been successfully added. This relies on
	 * kasan_release_vmalloc() working correctly for such regions.
	 */
	for (area = 0; area < nr_vms; area++) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
					    &free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
		kfree(vms[area]);
	}
	spin_unlock(&free_vmap_area_lock);
	kfree(vas);
	kfree(vms);
	return NULL;
}

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
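
/*
 * Illustrative only (not part of this file): the expected calling pattern
 * for this pair, loosely modelled on how the percpu allocator uses it.
 * The offsets, sizes and alignment below are made up; every entry must be
 * aligned to @align and the ranges must not overlap.
 *
 *	static const unsigned long offs[] = { 0, SZ_4M };
 *	static const size_t sizes[] = { SZ_2M, SZ_2M };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offs, sizes, ARRAY_SIZE(offs), SZ_2M);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, ARRAY_SIZE(offs));
 */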
#endif

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_purge_lock)
	__acquires(&vmap_area_lock)
{
	mutex_lock(&vmap_purge_lock);
	spin_lock(&vmap_area_lock);

	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
	__releases(&vmap_purge_lock)
{
	/* Release in the reverse order of acquisition in s_start(). */
	spin_unlock(&vmap_area_lock);
	mutex_unlock(&vmap_purge_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static void show_purge_info(struct seq_file *m)
{
	struct llist_node *head;
	struct vmap_area *va;

	head = READ_ONCE(vmap_purge_list.first);
	if (head == NULL)
		return;

	llist_for_each_entry(va, head, purge_list) {
		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	/*
	 * s_show can encounter a race with remove_vm_area(): when !vm the
	 * vmap area is being torn down or it was a vm_map_ram() allocation.
	 */
	if (!va->vm) {
		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);

		return 0;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_DMA_COHERENT)
		seq_puts(m, " dma-coherent");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');

	/*
	 * As a final step, dump the "unpurged" areas once the last
	 * element of vmap_area_list has been printed.
	 */
	if (list_is_last(&va->list, &vmap_area_list))
		show_purge_info(m);

	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);

#endif
