/*
 * mm/percpu.c - percpu memory allocator
 *
 * This is the dynamic percpu allocator.  Percpu areas are split into
 * chunks; each chunk consists of one unit per possible CPU, and every
 * allocation hands out the same offset within each unit.  Allocation
 * state inside a chunk is tracked by an offset map (see struct
 * pcpu_chunk below) and chunks are created and destroyed on demand.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT            5
#define PCPU_DFL_MAP_ALLOC              16
#define PCPU_ATOMIC_MAP_MARGIN_LOW      32
#define PCPU_ATOMIC_MAP_MARGIN_HIGH     64
#define PCPU_EMPTY_POP_PAGES_LOW        2
#define PCPU_EMPTY_POP_PAGES_HIGH       4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)                                        \
        (void __percpu *)((unsigned long)(addr) -                       \
                          (unsigned long)pcpu_base_addr +               \
                          (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)                                         \
        (void __force *)((unsigned long)(ptr) +                         \
                         (unsigned long)pcpu_base_addr -                \
                         (unsigned long)__per_cpu_start)
#endif
#else   /* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)        (void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)         (void __force *)(ptr)
#endif  /* CONFIG_SMP */

struct pcpu_chunk {
        struct list_head        list;           /* linked to pcpu_slot lists */
        int                     free_size;      /* free bytes in the chunk */
        int                     contig_hint;    /* max contiguous size hint */
        void                    *base_addr;     /* base address of this chunk */

        int                     map_used;       /* # of map entries used before the sentry */
        int                     map_alloc;      /* # of map entries allocated */
        int                     *map;           /* allocation map */
        struct work_struct      map_extend_work;/* async ->map[] extension */

        void                    *data;          /* chunk data */
        int                     first_free;     /* no free below this */
        bool                    immutable;      /* no [de]population allowed */
        int                     nr_populated;   /* # of populated pages */
        unsigned long           populated[];    /* populated bitmap */
};
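
/*
 * Illustrative sketch (not from the original source): ->map[] holds
 * byte offsets of area boundaries in ascending order and steals bit 0
 * of each offset as the in-use flag; offsets stay even because every
 * allocation is forced to 2-byte alignment.  A chunk with a single
 * 128-byte allocation at offset 0 followed by free space would look
 * like:
 *
 *      map[0] = 0 | 1;                 // [0, 128) in use
 *      map[1] = 128;                   // [128, unit_size) free
 *      map[2] = pcpu_unit_size | 1;    // sentry closing the map
 *      map_used = 2;                   // areas before the sentry
 */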

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __read_mostly;
static unsigned int pcpu_high_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;          /* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;   /* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

static DEFINE_SPINLOCK(pcpu_lock);      /* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);  /* chunk create/destroy, [de]pop */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
static int pcpu_nr_empty_pop_pages;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and schedule
 * balance work if the list is too short or a fully free chunk can be
 * reclaimed.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
        if (pcpu_async_enabled)
                schedule_work(&pcpu_balance_work);
}

static bool pcpu_addr_in_first_chunk(void *addr)
{
        void *first_start = pcpu_first_chunk->base_addr;

        return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
        void *first_start = pcpu_first_chunk->base_addr;

        return addr >= first_start &&
                addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
        int highbit = fls(size);        /* size is in bytes */
        return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
        if (size == pcpu_unit_size)
                return pcpu_nr_slots - 1;
        return __pcpu_size_to_slot(size);
}
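
/*
 * Worked example (illustrative): with PCPU_SLOT_BASE_SHIFT == 5, a
 * 64-byte free size has fls(64) == 7 and maps to slot 7 - 5 + 2 == 4;
 * each doubling of the size moves a chunk up one slot, and anything
 * with fls() <= 4 bottoms out at slot 1.  A completely free chunk is
 * special-cased into the last slot so the balance worker can find
 * fully free chunks quickly.
 */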

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
        if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
                return 0;

        return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
        page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
        return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
        return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
                                     unsigned int cpu, int page_idx)
{
        return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
                (page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
                                           int *rs, int *re, int end)
{
        *rs = find_next_zero_bit(chunk->populated, end, *rs);
        *re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
                                         int *rs, int *re, int end)
{
        *rs = find_next_bit(chunk->populated, end, *rs);
        *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)               \
        for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
             (rs) < (re);                                                   \
             (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)                 \
        for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
             (rs) < (re);                                                   \
             (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
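
/*
 * Usage sketch (illustrative): populate every unpopulated page region
 * of a chunk, mirroring how the allocator itself uses the iterator:
 *
 *      int rs, re;
 *
 *      pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
 *              // [rs, re) is a maximal run of unpopulated pages
 *              pcpu_populate_chunk(chunk, rs, re);
 *      }
 */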

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
        if (WARN_ON_ONCE(!slab_is_available()))
                return NULL;

        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
        else
                return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
        kvfree(ptr);
}

/**
 * pcpu_count_occupied_pages - count the number of pages an area occupies
 * @chunk: chunk of interest
 * @i: index of the area in question
 *
 * Count the number of pages chunk's @i'th area occupies.  When the area's
 * start and/or end address isn't aligned to page boundary, the straddled
 * page is included in the count iff the rest of the page is free.
 */
static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
{
        int off = chunk->map[i] & ~1;
        int end = chunk->map[i + 1] & ~1;

        if (!PAGE_ALIGNED(off) && i > 0) {
                int prev = chunk->map[i - 1];

                if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
                        off = round_down(off, PAGE_SIZE);
        }

        if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
                int next = chunk->map[i + 1];
                int nend = chunk->map[i + 2] & ~1;

                if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
                        end = round_up(end, PAGE_SIZE);
        }

        return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
}
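
/*
 * Example (illustrative, 4K pages): for an area [0x1080, 0x3100) whose
 * neighbours are both in use, only the fully contained page
 * [0x2000, 0x3000) counts, so the result is 1.  If the previous area
 * is free and reaches back past 0x1000, the start rounds down to
 * 0x1000 and the result becomes 2.
 */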

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
        int nslot = pcpu_chunk_slot(chunk);

        if (chunk != pcpu_reserved_chunk && oslot != nslot) {
                if (oslot < nslot)
                        list_move(&chunk->list, &pcpu_slot[nslot]);
                else
                        list_move_tail(&chunk->list, &pcpu_slot[nslot]);
        }
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 * @is_atomic: the allocation context
 *
 * Determine whether area map of @chunk needs to be extended.  If
 * @is_atomic, only the amount necessary for a new allocation is
 * considered; however, async extension is scheduled if the left amount is
 * low.  If !@is_atomic, it aims for more empty space.  Combined, this
 * ensures that the map is likely to have enough available space to
 * accommodate atomic allocations which can't extend maps directly.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
{
        int margin, new_alloc;

        if (is_atomic) {
                margin = 3;

                if (chunk->map_alloc <
                    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
                    pcpu_async_enabled)
                        schedule_work(&chunk->map_extend_work);
        } else {
                margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
        }

        if (chunk->map_alloc >= chunk->map_used + margin)
                return 0;

        new_alloc = PCPU_DFL_MAP_ALLOC;
        while (new_alloc < chunk->map_used + margin)
                new_alloc *= 2;

        return new_alloc;
}
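
/*
 * Example (illustrative): a sleepable caller with map_used == 150
 * needs map_alloc >= 150 + PCPU_ATOMIC_MAP_MARGIN_HIGH == 214.
 * Doubling from PCPU_DFL_MAP_ALLOC gives 16, 32, 64, 128, 256, so the
 * function returns 256 as the new map allocation length.
 */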

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
        int *old = NULL, *new = NULL;
        size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
        unsigned long flags;

        new = pcpu_mem_zalloc(new_size);
        if (!new)
                return -ENOMEM;

        /* acquire pcpu_lock and switch to new area map */
        spin_lock_irqsave(&pcpu_lock, flags);

        if (new_alloc <= chunk->map_alloc)
                goto out_unlock;

        old_size = chunk->map_alloc * sizeof(chunk->map[0]);
        old = chunk->map;

        memcpy(new, old, old_size);

        chunk->map_alloc = new_alloc;
        chunk->map = new;
        new = NULL;

out_unlock:
        spin_unlock_irqrestore(&pcpu_lock, flags);

        /*
         * pcpu_mem_free() might end up calling vfree() which uses
         * IRQ-unsafe lock and thus can't be called under pcpu_lock.
         */
        pcpu_mem_free(old);
        pcpu_mem_free(new);

        return 0;
}

static void pcpu_map_extend_workfn(struct work_struct *work)
{
        struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
                                                map_extend_work);
        int new_alloc;

        spin_lock_irq(&pcpu_lock);
        new_alloc = pcpu_need_to_extend(chunk, false);
        spin_unlock_irq(&pcpu_lock);

        if (new_alloc)
                pcpu_extend_area_map(chunk, new_alloc);
}

/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
                            int size, int align, bool pop_only)
{
        int cand_off = off;

        while (true) {
                int head = ALIGN(cand_off, align) - off;
                int page_start, page_end, rs, re;

                if (this_size < head + size)
                        return -1;

                if (!pop_only)
                        return head;

                /*
                 * If the first unpopulated page is beyond the end of
                 * the allocation, the whole allocation is populated;
                 * otherwise, retry from the end of the unpopulated area.
                 */
                page_start = PFN_DOWN(head + off);
                page_end = PFN_UP(head + off + size);

                rs = page_start;
                pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
                if (rs >= page_end)
                        return head;
                cand_off = re * PAGE_SIZE;
        }
}
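
/*
 * Example (illustrative): a free area at off == 100 probed with
 * align == 64 gives head == ALIGN(100, 64) - 100 == 28, i.e. 28 pad
 * bytes before the allocation; pcpu_alloc_area() below decides whether
 * that head is split off as a new free area or merged away.
 */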

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
                           bool pop_only, int *occ_pages_p)
{
        int oslot = pcpu_chunk_slot(chunk);
        int max_contig = 0;
        int i, off;
        bool seen_free = false;
        int *p;

        for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
                int head, tail;
                int this_size;

                off = *p;
                if (off & 1)
                        continue;

                this_size = (p[1] & ~1) - off;

                head = pcpu_fit_in_area(chunk, off, this_size, size, align,
                                        pop_only);
                if (head < 0) {
                        if (!seen_free) {
                                chunk->first_free = i;
                                seen_free = true;
                        }
                        max_contig = max(this_size, max_contig);
                        continue;
                }

                /*
                 * If head is small or the previous block is free,
                 * merge'em.  Note that 'small' is defined as smaller
                 * than sizeof(int), which is very small but isn't too
                 * uncommon for percpu allocations.
                 */
                if (head && (head < sizeof(int) || !(p[-1] & 1))) {
                        *p = off += head;
                        if (p[-1] & 1)
                                chunk->free_size -= head;
                        else
                                max_contig = max(*p - p[-1], max_contig);
                        this_size -= head;
                        head = 0;
                }

                /* if tail is small, just keep it around */
                tail = this_size - head - size;
                if (tail < sizeof(int)) {
                        tail = 0;
                        size = this_size - head;
                }

                /* split if warranted */
                if (head || tail) {
                        int nr_extra = !!head + !!tail;

                        /* insert new subblocks behind the existing one */
                        memmove(p + nr_extra + 1, p + 1,
                                sizeof(chunk->map[0]) * (chunk->map_used - i));
                        chunk->map_used += nr_extra;

                        if (head) {
                                if (!seen_free) {
                                        chunk->first_free = i;
                                        seen_free = true;
                                }
                                *++p = off += head;
                                ++i;
                                max_contig = max(head, max_contig);
                        }
                        if (tail) {
                                p[1] = off + size;
                                max_contig = max(tail, max_contig);
                        }
                }

                if (!seen_free)
                        chunk->first_free = i + 1;

                /* update hint and mark allocated */
                if (i + 1 == chunk->map_used)
                        chunk->contig_hint = max_contig; /* fully scanned */
                else
                        chunk->contig_hint = max(chunk->contig_hint,
                                                 max_contig);

                chunk->free_size -= size;
                *p |= 1;

                *occ_pages_p = pcpu_count_occupied_pages(chunk, i);
                pcpu_chunk_relocate(chunk, oslot);
                return off;
        }

        chunk->contig_hint = max_contig;        /* fully scanned */
        pcpu_chunk_relocate(chunk, oslot);

        /* tell the upper layer that this chunk has no matching area */
        return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
                           int *occ_pages_p)
{
        int oslot = pcpu_chunk_slot(chunk);
        int off = 0;
        unsigned i, j;
        int to_free = 0;
        int *p;

        freeme |= 1;    /* we are searching for <given offset, in use> pair */

        i = 0;
        j = chunk->map_used;
        while (i != j) {
                unsigned k = (i + j) / 2;
                off = chunk->map[k];
                if (off < freeme)
                        i = k + 1;
                else if (off > freeme)
                        j = k;
                else
                        i = j = k;
        }
        BUG_ON(off != freeme);

        if (i < chunk->first_free)
                chunk->first_free = i;

        p = chunk->map + i;
        *p = off &= ~1;
        chunk->free_size += (p[1] & ~1) - off;

        *occ_pages_p = pcpu_count_occupied_pages(chunk, i);

        /* merge with next? */
        if (!(p[1] & 1))
                to_free++;
        /* merge with previous? */
        if (i > 0 && !(p[-1] & 1)) {
                to_free++;
                i--;
                p--;
        }
        if (to_free) {
                chunk->map_used -= to_free;
                memmove(p + 1, p + 1 + to_free,
                        (chunk->map_used - i) * sizeof(chunk->map[0]));
        }

        chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1,
                                 chunk->contig_hint);
        pcpu_chunk_relocate(chunk, oslot);
}

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
        struct pcpu_chunk *chunk;

        chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
        if (!chunk)
                return NULL;

        chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
                                                sizeof(chunk->map[0]));
        if (!chunk->map) {
                pcpu_mem_free(chunk);
                return NULL;
        }

        chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
        chunk->map[0] = 0;
        chunk->map[1] = pcpu_unit_size | 1;
        chunk->map_used = 1;

        INIT_LIST_HEAD(&chunk->list);
        INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
        chunk->free_size = pcpu_unit_size;
        chunk->contig_hint = pcpu_unit_size;

        return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
        if (!chunk)
                return;
        pcpu_mem_free(chunk->map);
        pcpu_mem_free(chunk);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
{
        int nr = page_end - page_start;

        lockdep_assert_held(&pcpu_lock);

        bitmap_set(chunk->populated, page_start, nr);
        chunk->nr_populated += nr;
        pcpu_nr_empty_pop_pages += nr;
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
                                   int page_start, int page_end)
{
        int nr = page_end - page_start;

        lockdep_assert_held(&pcpu_lock);

        bitmap_clear(chunk->populated, page_start, nr);
        chunk->nr_populated -= nr;
        pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk          - populate the specified range of a chunk
 * pcpu_depopulate_chunk        - depopulate the specified range of a chunk
 * pcpu_create_chunk            - create a new chunk
 * pcpu_destroy_chunk           - destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page            - translate address to physical address
 * pcpu_verify_alloc_info       - check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
        /* is it in the first chunk? */
        if (pcpu_addr_in_first_chunk(addr)) {
                /* is it in the reserved area? */
                if (pcpu_addr_in_reserved_chunk(addr))
                        return pcpu_reserved_chunk;
                return pcpu_first_chunk;
        }

        /*
         * The address is relative to unit0 which might be unused and
         * thus unmapped.  Offset the address to the unit space of the
         * current processor before looking it up in the vmalloc
         * space.  Note that any possible cpu id can be used here, so
         * there's no need to worry about preemption or cpu hotplug.
         */
        addr += pcpu_unit_offsets[raw_smp_processor_id()];
        return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                                 gfp_t gfp)
{
        static int warn_limit = 10;
        struct pcpu_chunk *chunk;
        const char *err;
        bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
        int occ_pages = 0;
        int slot, off, new_alloc, cpu, ret;
        unsigned long flags;
        void __percpu *ptr;

        /*
         * We want the lowest bit of offset available for the
         * in-use/free indicator, so force >= 2 byte alignment and
         * make size even.
         */
        if (unlikely(align < 2))
                align = 2;

        size = ALIGN(size, 2);

        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
                     size, align);
                return NULL;
        }

        spin_lock_irqsave(&pcpu_lock, flags);

        /* serve reserved allocations from the reserved chunk if available */
        if (reserved && pcpu_reserved_chunk) {
                chunk = pcpu_reserved_chunk;

                if (size > chunk->contig_hint) {
                        err = "alloc from reserved chunk failed";
                        goto fail_unlock;
                }

                while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
                        spin_unlock_irqrestore(&pcpu_lock, flags);
                        if (is_atomic ||
                            pcpu_extend_area_map(chunk, new_alloc) < 0) {
                                err = "failed to extend area map of reserved chunk";
                                goto fail;
                        }
                        spin_lock_irqsave(&pcpu_lock, flags);
                }

                off = pcpu_alloc_area(chunk, size, align, is_atomic,
                                      &occ_pages);
                if (off >= 0)
                        goto area_found;

                err = "alloc from reserved chunk failed";
                goto fail_unlock;
        }

restart:
        /* search through normal chunks */
        for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
                list_for_each_entry(chunk, &pcpu_slot[slot], list) {
                        if (size > chunk->contig_hint)
                                continue;

                        new_alloc = pcpu_need_to_extend(chunk, is_atomic);
                        if (new_alloc) {
                                if (is_atomic)
                                        continue;
                                spin_unlock_irqrestore(&pcpu_lock, flags);
                                if (pcpu_extend_area_map(chunk,
                                                         new_alloc) < 0) {
                                        err = "failed to extend area map";
                                        goto fail;
                                }
                                spin_lock_irqsave(&pcpu_lock, flags);
                                /*
                                 * pcpu_lock has been dropped, need to
                                 * restart cpu_slot list walking.
                                 */
                                goto restart;
                        }

                        off = pcpu_alloc_area(chunk, size, align, is_atomic,
                                              &occ_pages);
                        if (off >= 0)
                                goto area_found;
                }
        }

        spin_unlock_irqrestore(&pcpu_lock, flags);

        /*
         * No space left.  Create a new chunk.  We don't want multiple
         * tasks to create chunks simultaneously.  Serialize and create
         * iff there's still no empty chunk after grabbing the mutex.
         */
        if (is_atomic)
                goto fail;

        mutex_lock(&pcpu_alloc_mutex);

        if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
                chunk = pcpu_create_chunk();
                if (!chunk) {
                        mutex_unlock(&pcpu_alloc_mutex);
                        err = "failed to allocate new chunk";
                        goto fail;
                }

                spin_lock_irqsave(&pcpu_lock, flags);
                pcpu_chunk_relocate(chunk, -1);
        } else {
                spin_lock_irqsave(&pcpu_lock, flags);
        }

        mutex_unlock(&pcpu_alloc_mutex);
        goto restart;

area_found:
        spin_unlock_irqrestore(&pcpu_lock, flags);

        /* populate if not all pages are already there */
        if (!is_atomic) {
                int page_start, page_end, rs, re;

                mutex_lock(&pcpu_alloc_mutex);

                page_start = PFN_DOWN(off);
                page_end = PFN_UP(off + size);

                pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                        WARN_ON(chunk->immutable);

                        ret = pcpu_populate_chunk(chunk, rs, re);

                        spin_lock_irqsave(&pcpu_lock, flags);
                        if (ret) {
                                mutex_unlock(&pcpu_alloc_mutex);
                                pcpu_free_area(chunk, off, &occ_pages);
                                err = "failed to populate";
                                goto fail_unlock;
                        }
                        pcpu_chunk_populated(chunk, rs, re);
                        spin_unlock_irqrestore(&pcpu_lock, flags);
                }

                mutex_unlock(&pcpu_alloc_mutex);
        }

        if (chunk != pcpu_reserved_chunk)
                pcpu_nr_empty_pop_pages -= occ_pages;

        if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
                pcpu_schedule_balance_work();

        /* clear the areas and return address relative to base address */
        for_each_possible_cpu(cpu)
                memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

        ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
        kmemleak_alloc_percpu(ptr, size, gfp);
        return ptr;

fail_unlock:
        spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
        if (!is_atomic && warn_limit) {
                pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
                        size, align, is_atomic, err);
                dump_stack();
                if (!--warn_limit)
                        pr_info("limit reached, disable warning\n");
        }
        if (is_atomic) {
                /* see the flag handling in pcpu_balance_workfn() */
                pcpu_atomic_alloc_failed = true;
                pcpu_schedule_balance_work();
        }
        return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
        return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
        return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
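
/*
 * Usage sketch (illustrative, not from the original source):
 *
 *      struct my_stats {
 *              u64 packets;
 *      };
 *      struct my_stats __percpu *stats;
 *      u64 sum = 0;
 *      int cpu;
 *
 *      stats = alloc_percpu(struct my_stats);  // wraps __alloc_percpu()
 *      if (!stats)
 *              return -ENOMEM;
 *
 *      this_cpu_inc(stats->packets);           // cheap, local update
 *      for_each_possible_cpu(cpu)              // slow-path aggregation
 *              sum += per_cpu_ptr(stats, cpu)->packets;
 *
 *      free_percpu(stats);
 */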

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
        return pcpu_alloc(size, align, true, GFP_KERNEL);
}

/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.  Also, try to
 * keep at least PCPU_EMPTY_POP_PAGES_LOW empty populated pages around so
 * that atomic allocations are reasonably likely to succeed.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
        LIST_HEAD(to_free);
        struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
        struct pcpu_chunk *chunk, *next;
        int slot, nr_to_pop, ret;

        /*
         * There's no reason to keep around multiple unused chunks and VM
         * areas can be scarce.  Destroy all free chunks except for one.
         */
        mutex_lock(&pcpu_alloc_mutex);
        spin_lock_irq(&pcpu_lock);

        list_for_each_entry_safe(chunk, next, free_head, list) {
                WARN_ON(chunk->immutable);

                /* spare the first one */
                if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
                        continue;

                list_move(&chunk->list, &to_free);
        }

        spin_unlock_irq(&pcpu_lock);

        list_for_each_entry_safe(chunk, next, &to_free, list) {
                int rs, re;

                pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
                        pcpu_depopulate_chunk(chunk, rs, re);
                        spin_lock_irq(&pcpu_lock);
                        pcpu_chunk_depopulated(chunk, rs, re);
                        spin_unlock_irq(&pcpu_lock);
                }
                pcpu_destroy_chunk(chunk);
        }

        /*
         * Ensure there are certain number of free populated pages for
         * atomic allocs.  Fill up from the most packed so that atomic
         * allocs don't increase fragmentation.  If atomic allocation
         * failed previously, always populate the maximum amount.  This
         * should prevent atomic allocs larger than PAGE_SIZE from keeping
         * failing indefinitely; however, large atomic allocs are not
         * something we support properly and can be highly unreliable and
         * inefficient.
         */
retry_pop:
        if (pcpu_atomic_alloc_failed) {
                nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
                /* best effort anyway, don't worry about synchronization */
                pcpu_atomic_alloc_failed = false;
        } else {
                nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
                                  pcpu_nr_empty_pop_pages,
                                  0, PCPU_EMPTY_POP_PAGES_HIGH);
        }

        for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
                int nr_unpop = 0, rs, re;

                if (!nr_to_pop)
                        break;

                spin_lock_irq(&pcpu_lock);
                list_for_each_entry(chunk, &pcpu_slot[slot], list) {
                        nr_unpop = pcpu_unit_pages - chunk->nr_populated;
                        if (nr_unpop)
                                break;
                }
                spin_unlock_irq(&pcpu_lock);

                if (!nr_unpop)
                        continue;

                /* @chunk can't go away while pcpu_alloc_mutex is held */
                pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
                        int nr = min(re - rs, nr_to_pop);

                        ret = pcpu_populate_chunk(chunk, rs, rs + nr);
                        if (!ret) {
                                nr_to_pop -= nr;
                                spin_lock_irq(&pcpu_lock);
                                pcpu_chunk_populated(chunk, rs, rs + nr);
                                spin_unlock_irq(&pcpu_lock);
                        } else {
                                nr_to_pop = 0;
                        }

                        if (!nr_to_pop)
                                break;
                }
        }

        if (nr_to_pop) {
                /* ran out of chunks to populate, create a new one and retry */
                chunk = pcpu_create_chunk();
                if (chunk) {
                        spin_lock_irq(&pcpu_lock);
                        pcpu_chunk_relocate(chunk, -1);
                        spin_unlock_irq(&pcpu_lock);
                        goto retry_pop;
                }
        }

        mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
        void *addr;
        struct pcpu_chunk *chunk;
        unsigned long flags;
        int off, occ_pages;

        if (!ptr)
                return;

        kmemleak_free_percpu(ptr);

        addr = __pcpu_ptr_to_addr(ptr);

        spin_lock_irqsave(&pcpu_lock, flags);

        chunk = pcpu_chunk_addr_search(addr);
        off = addr - chunk->base_addr;

        pcpu_free_area(chunk, off, &occ_pages);

        if (chunk != pcpu_reserved_chunk)
                pcpu_nr_empty_pop_pages += occ_pages;

        /* if there are more than one fully free chunks, wake up grim reaper */
        if (chunk->free_size == pcpu_unit_size) {
                struct pcpu_chunk *pos;

                list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
                        if (pos != chunk) {
                                pcpu_schedule_balance_work();
                                break;
                        }
        }

        spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
        const size_t static_size = __per_cpu_end - __per_cpu_start;
        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                void *start = per_cpu_ptr(base, cpu);

                if ((void *)addr >= start && (void *)addr < start + static_size)
                        return true;
        }
#endif
        /* on UP, can't distinguish from other static vars, always false */
        return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * percpu allocator has special setup for the first chunk, which currently
 * supports either embedding in linear address space or vmalloc mapping,
 * and, from the second one, the backing allocator (currently either vm or
 * km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into the
 * first chunk.  But the current code reflects better how percpu allocator
 * actually works, and the verification can discover both bugs in percpu
 * allocator itself and per_cpu_ptr_to_phys() callers.  So we keep current
 * code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
        bool in_first_chunk = false;
        unsigned long first_low, first_high;
        unsigned int cpu;

        /*
         * The following test on unit_low/high isn't strictly
         * necessary but will speed up lookups of addresses which
         * aren't in the first chunk.
         */
        first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
        first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
                                     pcpu_unit_pages);
        if ((unsigned long)addr >= first_low &&
            (unsigned long)addr < first_high) {
                for_each_possible_cpu(cpu) {
                        void *start = per_cpu_ptr(base, cpu);

                        if (addr >= start && addr < start + pcpu_unit_size) {
                                in_first_chunk = true;
                                break;
                        }
                }
        }

        if (in_first_chunk) {
                if (!is_vmalloc_addr(addr))
                        return __pa(addr);
                else
                        return page_to_phys(vmalloc_to_page(addr)) +
                               offset_in_page(addr);
        } else
                return page_to_phys(pcpu_addr_to_page(addr)) +
                       offset_in_page(addr);
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
                                                      int nr_units)
{
        struct pcpu_alloc_info *ai;
        size_t base_size, ai_size;
        void *ptr;
        int unit;

        base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
                          __alignof__(ai->groups[0].cpu_map[0]));
        ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

        ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
        if (!ptr)
                return NULL;
        ai = ptr;
        ptr += base_size;

        ai->groups[0].cpu_map = ptr;

        for (unit = 0; unit < nr_units; unit++)
                ai->groups[0].cpu_map[unit] = NR_CPUS;

        ai->nr_groups = nr_groups;
        ai->__ai_size = PFN_ALIGN(ai_size);

        return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
        memblock_free_early(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
                                 const struct pcpu_alloc_info *ai)
{
        int group_width = 1, cpu_width = 1, width;
        char empty_str[] = "--------";
        int alloc = 0, alloc_end = 0;
        int group, v;
        int upa, apl;   /* units per alloc, allocs per line */

        v = ai->nr_groups;
        while (v /= 10)
                group_width++;

        v = num_possible_cpus();
        while (v /= 10)
                cpu_width++;
        empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

        upa = ai->alloc_size / ai->unit_size;
        width = upa * (cpu_width + 1) + group_width + 3;
        apl = rounddown_pow_of_two(max(60 / width, 1));

        printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
               lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
               ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

        for (group = 0; group < ai->nr_groups; group++) {
                const struct pcpu_group_info *gi = &ai->groups[group];
                int unit = 0, unit_end = 0;

                BUG_ON(gi->nr_units % upa);
                for (alloc_end += gi->nr_units / upa;
                     alloc < alloc_end; alloc++) {
                        if (!(alloc % apl)) {
                                pr_cont("\n");
                                printk("%spcpu-alloc: ", lvl);
                        }
                        pr_cont("[%0*d] ", group_width, group);

                        for (unit_end += upa; unit < unit_end; unit++)
                                if (gi->cpu_map[unit] != NR_CPUS)
                                        pr_cont("%0*d ",
                                                cpu_width, gi->cpu_map[unit]);
                                else
                                        pr_cont("%s ", empty_str);
                }
        }
        pr_cont("\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.  @ai->static_size is
 * the size of static percpu area, @ai->reserved_size, if non-zero,
 * specifies the amount of bytes to serve reserved allocations (e.g.
 * module static percpu variables), and @ai->dyn_size determines the
 * number of bytes available for dynamic allocation in the first
 * chunk.  @ai->unit_size specifies unit size and must be aligned to
 * PAGE_SIZE and equal to or larger than the sum of the above three.
 * @ai->atom_size is the allocation atom size and is used as alignment
 * for vm areas.  @ai->alloc_size is the allocation size and always a
 * multiple of @ai->atom_size.  @ai->nr_groups and @ai->groups describe
 * the virtual memory layout of the percpu areas and the cpu -> unit
 * mapping.
 *
 * If @ai->reserved_size is non-zero, the first chunk ends up with two
 * struct pcpu_chunk's - one serving the static and reserved areas and
 * one serving the dynamic area.  They share the same vm and page map.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                                  void *base_addr)
{
        static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
        static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
        size_t dyn_size = ai->dyn_size;
        size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
        struct pcpu_chunk *schunk, *dchunk = NULL;
        unsigned long *group_offsets;
        size_t *group_sizes;
        unsigned long *unit_off;
        unsigned int cpu;
        int *unit_map;
        int group, unit, i;

#define PCPU_SETUP_BUG_ON(cond) do {                                    \
        if (unlikely(cond)) {                                           \
                pr_emerg("failed to initialize, %s\n", #cond);          \
                pr_emerg("cpu_possible_mask=%*pb\n",                    \
                         cpumask_pr_args(cpu_possible_mask));           \
                pcpu_dump_alloc_info(KERN_EMERG, ai);                   \
                BUG();                                                  \
        }                                                               \
} while (0)

        /* sanity checks */
        PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
        PCPU_SETUP_BUG_ON(!ai->static_size);
        PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
#endif
        PCPU_SETUP_BUG_ON(!base_addr);
        PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
        PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
        PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
        PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
        PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
        PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

        /* process group information and build config tables accordingly */
        group_offsets = memblock_virt_alloc(ai->nr_groups *
                                             sizeof(group_offsets[0]), 0);
        group_sizes = memblock_virt_alloc(ai->nr_groups *
                                           sizeof(group_sizes[0]), 0);
        unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
        unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                unit_map[cpu] = UINT_MAX;

        pcpu_low_unit_cpu = NR_CPUS;
        pcpu_high_unit_cpu = NR_CPUS;

        for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
                const struct pcpu_group_info *gi = &ai->groups[group];

                group_offsets[group] = gi->base_offset;
                group_sizes[group] = gi->nr_units * ai->unit_size;

                for (i = 0; i < gi->nr_units; i++) {
                        cpu = gi->cpu_map[i];
                        if (cpu == NR_CPUS)
                                continue;

                        PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
                        PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
                        PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

                        unit_map[cpu] = unit + i;
                        unit_off[cpu] = gi->base_offset + i * ai->unit_size;

                        /* determine low/high unit_cpu */
                        if (pcpu_low_unit_cpu == NR_CPUS ||
                            unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
                                pcpu_low_unit_cpu = cpu;
                        if (pcpu_high_unit_cpu == NR_CPUS ||
                            unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
                                pcpu_high_unit_cpu = cpu;
                }
        }
        pcpu_nr_units = unit;

        for_each_possible_cpu(cpu)
                PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

        /* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
        pcpu_dump_alloc_info(KERN_DEBUG, ai);

        pcpu_nr_groups = ai->nr_groups;
        pcpu_group_offsets = group_offsets;
        pcpu_group_sizes = group_sizes;
        pcpu_unit_map = unit_map;
        pcpu_unit_offsets = unit_off;

        /* determine basic parameters */
        pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
        pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
        pcpu_atom_size = ai->atom_size;
        pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
                BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

        /*
         * Allocate chunk slots.  The additional last slot is for
         * empty chunks.
         */
        pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
        pcpu_slot = memblock_virt_alloc(
                        pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
        for (i = 0; i < pcpu_nr_slots; i++)
                INIT_LIST_HEAD(&pcpu_slot[i]);

        /*
         * Initialize static chunk.  If reserved_size is zero, the
         * static chunk covers static area + dynamic allocation area
         * in the first chunk.  If reserved_size is not zero, it
         * covers static area + reserved area (mostly used for module
         * static percpu allocation).
         */
        schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
        INIT_LIST_HEAD(&schunk->list);
        INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
        schunk->base_addr = base_addr;
        schunk->map = smap;
        schunk->map_alloc = ARRAY_SIZE(smap);
        schunk->immutable = true;
        bitmap_fill(schunk->populated, pcpu_unit_pages);
        schunk->nr_populated = pcpu_unit_pages;

        if (ai->reserved_size) {
                schunk->free_size = ai->reserved_size;
                pcpu_reserved_chunk = schunk;
                pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
        } else {
                schunk->free_size = dyn_size;
                dyn_size = 0;                   /* dynamic area covered */
        }
        schunk->contig_hint = schunk->free_size;

        schunk->map[0] = 1;
        schunk->map[1] = ai->static_size;
        schunk->map_used = 1;
        if (schunk->free_size)
                schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
        schunk->map[schunk->map_used] |= 1;

        /* init dynamic chunk if necessary */
        if (dyn_size) {
                dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
                INIT_LIST_HEAD(&dchunk->list);
                INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
                dchunk->base_addr = base_addr;
                dchunk->map = dmap;
                dchunk->map_alloc = ARRAY_SIZE(dmap);
                dchunk->immutable = true;
                bitmap_fill(dchunk->populated, pcpu_unit_pages);
                dchunk->nr_populated = pcpu_unit_pages;

                dchunk->contig_hint = dchunk->free_size = dyn_size;
                dchunk->map[0] = 1;
                dchunk->map[1] = pcpu_reserved_chunk_limit;
                dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
                dchunk->map_used = 2;
        }

        /* link the first chunk in */
        pcpu_first_chunk = dchunk ?: schunk;
        pcpu_nr_empty_pop_pages +=
                pcpu_count_occupied_pages(pcpu_first_chunk, 1);
        pcpu_chunk_relocate(pcpu_first_chunk, -1);

        /* we're done */
        pcpu_base_addr = base_addr;
        return 0;
}
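
/*
 * Example (illustrative): with static_size == 0x5000, reserved_size ==
 * 0x2000 and dyn_size == 0x3000, the static chunk map above ends up as
 * { 0 | 1, 0x5000, 0x7000 | 1 } (static area busy, reserved area free)
 * and the dynamic chunk map as { 0 | 1, 0x7000, 0xa000 | 1 }, i.e. the
 * first 0x7000 bytes appear permanently allocated to the dynamic chunk.
 */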

#ifdef CONFIG_SMP

const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
        [PCPU_FC_AUTO]  = "auto",
        [PCPU_FC_EMBED] = "embed",
        [PCPU_FC_PAGE]  = "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (0)
                /* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
        else if (!strcmp(str, "embed"))
                pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
        else if (!strcmp(str, "page"))
                pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
        else
                pr_warn("unknown allocator %s specified\n", str);

        return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup and may
 * be used by arch-specific setups as well.  Build it if either of them
 * needs it.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
        !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff the arch config requires it */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
                                size_t reserved_size, size_t dyn_size,
                                size_t atom_size,
                                pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
        static int group_map[NR_CPUS] __initdata;
        static int group_cnt[NR_CPUS] __initdata;
        const size_t static_size = __per_cpu_end - __per_cpu_start;
        int nr_groups = 1, nr_units = 0;
        size_t size_sum, min_unit_size, alloc_size;
        int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
        int last_allocs, group, unit;
        unsigned int cpu, tcpu;
        struct pcpu_alloc_info *ai;
        unsigned int *cpu_map;

        /* this function may be called multiple times */
        memset(group_map, 0, sizeof(group_map));
        memset(group_cnt, 0, sizeof(group_cnt));

        /* calculate size_sum and ensure dyn_size is enough for early alloc */
        size_sum = PFN_ALIGN(static_size + reserved_size +
                            max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
        dyn_size = size_sum - static_size - reserved_size;

        /*
         * Determine min_unit_size, alloc_size and max_upa such that
         * alloc_size is multiple of atom_size and is the smallest
         * which can accommodate 4k aligned segments which are equal to
         * or larger than min_unit_size.
         */
        min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

        alloc_size = roundup(min_unit_size, atom_size);
        upa = alloc_size / min_unit_size;
        while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
                upa--;
        max_upa = upa;

        /* group cpus according to their proximity */
        for_each_possible_cpu(cpu) {
                group = 0;
        next_group:
                for_each_possible_cpu(tcpu) {
                        if (cpu == tcpu)
                                break;
                        if (group_map[tcpu] == group && cpu_distance_fn &&
                            (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
                             cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
                                group++;
                                nr_groups = max(nr_groups, group + 1);
                                goto next_group;
                        }
                }
                group_map[cpu] = group;
                group_cnt[group]++;
        }

        /*
         * Expand unit size until address space usage goes over 75%
         * and then as much as possible without using more address
         * space.
         */
        last_allocs = INT_MAX;
        for (upa = max_upa; upa; upa--) {
                int allocs = 0, wasted = 0;

                if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
                        continue;

                for (group = 0; group < nr_groups; group++) {
                        int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
                        allocs += this_allocs;
                        wasted += this_allocs * upa - group_cnt[group];
                }

                /*
                 * Don't accept if wastage is over 1/3.  The
                 * greater-than comparison ensures upa==1 always
                 * passes the following check.
                 */
                if (wasted > num_possible_cpus() / 3)
                        continue;

                /* and then don't consume more memory */
                if (allocs > last_allocs)
                        break;
                last_allocs = allocs;
                best_upa = upa;
        }
        upa = best_upa;

        /* allocate and fill alloc_info */
        for (group = 0; group < nr_groups; group++)
                nr_units += roundup(group_cnt[group], upa);

        ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
        if (!ai)
                return ERR_PTR(-ENOMEM);
        cpu_map = ai->groups[0].cpu_map;

        for (group = 0; group < nr_groups; group++) {
                ai->groups[group].cpu_map = cpu_map;
                cpu_map += roundup(group_cnt[group], upa);
        }

        ai->static_size = static_size;
        ai->reserved_size = reserved_size;
        ai->dyn_size = dyn_size;
        ai->unit_size = alloc_size / upa;
        ai->atom_size = atom_size;
        ai->alloc_size = alloc_size;

        for (group = 0, unit = 0; group_cnt[group]; group++) {
                struct pcpu_group_info *gi = &ai->groups[group];

                /*
                 * Initialize base_offset as if all groups are located
                 * back-to-back.  The caller should update this to
                 * reflect actual allocation.
                 */
                gi->base_offset = unit * ai->unit_size;

                for_each_possible_cpu(cpu)
                        if (group_map[cpu] == group)
                                gi->cpu_map[gi->nr_units++] = cpu;
                gi->nr_units = roundup(gi->nr_units, upa);
                unit += gi->nr_units;
        }
        BUG_ON(unit != nr_units);

        return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
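
/*
 * Worked example (illustrative): with min_unit_size == 192K and a 2M
 * atom_size, alloc_size is rounded up to 2M and upa starts at
 * 2M / 192K == 10.  Neither 10 nor 9 divides 2M evenly, so upa settles
 * at 8, giving max_upa == 8 and a unit size of 256K per CPU.
 */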

#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                                  size_t atom_size,
                                  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
                                  pcpu_fc_alloc_fn_t alloc_fn,
                                  pcpu_fc_free_fn_t free_fn)
{
        void *base = (void *)ULONG_MAX;
        void **areas = NULL;
        struct pcpu_alloc_info *ai;
        size_t size_sum, areas_size, max_distance;
        int group, i, rc;

        ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
                                   cpu_distance_fn);
        if (IS_ERR(ai))
                return PTR_ERR(ai);

        size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
        areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

        areas = memblock_virt_alloc_nopanic(areas_size, 0);
        if (!areas) {
                rc = -ENOMEM;
                goto out_free;
        }

        /* allocate, copy and determine base address */
        for (group = 0; group < ai->nr_groups; group++) {
                struct pcpu_group_info *gi = &ai->groups[group];
                unsigned int cpu = NR_CPUS;
                void *ptr;

                for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
                        cpu = gi->cpu_map[i];
                BUG_ON(cpu == NR_CPUS);

                /* allocate space for the whole group */
                ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
                if (!ptr) {
                        rc = -ENOMEM;
                        goto out_free_areas;
                }
                /* kmemleak tracks the percpu allocations separately */
                kmemleak_free(ptr);
                areas[group] = ptr;

                base = min(ptr, base);
        }

        /*
         * Copy data and free unused parts.  This should happen after all
         * allocations are complete; otherwise, we may end up with
         * overlapping groups.
         */
        for (group = 0; group < ai->nr_groups; group++) {
                struct pcpu_group_info *gi = &ai->groups[group];
                void *ptr = areas[group];

                for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
                        if (gi->cpu_map[i] == NR_CPUS) {
                                /* unused unit, free whole */
                                free_fn(ptr, ai->unit_size);
                                continue;
                        }
                        /* copy and return the unused part */
                        memcpy(ptr, __per_cpu_load, ai->static_size);
                        free_fn(ptr + size_sum, ai->unit_size - size_sum);
                }
        }

        /* base address is now known, determine group base offsets */
        max_distance = 0;
        for (group = 0; group < ai->nr_groups; group++) {
                ai->groups[group].base_offset = areas[group] - base;
                max_distance = max_t(size_t, max_distance,
                                     ai->groups[group].base_offset);
        }
        max_distance += ai->unit_size;

        /* warn if maximum distance is further than 75% of vmalloc space */
        if (max_distance > VMALLOC_TOTAL * 3 / 4) {
                pr_warn("max_distance=0x%zx too large for vmalloc space 0x%lx\n",
                        max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
                /* and fail if we have fallback */
                rc = -EINVAL;
                goto out_free;
#endif
        }

        pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
                PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
                ai->dyn_size, ai->unit_size);

        rc = pcpu_setup_first_chunk(ai, base);
        goto out_free;

out_free_areas:
        for (group = 0; group < ai->nr_groups; group++)
                if (areas[group])
                        free_fn(areas[group],
                                ai->groups[group].nr_units * ai->unit_size);
out_free:
        pcpu_free_alloc_info(ai);
        if (areas)
                memblock_free_early(__pa(areas), areas_size);
        return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
                                 pcpu_fc_alloc_fn_t alloc_fn,
                                 pcpu_fc_free_fn_t free_fn,
                                 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
        static struct vm_struct vm;
        struct pcpu_alloc_info *ai;
        char psize_str[16];
        int unit_pages;
        size_t pages_size;
        struct page **pages;
        int unit, i, j, rc;

        snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

        ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
        if (IS_ERR(ai))
                return PTR_ERR(ai);
        BUG_ON(ai->nr_groups != 1);
        BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

        unit_pages = ai->unit_size >> PAGE_SHIFT;

        /* unaligned allocations can't be freed, round up to page size */
        pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
                               sizeof(pages[0]));
        pages = memblock_virt_alloc(pages_size, 0);

        /* allocate pages */
        j = 0;
        for (unit = 0; unit < num_possible_cpus(); unit++)
                for (i = 0; i < unit_pages; i++) {
                        unsigned int cpu = ai->groups[0].cpu_map[unit];
                        void *ptr;

                        ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
                        if (!ptr) {
                                pr_warn("failed to allocate %s page for cpu%u\n",
                                        psize_str, cpu);
                                goto enomem;
                        }
                        /* kmemleak tracks the percpu allocations separately */
                        kmemleak_free(ptr);
                        pages[j++] = virt_to_page(ptr);
                }

        /* allocate vm area, map the pages and copy static data */
        vm.flags = VM_ALLOC;
        vm.size = num_possible_cpus() * ai->unit_size;
        vm_area_register_early(&vm, PAGE_SIZE);

        for (unit = 0; unit < num_possible_cpus(); unit++) {
                unsigned long unit_addr =
                        (unsigned long)vm.addr + unit * ai->unit_size;

                for (i = 0; i < unit_pages; i++)
                        populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

                /* pte already populated, the following shouldn't fail */
                rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
                                      unit_pages);
                if (rc < 0)
                        panic("failed to map percpu area, err=%d\n", rc);

                /*
                 * FIXME: Archs with virtual cache should flush local
                 * cache for the linear mapping here - something
                 * equivalent to flush_cache_vmap() on the local cpu.
                 * flush_cache_vmap() can't be used as most supporting
                 * data structures are not set up yet.
                 */

                /* copy static data */
                memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
        }

        /* we're ready, commit */
        pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
                unit_pages, psize_str, vm.addr, ai->static_size,
                ai->reserved_size, ai->dyn_size);

        rc = pcpu_setup_first_chunk(ai, vm.addr);
        goto out_free_ar;

enomem:
        while (--j >= 0)
                free_fn(page_address(pages[j]), PAGE_SIZE);
        rc = -ENOMEM;
out_free_ar:
        memblock_free_early(__pa(pages), pages_size);
        pcpu_free_alloc_info(ai);
        return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
                                       size_t align)
{
        return memblock_virt_alloc_from_nopanic(
                        size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
        memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
        unsigned long delta;
        unsigned int cpu;
        int rc;

        /*
         * Always reserve area for module percpu variables.  That's
         * what the legacy allocator did.
         */
        rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
                                    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
                                    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
        if (rc < 0)
                panic("Failed to initialize percpu areas.");

        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu)
                __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif  /* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else   /* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
        const size_t unit_size =
                roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
                                         PERCPU_DYNAMIC_RESERVE));
        struct pcpu_alloc_info *ai;
        void *fc;

        ai = pcpu_alloc_alloc_info(1, 1);
        fc = memblock_virt_alloc_from_nopanic(unit_size,
                                              PAGE_SIZE,
                                              __pa(MAX_DMA_ADDRESS));
        if (!ai || !fc)
                panic("Failed to allocate memory for percpu areas.");
        /* kmemleak tracks the percpu allocations separately */
        kmemleak_free(fc);

        ai->dyn_size = unit_size;
        ai->unit_size = unit_size;
        ai->atom_size = unit_size;
        ai->alloc_size = unit_size;
        ai->groups[0].nr_units = 1;
        ai->groups[0].cpu_map[0] = 0;

        if (pcpu_setup_first_chunk(ai, fc) < 0)
                panic("Failed to initialize percpu areas.");
}

#endif  /* CONFIG_SMP */

/*
 * First and reserved chunks are initialized with temporary allocation
 * map in initdata so that they can be used before slab is online.
 * This function is called after slab is brought up and replaces those
 * with properly allocated maps.
 */
void __init percpu_init_late(void)
{
        struct pcpu_chunk *target_chunks[] =
                { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
        struct pcpu_chunk *chunk;
        unsigned long flags;
        int i;

        for (i = 0; (chunk = target_chunks[i]); i++) {
                int *map;
                const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

                BUILD_BUG_ON(size > PAGE_SIZE);

                map = pcpu_mem_zalloc(size);
                BUG_ON(!map);

                spin_lock_irqsave(&pcpu_lock, flags);
                memcpy(map, chunk->map, size);
                chunk->map = map;
                spin_unlock_irqrestore(&pcpu_lock, flags);
        }
}

/*
 * Percpu allocator is initialized early and atomic allocation isn't
 * supported during early boot.  Enable async paths once the workqueue
 * subsystem is up and running.
 */
static int __init percpu_enable_async(void)
{
        pcpu_async_enabled = true;
        return 0;
}
subsys_initcall(percpu_enable_async);