/*
 * Slab allocator code that is independent of the underlying allocator
 * implementation (SLAB, SLUB or SLOB).
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

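/*
 * Set of flags that will prevent slab merging.
 */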
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_NOTRACK | SLAB_ACCOUNT)

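/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */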
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

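/*
 * Determine the size of a slab object.
 */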
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

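		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */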
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
				void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.is_root_cache = true;
	INIT_LIST_HEAD(&s->memcg_params.list);
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
}

static int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (memcg) {
		s->memcg_params.is_root_cache = false;
		s->memcg_params.memcg = memcg;
		s->memcg_params.root_cache = root_cache;
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kzalloc(sizeof(struct memcg_cache_array) +
		      memcg_nr_cache_ids * sizeof(void *),
		      GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	if (!is_root_cache(s))
		return 0;

	new = kzalloc(sizeof(struct memcg_cache_array) +
		      new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		ret = update_memcg_params(s, num_memcgs);
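		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */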
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

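/*
 * Find a mergeable slab cache
 */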
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

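	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */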
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;

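		/*
		 * Check if alignment is compatible with this cache.
		 */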
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

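/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */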
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
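	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */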
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

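/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial when tuning for cache behaviour.
 */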
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_unlock;

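	/*
	 * Some allocators will constraint the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */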
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

static int shutdown_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	if (s->flags & SLAB_DESTROY_BY_RCU)
		*need_rcu_barrier = true;

	list_move(&s->list, release);
	return 0;
}

static void release_caches(struct list_head *release, bool need_rcu_barrier)
{
	struct kmem_cache *s, *s2;

	if (need_rcu_barrier)
		rcu_barrier();

	list_for_each_entry_safe(s, s2, release, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_remove(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
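/*
 * memcg_create_kmem_cache - create a cache for a memory cgroup
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits
 * properties from its parent.
 */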
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

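	/*
	 * The memory cgroup could have been offlined while the cache
	 * creation work was pending.
	 */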
	if (memcg->kmem_state != KMEM_ONLINE)
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

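	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */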
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
			       css->id, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->size, root_cache->align,
			 root_cache->flags, root_cache->ctor,
			 memcg, root_cache);

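	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */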
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	list_add(&s->memcg_params.list, &root_cache->memcg_params.list);

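	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * memory barrier here to ensure nobody will see the kmem_cache
	 * partially initialized.
	 */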
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		__kmem_cache_shrink(c, true);
		arr->entries[idx] = NULL;
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int __shutdown_memcg_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	BUG_ON(is_root_cache(s));

	if (shutdown_cache(s, release, need_rcu_barrier))
		return -EBUSY;

	list_del(&s->memcg_params.list);
	return 0;
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (is_root_cache(s) || s->memcg_params.memcg != memcg)
			continue;

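		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */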
		BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}

static int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

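	/*
	 * First, shutdown active caches, i.e. caches that belong to online
	 * memory cgroups.
	 */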
	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (__shutdown_memcg_cache(c, release, need_rcu_barrier))
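			/*
			 * The cache still has objects. Move it to a
			 * temporary list.
			 */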
			list_move(&c->memcg_params.list, &busy);
		else
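			/*
			 * The cache is empty and will be destroyed soon. Clear
			 * the pointer to it in the memcg_caches array so that
			 * it will never be accessed even if the root cache
			 * stays alive.
			 */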
			arr->entries[i] = NULL;
	}

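	/*
	 * Second, shutdown all caches left from memory cgroups that are now
	 * offline.
	 */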
	list_for_each_entry_safe(c, c2, &s->memcg_params.list,
				 memcg_params.list)
		__shutdown_memcg_cache(c, release, need_rcu_barrier);

	list_splice(&busy, &s->memcg_params.list);

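	/*
	 * A cache being destroyed must be empty. In particular, this means
	 * that all per memcg caches attached to it must be empty too.
	 */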
	if (!list_empty(&s->memcg_params.list))
		return -EBUSY;
	return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	return 0;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
	if (!err)
		err = shutdown_cache(s, &release, &need_rcu_barrier);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}
EXPORT_SYMBOL(kmem_cache_destroy);

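/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */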
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	ret = __kmem_cache_shrink(cachep, false);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
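/* Create a cache during boot when no slab services are available yet */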
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

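/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */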
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

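/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */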
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}

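/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table
 * is kmalloc-67108864.
 */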
static struct {
	const char *name;
	unsigned long size;
} const kmalloc_info[] __initconst = {
	{NULL, 0},			{"kmalloc-96", 96},
	{"kmalloc-192", 192},		{"kmalloc-8", 8},
	{"kmalloc-16", 16},		{"kmalloc-32", 32},
	{"kmalloc-64", 64},		{"kmalloc-128", 128},
	{"kmalloc-256", 256},		{"kmalloc-512", 512},
	{"kmalloc-1024", 1024},		{"kmalloc-2048", 2048},
	{"kmalloc-4096", 4096},		{"kmalloc-8192", 8192},
	{"kmalloc-16384", 16384},	{"kmalloc-32768", 32768},
	{"kmalloc-65536", 65536},	{"kmalloc-131072", 131072},
	{"kmalloc-262144", 262144},	{"kmalloc-524288", 524288},
	{"kmalloc-1048576", 1048576},	{"kmalloc-2097152", 2097152},
	{"kmalloc-4194304", 4194304},	{"kmalloc-8388608", 8388608},
	{"kmalloc-16777216", 16777216},	{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

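/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 */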
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
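		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 byte.
		 */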
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
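		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */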
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

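/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */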
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

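		/*
		 * Caches that are not of the two-to-the-power-of size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */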
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

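	/* Kmalloc array is now usable */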
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

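/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */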
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
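	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */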
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

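/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */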
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

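/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */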
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

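/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */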
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

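/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */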
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

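/* Tracepoints definitions. */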
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);