/*
 * Slab allocator functions that are independent of the allocation strategy
 * (SLAB, SLUB and SLOB): common cache creation/destruction, the kmalloc
 * size tables and the /proc/slabinfo interface.
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_HARDENED_USERCOPY
bool usercopy_fallback __ro_after_init =
		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
module_param(usercopy_fallback, bool, 0400);
MODULE_PARM_DESC(usercopy_fallback,
		"WARN instead of reject usercopy whitelist violations");
#endif

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging.
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);

/*
 * SLUB also accepts "slub_merge" to re-enable merging when the kernel
 * was built with CONFIG_SLAB_MERGE_DEFAULT disabled.
 */
static int __init setup_slub_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}
__setup_param("slub_merge", slub_merge, setup_slub_merge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object.
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* a space confuses /proc/slabinfo parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
								void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}
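
/*
 * Illustrative sketch (not part of this file): a caller batching object
 * allocation through the public bulk API that these generic fallbacks back.
 * "my_cache" and the array size are hypothetical.
 *
 *	void *objs[16];
 *	int got;
 *
 *	got = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *	if (!got)
 *		return -ENOMEM;	// all-or-nothing: 0 means nothing was allocated
 *	...
 *	kmem_cache_free_bulk(my_cache, got, objs);
 */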

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
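
/*
 * Worked example (illustrative): with SLAB_HWCACHE_ALIGN, a 64-byte cache
 * line and a 20-byte object, ralign is halved from 64 to 32 (20 <= 32 but
 * 20 > 16), so the object ends up 32-byte aligned unless the caller asked
 * for more, and never less than ARCH_SLAB_MINALIGN.
 */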

/*
 * Find a mergeable slab cache.
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

	if (s->usersize)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;

		/*
		 * Check if alignment is compatible.
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial when the objects are accessed on
 * hot paths.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constraint the set of valid flags to a
	 * subset of all flags. We expect them to define CACHE_CREATE_MASK
	 * in this case, and we'll just provide a generic check for it here.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
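
/*
 * Illustrative sketch (not part of this file): a subsystem whitelisting
 * only the part of its object that is copied to/from userspace.  The
 * "struct foo" layout and cache name are hypothetical.
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char payload[128];	// the only usercopy-visible region
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo",
 *			sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
 *			offsetof(struct foo, payload),
 *			sizeof_field(struct foo, payload), NULL);
 */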

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * Equivalent to kmem_cache_create_usercopy() with no usercopy region;
 * see above for the meaning of the individual %SLAB_* flags.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
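
/*
 * Illustrative sketch (not part of this file): typical module-init usage.
 * The names are hypothetical.
 *
 *	static struct kmem_cache *widget_cachep;
 *
 *	static int __init widget_init(void)
 *	{
 *		widget_cachep = kmem_cache_create("widget",
 *				sizeof(struct widget), 0,
 *				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 *		return widget_cachep ? 0 : -ENOMEM;
 *	}
 *
 * With SLAB_PANIC the NULL check is redundant (creation failure panics);
 * it is kept here only for illustration.
 */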

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list.  Their slab pages may only be
	 * freed after an RCU grace period, so splice the list under
	 * slab_mutex, wait for the grace period with rcu_barrier() and
	 * only then release the sysfs/cache resources.  A single work
	 * item serves all pending caches; rcu_barrier() therefore covers
	 * every cache spliced onto the local list.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
#endif
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}

	return 0;
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_cache(s);
	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
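
/*
 * Illustrative sketch (not part of this file): kmem_cache_destroy() is the
 * module-exit counterpart of kmem_cache_create().  All objects must have
 * been freed back to the cache first, otherwise shutdown_cache() fails and
 * the warning above is printed.  The names are hypothetical.
 *
 *	static void __exit widget_exit(void)
 *	{
 *		kmem_cache_destroy(widget_cachep);
 *	}
 */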

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	/*
	 * For power of two sizes, guarantee natural alignment for kmalloc
	 * caches, regardless of SL*B debugging options.
	 */
	if (is_power_of_2(size))
		align = max(align, size);
	s->align = calculate_alignment(flags, align, size);

	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags, useroffset, usersize);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init;
EXPORT_SYMBOL(kmalloc_caches);

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}
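
/*
 * Worked example (illustrative): a 72-byte request maps to
 * size_index_elem(72) == (72 - 1) / 8 == 8, and size_index[8] == 1, i.e.
 * the kmalloc-96 cache (index 1 is the 96-byte entry of kmalloc_info[]
 * below).  Requests above 192 bytes skip this table and use fls() instead.
 */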

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation.
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	unsigned int index;

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else {
		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
			return NULL;
		index = fls(size - 1);
	}

	return kmalloc_caches[kmalloc_type(flags)][index];
}
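
/*
 * Worked example (illustrative): for a 1000-byte GFP_KERNEL request,
 * size > 192 so index = fls(999) = 10, selecting kmalloc-1024 from the
 * KMALLOC_NORMAL row; with __GFP_RECLAIMABLE the same index is looked up
 * in the KMALLOC_RECLAIM ("kmalloc-rcl-*") row instead.
 */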

#ifdef CONFIG_ZONE_DMA
#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
	.name[KMALLOC_DMA]     = "dma-kmalloc-" #__short_size,	\
	.size = __size,						\
}
#else
#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
	.size = __size,						\
}
#endif

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table
 * is kmalloc-67108864.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M),
	INIT_KMALLOC_INFO(4194304, 4M),
	INIT_KMALLOC_INFO(8388608, 8M),
	INIT_KMALLOC_INFO(16777216, 16M),
	INIT_KMALLOC_INFO(33554432, 32M),
	INIT_KMALLOC_INFO(67108864, 64M)
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Requests below KMALLOC_MIN_SIZE are redirected to the smallest cache
 * that really exists, and the 96/192 byte caches are skipped when the
 * minimum size makes them impossible.
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
	if (type == KMALLOC_RECLAIM)
		flags |= SLAB_RECLAIM_ACCOUNT;

	kmalloc_caches[type][idx] = create_kmalloc_cache(
					kmalloc_info[idx].name[type],
					kmalloc_info[idx].size, flags, 0,
					kmalloc_info[idx].size);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(slab_flags_t flags)
{
	int i;
	enum kmalloc_cache_type type;

	for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type, flags);

			/*
			 * Caches that are not of the two-to-the-power-of size.
			 * These have to be created immediately after the
			 * earlier power of two caches.
			 */
			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
					!kmalloc_caches[type][1])
				new_kmalloc_cache(1, type, flags);
			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
					!kmalloc_caches[type][2])
				new_kmalloc_cache(2, type, flags);
		}
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];

		if (s) {
			kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
				kmalloc_info[i].name[KMALLOC_DMA],
				kmalloc_info[i].size,
				SLAB_CACHE_DMA | flags, 0,
				kmalloc_info[i].size);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = NULL;
	struct page *page;

	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);
	if (likely(page)) {
		ret = page_address(page);
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      PAGE_SIZE << order);
	}
	ret = kasan_kmalloc_large(ret, size, flags);
	/* As ret might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);
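
/*
 * Illustrative note (not part of this file): allocations above
 * KMALLOC_MAX_CACHE_SIZE reach this path, e.g. kmalloc(1 << 20, GFP_KERNEL)
 * on a typical config.  Because the pages are allocated with __GFP_COMP,
 * the result is still freed with plain kfree(), which detects the compound
 * page and returns it to the page allocator.
 */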

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}
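
/*
 * Worked example (illustrative): for count == 4 the list starts as
 * {0, 1, 2, 3}.  The loop then swaps index 3 with a random index in [0, 3],
 * index 2 with one in [0, 2] and index 1 with one in [0, 1], producing an
 * essentially uniform random permutation; the "rand % (i + 1)" step only
 * adds the usual tiny modulo bias.
 */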

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (0600)
#else
#define SLABINFO_RIGHTS (0400)
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s, *s2;
	struct slabinfo sinfo;

	/*
	 * Taking slab_mutex here is risky: we do not want to sleep in the
	 * OOM path, yet walking the list without it risks a crash.  Use
	 * mutex_trylock() and simply skip the dump if the lock cannot be
	 * taken.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name                      Used          Total\n");

	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

#if defined(CONFIG_MEMCG_KMEM)
int memcg_slab_show(struct seq_file *m, void *p)
{
	/*
	 * Deprecated: per-memcg slabinfo is no longer produced here.
	 * See tools/cgroup/slabinfo.py instead.
	 */
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks;

	ks = ksize(p);

	if (ks >= new_size) {
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
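
/*
 * Illustrative sketch (not part of this file): growing a buffer with
 * krealloc().  Note the use of a temporary so the old buffer is not leaked
 * when the reallocation fails; the names are hypothetical.
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		goto err_keep_old_buf;	// "buf" is still valid here
 *	buf = new;
 */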

/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks)
		memzero_explicit(mem, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);
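
/*
 * Illustrative sketch (not part of this file): typical use is for key
 * material or other secrets, so they do not linger in freed slab memory.
 * The buffer name and length are hypothetical.
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kfree_sensitive(key);	// zeroes the whole allocation, then frees it
 */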

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
size_t ksize(const void *objp)
{
	size_t size;

	/*
	 * We need to check that the pointed to object is valid, and only then
	 * unpoison the shadow memory below. We use __kasan_check_read() to
	 * generate a more useful report at the time ksize() is called (rather
	 * than later where behaviour is undefined due to potential
	 * use-after-free or double-free).
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
		return 0;

	size = __ksize(objp);
	/*
	 * We assume that ksize callers could use the whole allocated area,
	 * so we need to unpoison this area.
	 */
	kasan_unpoison_shadow(objp, size);
	return size;
}
EXPORT_SYMBOL(ksize);
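
/*
 * Illustrative sketch (not part of this file): a caller may legitimately
 * use the slack that the slab allocator rounded the request up to.  With
 * the default kmalloc geometry a 100-byte request is served from
 * kmalloc-128, so ksize() would report 128 here; the exact value depends
 * on the configuration.
 *
 *	p = kmalloc(100, GFP_KERNEL);
 *	if (p)
 *		avail = ksize(p);	// usable size, >= 100
 */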

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
	if (__should_failslab(s, gfpflags))
		return -ENOMEM;
	return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);

/*
 * Deprecated alias: kzfree() simply forwards to kfree_sensitive().  The
 * #undef drops any wrapper macro of the same name so the real symbol can
 * be defined and exported here.
 */
#undef kzfree
void kzfree(const void *p)
{
	kfree_sensitive(p);
}
EXPORT_SYMBOL(kzfree);