// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 * (common to SLAB, SLUB and SLOB).
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_HARDENED_USERCOPY
bool usercopy_fallback __ro_after_init =
		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
module_param(usercopy_fallback, bool, 0400);
MODULE_PARM_DESC(usercopy_fallback,
		 "WARN instead of reject usercopy whitelist violations");
#endif

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | kasan_never_merge())

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
			    void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

	if (s->usersize)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;

		/*
		 * Check if alignment is compatible.
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
	goto out;
}

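/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern to catch
 * references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */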
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constraint the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);

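/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are the same as for kmem_cache_create_usercopy() above; this
 * variant simply creates a cache without a usercopy whitelist region.
 *
 * A minimal usage sketch (illustrative only; "foo" and foo_cache are
 * made-up names, not part of this file):
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */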
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
	 * through RCU and the associated kmem_cache are dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished.  As rcu_barrier()
	 * waits for all in-flight RCU operations to finish, we can use
	 * rcu_barrier() here to wait before releasing the kmem_caches.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
#endif
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}

	return 0;
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_cache(s);
	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

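/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */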
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	/*
	 * For power of two sizes, guarantee alignment equal to the
	 * size.
	 */
	if (is_power_of_2(size))
		align = max(align, size);
	s->align = calculate_alignment(flags, align, size);

	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags, useroffset, usersize);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ };
EXPORT_SYMBOL(kmalloc_caches);

/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

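/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */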
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	unsigned int index;

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else {
		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
			return NULL;
		index = fls(size - 1);
	}

	return kmalloc_caches[kmalloc_type(flags)][index];
}

#ifdef CONFIG_ZONE_DMA
#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
	.name[KMALLOC_DMA]     = "dma-kmalloc-" #__short_size,	\
	.size = __size,						\
}
#else
#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
	.size = __size,						\
}
#endif

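/*
 * kmalloc_info[] is used to look up the kmalloc cache names and sizes by
 * index. The table goes up to kmalloc-67108864 (2^26), the largest size in
 * the array below.
 */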
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M),
	INIT_KMALLOC_INFO(4194304, 4M),
	INIT_KMALLOC_INFO(8388608, 8M),
	INIT_KMALLOC_INFO(16777216, 16M),
	INIT_KMALLOC_INFO(33554432, 32M),
	INIT_KMALLOC_INFO(67108864, 64M)
};

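/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array (i.e. KMALLOC_MIN_SIZE larger than
 * 8 bytes). The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 */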
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
	if (type == KMALLOC_RECLAIM)
		flags |= SLAB_RECLAIM_ACCOUNT;

	kmalloc_caches[type][idx] = create_kmalloc_cache(
					kmalloc_info[idx].name[type],
					kmalloc_info[idx].size, flags, 0,
					kmalloc_info[idx].size);
}

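/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */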
void __init create_kmalloc_caches(slab_flags_t flags)
{
	int i;
	enum kmalloc_cache_type type;

	for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type, flags);

			/*
			 * Caches that are not of the two-to-the-power-of size.
			 * These have to be created immediately after the
			 * earlier power of two caches
			 */
			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
					!kmalloc_caches[type][1])
				new_kmalloc_cache(1, type, flags);
			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
					!kmalloc_caches[type][2])
				new_kmalloc_cache(2, type, flags);
		}
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];

		if (s) {
			kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
				kmalloc_info[i].name[KMALLOC_DMA],
				kmalloc_info[i].size,
				SLAB_CACHE_DMA | flags, 0,
				kmalloc_info[i].size);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}

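/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */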
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = NULL;
	struct page *page;

	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);
	if (likely(page)) {
		ret = page_address(page);
		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
				    PAGE_SIZE << order);
	}
	ret = kasan_kmalloc_large(ret, size, flags);
	/* As ret might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (0600)
#else
#define SLABINFO_RIGHTS (0400)
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Grabbing slab_mutex here may sleep, which we would rather avoid
	 * in the OOM path, but walking slab_caches without it is unsafe.
	 * Use mutex_trylock() and simply skip the dump if the lock is
	 * contended.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name Used Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

#if defined(CONFIG_MEMCG_KMEM)
int memcg_slab_show(struct seq_file *m, void *p)
{
	/*
	 * Deprecated: per-memcg slab statistics are no longer reported
	 * here; this stub only keeps the interface in place.
	 */
	return 0;
}
#endif

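/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */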
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_write	= slabinfo_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks;

	ks = ksize(p);

	if (ks >= new_size) {
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

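/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */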
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

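/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */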
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks)
		memzero_explicit(mem, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);

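/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 *
 * Return: size of the actual memory used by @objp in bytes
 */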
size_t ksize(const void *objp)
{
	size_t size;

	/*
	 * We need to check that the pointed to object is valid, and only then
	 * unpoison the memory below. Using __kasan_check_read() here
	 * generates a more useful KASAN report at the time ksize() is called
	 * (rather than later, when the behaviour could be undefined due to a
	 * potential use-after-free or double-free).
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
		return 0;

	size = __ksize(objp);
	/*
	 * ksize() callers may use the whole allocated area,
	 * so we need to unpoison it.
	 */
	kasan_unpoison_range(objp, size);
	return size;
}
EXPORT_SYMBOL(ksize);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
	if (__should_failslab(s, gfpflags))
		return -ENOMEM;
	return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);