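/*
 * Common slab allocator code shared by SLAB, SLUB and SLOB: cache
 * creation/destruction, cache merging, the kmalloc cache arrays,
 * memcg-aware child caches and the /proc/slabinfo interface.
 */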
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_HARDENED_USERCOPY
bool usercopy_fallback __ro_after_init =
		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
module_param(usercopy_fallback, bool, 0400);
MODULE_PARM_DESC(usercopy_fallback,
		"WARN instead of reject usercopy whitelist violations");
#endif

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

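/*
 * Set of flags that will prevent slab merging.
 */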
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

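/*
 * Determine the size of a slab object.
 */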
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		if (s)
			kmem_cache_free(s, p[i]);
		else
			kfree(p[i]);
	}
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
			    void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}

#ifdef CONFIG_MEMCG_KMEM

LIST_HEAD(slab_root_caches);
static DEFINE_SPINLOCK(memcg_kmem_wq_lock);

static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref);

void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.root_cache = NULL;
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
	INIT_LIST_HEAD(&s->memcg_params.children);
	s->memcg_params.dying = false;
}

static int init_memcg_params(struct kmem_cache *s,
			     struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (root_cache) {
		int ret = percpu_ref_init(&s->memcg_params.refcnt,
					  kmemcg_cache_shutdown,
					  0, GFP_KERNEL);
		if (ret)
			return ret;

		s->memcg_params.root_cache = root_cache;
		INIT_LIST_HEAD(&s->memcg_params.children_node);
		INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kvzalloc(sizeof(struct memcg_cache_array) +
		       memcg_nr_cache_ids * sizeof(void *),
		       GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
	} else {
		mem_cgroup_put(s->memcg_params.memcg);
		WRITE_ONCE(s->memcg_params.memcg, NULL);
		percpu_ref_exit(&s->memcg_params.refcnt);
	}
}

static void free_memcg_params(struct rcu_head *rcu)
{
	struct memcg_cache_array *old;

	old = container_of(rcu, struct memcg_cache_array, rcu);
	kvfree(old);
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	new = kvzalloc(sizeof(struct memcg_cache_array) +
		       new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		call_rcu(&old->rcu, free_memcg_params);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
		ret = update_memcg_params(s, num_memcgs);
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}

void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)
{
	if (is_root_cache(s)) {
		list_add(&s->root_caches_node, &slab_root_caches);
	} else {
		css_get(&memcg->css);
		s->memcg_params.memcg = memcg;
		list_add(&s->memcg_params.children_node,
			 &s->memcg_params.root_cache->memcg_params.children);
		list_add(&s->memcg_params.kmem_caches_node,
			 &s->memcg_params.memcg->kmem_caches);
	}
}

static void memcg_unlink_cache(struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		list_del(&s->root_caches_node);
	} else {
		list_del(&s->memcg_params.children_node);
		list_del(&s->memcg_params.kmem_caches_node);
	}
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
				    struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_unlink_cache(struct kmem_cache *s)
{
}
#endif

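/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */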
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

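/*
 * A cache must not be merged if merging is disabled, if it carries one of
 * the SLAB_NEVER_MERGE debug flags, if it is a memcg child cache, if it has
 * a constructor or a usercopy region, or if it is an internal boot cache
 * (negative refcount).
 */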
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	if (s->usersize)
		return 1;

	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;

		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
	s->useroffset = useroffset;
	s->usersize = usersize;

	err = init_memcg_params(s, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	memcg_link_cache(s, memcg);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

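/*
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects, or NULL.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */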
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	flags &= CACHE_CREATE_MASK;

	if (WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);

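/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags (e.g. SLAB_HWCACHE_ALIGN, SLAB_POISON, SLAB_RED_ZONE).
 * @ctor: A constructor for the objects, or NULL.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * A minimal usage sketch (illustrative only; "foo" and struct foo are
 * hypothetical):
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, obj);
 *	kmem_cache_destroy(foo_cache);
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */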
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	memcg_unlink_cache(s);
	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
#endif
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_unlink(s);
		sysfs_slab_release(s);
#else
		slab_kmem_cache_release(s);
#endif
	}

	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
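/*
 * memcg_create_kmem_cache - create a memcg child cache for a root cache.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * Attempts to create a kmem cache that will serve allocation requests going
 * from @memcg to @root_cache. The new cache inherits properties from its
 * parent.
 */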
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1];
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	if (memcg->kmem_state != KMEM_ONLINE)
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
			       css->serial_nr, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->align,
			 root_cache->flags & CACHE_CREATE_MASK,
			 root_cache->useroffset, root_cache->usersize,
			 root_cache->ctor, memcg, root_cache);
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static void kmemcg_workfn(struct work_struct *work)
{
	struct kmem_cache *s = container_of(work, struct kmem_cache,
					    memcg_params.work);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	s->memcg_params.work_fn(s);
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static void kmemcg_rcufn(struct rcu_head *head)
{
	struct kmem_cache *s = container_of(head, struct kmem_cache,
					    memcg_params.rcu_head);

	INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
	queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
}

static void kmemcg_cache_shutdown_fn(struct kmem_cache *s)
{
	WARN_ON(shutdown_cache(s));
}

static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref)
{
	struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache,
					    memcg_params.refcnt);
	unsigned long flags;

	spin_lock_irqsave(&memcg_kmem_wq_lock, flags);
	if (s->memcg_params.root_cache->memcg_params.dying)
		goto unlock;

	s->memcg_params.work_fn = kmemcg_cache_shutdown_fn;
	INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
	queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);

unlock:
	spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags);
}

static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
{
	__kmemcg_cache_deactivate_after_rcu(s);
	percpu_ref_kill(&s->memcg_params.refcnt);
}

static void kmemcg_cache_deactivate(struct kmem_cache *s)
{
	if (WARN_ON_ONCE(is_root_cache(s)))
		return;

	__kmemcg_cache_deactivate(s);
	s->flags |= SLAB_DEACTIVATED;

	spin_lock_irq(&memcg_kmem_wq_lock);
	if (s->memcg_params.root_cache->memcg_params.dying)
		goto unlock;

	s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu;
	call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
unlock:
	spin_unlock_irq(&memcg_kmem_wq_lock);
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg,
				  struct mem_cgroup *parent)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;
	unsigned int nr_reparented;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		kmemcg_cache_deactivate(c);
		arr->entries[idx] = NULL;
	}
	nr_reparented = 0;
	list_for_each_entry(s, &memcg->kmem_caches,
			    memcg_params.kmem_caches_node) {
		WRITE_ONCE(s->memcg_params.memcg, parent);
		css_put(&memcg->css);
		nr_reparented++;
	}
	if (nr_reparented) {
		list_splice_init(&memcg->kmem_caches,
				 &parent->kmem_caches);
		css_get_many(&parent->css, nr_reparented);
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int shutdown_memcg_caches(struct kmem_cache *s)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (shutdown_cache(c))
			list_move(&c->memcg_params.children_node, &busy);
		else
			arr->entries[i] = NULL;
	}

	list_for_each_entry_safe(c, c2, &s->memcg_params.children,
				 memcg_params.children_node)
		shutdown_cache(c);

	list_splice(&busy, &s->memcg_params.children);

	if (!list_empty(&s->memcg_params.children))
		return -EBUSY;
	return 0;
}

static void flush_memcg_workqueue(struct kmem_cache *s)
{
	spin_lock_irq(&memcg_kmem_wq_lock);
	s->memcg_params.dying = true;
	spin_unlock_irq(&memcg_kmem_wq_lock);

	rcu_barrier();

	flush_workqueue(memcg_kmem_cache_wq);
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s)
{
	return 0;
}

static inline void flush_memcg_workqueue(struct kmem_cache *s)
{
}
#endif

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err;

	if (unlikely(!s))
		return;

	flush_memcg_workqueue(s);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s);
	if (!err)
		err = shutdown_cache(s);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

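/*
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */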
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

void kmem_cache_shrink_all(struct kmem_cache *s)
{
	struct kmem_cache *c;

	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || !is_root_cache(s)) {
		kmem_cache_shrink(s);
		return;
	}

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(s);
	__kmem_cache_shrink(s);

	mutex_lock(&slab_mutex);
	for_each_memcg_cache(c, s) {
		if (s->flags & SLAB_DEACTIVATED)
			continue;
		kasan_cache_shrink(c);
		__kmem_cache_shrink(c);
	}
	mutex_unlock(&slab_mutex);
	put_online_mems();
	put_online_cpus();
}

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB

void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	if (is_power_of_2(size))
		align = max(align, size);
	s->align = calculate_alignment(flags, align, size);

	s->useroffset = useroffset;
	s->usersize = usersize;

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;
}

struct kmem_cache *__init create_kmalloc_cache(const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags, useroffset, usersize);
	list_add(&s->list, &slab_caches);
	memcg_link_cache(s, NULL);
	s->refcount = 1;
	return s;
}

struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ };
EXPORT_SYMBOL(kmalloc_caches);

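/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non
 * power of two cache sizes there. The size of larger slabs can be
 * determined using fls.
 */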
static u8 size_index[24] __ro_after_init = {
	3, 4, 5, 5, 6, 6, 6, 6,
	1, 1, 1, 1, 7, 7, 7, 7,
	2, 2, 2, 2, 2, 2, 2, 2
};

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

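/*
 * Find the kmem_cache structure that serves a given size of
 * allocation.
 */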
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	unsigned int index;

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else {
		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
			return NULL;
		index = fls(size - 1);
	}

	return kmalloc_caches[kmalloc_type(flags)][index];
}

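/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table
 * is kmalloc-67108864.
 */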
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	{NULL, 0},		{"kmalloc-96", 96},
	{"kmalloc-192", 192},	{"kmalloc-8", 8},
	{"kmalloc-16", 16},	{"kmalloc-32", 32},
	{"kmalloc-64", 64},	{"kmalloc-128", 128},
	{"kmalloc-256", 256},	{"kmalloc-512", 512},
	{"kmalloc-1k", 1024},	{"kmalloc-2k", 2048},
	{"kmalloc-4k", 4096},	{"kmalloc-8k", 8192},
	{"kmalloc-16k", 16384},	{"kmalloc-32k", 32768},
	{"kmalloc-64k", 65536},	{"kmalloc-128k", 131072},
	{"kmalloc-256k", 262144},	{"kmalloc-512k", 524288},
	{"kmalloc-1M", 1048576},	{"kmalloc-2M", 2097152},
	{"kmalloc-4M", 4194304},	{"kmalloc-8M", 8388608},
	{"kmalloc-16M", 16777216},	{"kmalloc-32M", 33554432},
	{"kmalloc-64M", 67108864}
};

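/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 */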
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static const char *
kmalloc_cache_name(const char *prefix, unsigned int size)
{
	static const char units[3] = "\0kM";
	int idx = 0;

	while (size >= 1024 && (size % 1024 == 0)) {
		size /= 1024;
		idx++;
	}

	return kasprintf(GFP_NOWAIT, "%s-%u%c", prefix, size, units[idx]);
}

static void __init
new_kmalloc_cache(int idx, int type, slab_flags_t flags)
{
	const char *name;

	if (type == KMALLOC_RECLAIM) {
		flags |= SLAB_RECLAIM_ACCOUNT;
		name = kmalloc_cache_name("kmalloc-rcl",
					  kmalloc_info[idx].size);
		BUG_ON(!name);
	} else {
		name = kmalloc_info[idx].name;
	}

	kmalloc_caches[type][idx] = create_kmalloc_cache(name,
					kmalloc_info[idx].size, flags, 0,
					kmalloc_info[idx].size);
}

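/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */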
void __init create_kmalloc_caches(slab_flags_t flags)
{
	int i, type;

	for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type, flags);

			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
					!kmalloc_caches[type][1])
				new_kmalloc_cache(1, type, flags);
			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
					!kmalloc_caches[type][2])
				new_kmalloc_cache(2, type, flags);
		}
	}

	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];

		if (s) {
			unsigned int size = kmalloc_size(i);
			const char *n = kmalloc_cache_name("dma-kmalloc", size);

			BUG_ON(!n);
			kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
				n, size, SLAB_CACHE_DMA | flags, 0, 0);
		}
	}
#endif
}
#endif

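/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */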
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = NULL;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);
	if (likely(page)) {
		ret = page_address(page);
		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
				    1 << order);
	}
	ret = kasan_kmalloc_large(ret, size, flags);

	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM

static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}

int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (0600)
#else
#define SLABINFO_RIGHTS (0400)
#endif

static void print_slabinfo_header(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_root_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_root_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);

	if (p == slab_root_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s, *s2;
	struct slabinfo sinfo;

	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name Used Total\n");

	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

#if defined(CONFIG_MEMCG)
void *memcg_slab_start(struct seq_file *m, loff_t *pos)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	mutex_lock(&slab_mutex);
	return seq_list_start(&memcg->kmem_caches, *pos);
}

void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	return seq_list_next(p, &memcg->kmem_caches, pos);
}

void memcg_slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache,
					  memcg_params.kmem_caches_node);
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	if (p == memcg->kmem_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}
#endif

static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
		    &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM)

static int memcg_slabinfo_show(struct seq_file *m, void *unused)
{
	struct kmem_cache *s, *c;
	struct slabinfo sinfo;

	mutex_lock(&slab_mutex);
	seq_puts(m, "# <name> <css_id[:dead|deact]> <active_objs> <num_objs>");
	seq_puts(m, " <active_slabs> <num_slabs>\n");
	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
		if (list_empty(&s->memcg_params.children))
			continue;

		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(s, &sinfo);
		seq_printf(m, "%-17s root %6lu %6lu %6lu %6lu\n",
			   cache_name(s), sinfo.active_objs, sinfo.num_objs,
			   sinfo.active_slabs, sinfo.num_slabs);

		for_each_memcg_cache(c, s) {
			struct cgroup_subsys_state *css;
			char *status = "";

			css = &c->memcg_params.memcg->css;
			if (!(css->flags & CSS_ONLINE))
				status = ":dead";
			else if (c->flags & SLAB_DEACTIVATED)
				status = ":deact";

			memset(&sinfo, 0, sizeof(sinfo));
			get_slabinfo(c, &sinfo);
			seq_printf(m, "%-17s %4d%-6s %6lu %6lu %6lu %6lu\n",
				   cache_name(c), css->id, status,
				   sinfo.active_objs, sinfo.num_objs,
				   sinfo.active_slabs, sinfo.num_slabs);
		}
	}
	mutex_unlock(&slab_mutex);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memcg_slabinfo);

static int __init memcg_slabinfo_init(void)
{
	debugfs_create_file("memcg_slabinfo", S_IFREG | S_IRUGO,
			    NULL, NULL, &memcg_slabinfo_fops);
	return 0;
}

late_initcall(memcg_slabinfo_init);
#endif
#endif

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

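/*
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */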
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

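/*
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be a
 * good deal bigger than the requested buffer size passed to kmalloc(), so
 * be careful when using it in performance sensitive code.
 */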
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

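/*
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that @objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 *
 * Return: size of the actual memory used by @objp in bytes
 */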
size_t ksize(const void *objp)
{
	size_t size;

	if (WARN_ON_ONCE(!objp))
		return 0;

	if (unlikely(objp == ZERO_SIZE_PTR) || !__kasan_check_read(objp, 1))
		return 0;

	size = __ksize(objp);

	kasan_unpoison_shadow(objp, size);
	return size;
}
EXPORT_SYMBOL(ksize);

EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
	if (__should_failslab(s, gfpflags))
		return -ENOMEM;
	return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);