/*
 * linux/mm/slab.c
 *
 * SLAB allocator: each cache hands out fixed-size objects carved from
 * slabs of one or more pages.  Recently freed, cache-warm objects are
 * kept in per-CPU array caches; the remaining objects live on per-node
 * lists of full, partial and free slabs (struct kmem_cache_node).
 */

90#include <linux/slab.h>
91#include <linux/mm.h>
92#include <linux/poison.h>
93#include <linux/swap.h>
94#include <linux/cache.h>
95#include <linux/interrupt.h>
96#include <linux/init.h>
97#include <linux/compiler.h>
98#include <linux/cpuset.h>
99#include <linux/proc_fs.h>
100#include <linux/seq_file.h>
101#include <linux/notifier.h>
102#include <linux/kallsyms.h>
103#include <linux/cpu.h>
104#include <linux/sysctl.h>
105#include <linux/module.h>
106#include <linux/rcupdate.h>
107#include <linux/string.h>
108#include <linux/uaccess.h>
109#include <linux/nodemask.h>
110#include <linux/kmemleak.h>
111#include <linux/mempolicy.h>
112#include <linux/mutex.h>
113#include <linux/fault-inject.h>
114#include <linux/rtmutex.h>
115#include <linux/reciprocal_div.h>
116#include <linux/debugobjects.h>
117#include <linux/memory.h>
118#include <linux/prefetch.h>
119#include <linux/sched/task_stack.h>
120
121#include <net/sock.h>
122
123#include <asm/cacheflush.h>
124#include <asm/tlbflush.h>
125#include <asm/page.h>
126
127#include <trace/events/kmem.h>
128
129#include "internal.h"
130
131#include "slab.h"
132
/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables red zoning, last-user tracking and poisoning on
 *		  every cache where that is possible, even when the creator
 *		  did not ask for it.
 */
143#ifdef CONFIG_DEBUG_SLAB
144#define DEBUG 1
145#define STATS 1
146#define FORCED_DEBUG 1
147#else
148#define DEBUG 0
149#define STATS 0
150#define FORCED_DEBUG 0
151#endif
152
153
154#define BYTES_PER_WORD sizeof(void *)
155#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
156
157#ifndef ARCH_KMALLOC_FLAGS
158#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
159#endif
160
161#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
162 <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
163
164#if FREELIST_BYTE_INDEX
165typedef unsigned char freelist_idx_t;
166#else
167typedef unsigned short freelist_idx_t;
168#endif
169
170#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
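
/*
 * Illustrative sketch (hypothetical helper, assuming BITS_PER_BYTE == 8):
 * the freelist index width chosen above bounds how many objects one slab
 * may hold, which is what SLAB_OBJ_MAX_NUM expresses:
 *
 *	#define MAX_OBJS(type)	((1 << (sizeof(type) * 8)) - 1)
 *
 *	MAX_OBJS(unsigned char)  == 255
 *	MAX_OBJS(unsigned short) == 65535
 *
 * Small pages get the one-byte index to keep the on-slab freelist small;
 * larger pages need the two-byte index to address all their objects.
 */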
171
/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 */
184struct array_cache {
185 unsigned int avail;
186 unsigned int limit;
187 unsigned int batchcount;
188 unsigned int touched;
189 void *entry[];
190
191
192
193
194};
195
196struct alien_cache {
197 spinlock_t lock;
198 struct array_cache ac;
199};
200
201
202
203
204#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
205static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
206#define CACHE_CACHE 0
207#define SIZE_NODE (MAX_NUMNODES)
208
209static int drain_freelist(struct kmem_cache *cache,
210 struct kmem_cache_node *n, int tofree);
211static void free_block(struct kmem_cache *cachep, void **objpp, int len,
212 int node, struct list_head *list);
213static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
214static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
215static void cache_reap(struct work_struct *unused);
216
217static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
218 void **list);
219static inline void fixup_slab_list(struct kmem_cache *cachep,
220 struct kmem_cache_node *n, struct page *page,
221 void **list);
222static int slab_early_init = 1;
223
224#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
225
226static void kmem_cache_node_init(struct kmem_cache_node *parent)
227{
228 INIT_LIST_HEAD(&parent->slabs_full);
229 INIT_LIST_HEAD(&parent->slabs_partial);
230 INIT_LIST_HEAD(&parent->slabs_free);
231 parent->total_slabs = 0;
232 parent->free_slabs = 0;
233 parent->shared = NULL;
234 parent->alien = NULL;
235 parent->colour_next = 0;
236 spin_lock_init(&parent->list_lock);
237 parent->free_objects = 0;
238 parent->free_touched = 0;
239}
240
241#define MAKE_LIST(cachep, listp, slab, nodeid) \
242 do { \
243 INIT_LIST_HEAD(listp); \
244 list_splice(&get_node(cachep, nodeid)->slab, listp); \
245 } while (0)
246
247#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
248 do { \
249 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
250 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
251 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
252 } while (0)
253
254#define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000U)
255#define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000U)
256#define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB)
257#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
258
259#define BATCHREFILL_LIMIT 16
260
261
262
263
264
265
266
267#define REAPTIMEOUT_AC (2*HZ)
268#define REAPTIMEOUT_NODE (4*HZ)
269
270#if STATS
271#define STATS_INC_ACTIVE(x) ((x)->num_active++)
272#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
273#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
274#define STATS_INC_GROWN(x) ((x)->grown++)
275#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
276#define STATS_SET_HIGH(x) \
277 do { \
278 if ((x)->num_active > (x)->high_mark) \
279 (x)->high_mark = (x)->num_active; \
280 } while (0)
281#define STATS_INC_ERR(x) ((x)->errors++)
282#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
283#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
284#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
285#define STATS_SET_FREEABLE(x, i) \
286 do { \
287 if ((x)->max_freeable < i) \
288 (x)->max_freeable = i; \
289 } while (0)
290#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
291#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
292#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
293#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
294#else
295#define STATS_INC_ACTIVE(x) do { } while (0)
296#define STATS_DEC_ACTIVE(x) do { } while (0)
297#define STATS_INC_ALLOCED(x) do { } while (0)
298#define STATS_INC_GROWN(x) do { } while (0)
299#define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0)
300#define STATS_SET_HIGH(x) do { } while (0)
301#define STATS_INC_ERR(x) do { } while (0)
302#define STATS_INC_NODEALLOCS(x) do { } while (0)
303#define STATS_INC_NODEFREES(x) do { } while (0)
304#define STATS_INC_ACOVERFLOW(x) do { } while (0)
305#define STATS_SET_FREEABLE(x, i) do { } while (0)
306#define STATS_INC_ALLOCHIT(x) do { } while (0)
307#define STATS_INC_ALLOCMISS(x) do { } while (0)
308#define STATS_INC_FREEHIT(x) do { } while (0)
309#define STATS_INC_FREEMISS(x) do { } while (0)
310#endif
311
312#if DEBUG
/*
 * Memory layout of a debugged object, as implemented by the accessors
 * below:
 *
 *	objp + 0 .. obj_offset - 1:	padding; with SLAB_RED_ZONE the
 *					first redzone word sits immediately
 *					before the real object
 *	objp + obj_offset:		the real object (object_size bytes)
 *	end of the buffer:		the second redzone word and, with
 *					SLAB_STORE_USER, the last caller's
 *					address in the final BYTES_PER_WORD
 *
 * cachep->obj_offset and cachep->size account for these debug fields;
 * cachep->object_size does not.
 */
327static int obj_offset(struct kmem_cache *cachep)
328{
329 return cachep->obj_offset;
330}
331
332static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
333{
334 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
335 return (unsigned long long*) (objp + obj_offset(cachep) -
336 sizeof(unsigned long long));
337}
338
339static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
340{
341 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
342 if (cachep->flags & SLAB_STORE_USER)
343 return (unsigned long long *)(objp + cachep->size -
344 sizeof(unsigned long long) -
345 REDZONE_ALIGN);
346 return (unsigned long long *) (objp + cachep->size -
347 sizeof(unsigned long long));
348}
349
350static void **dbg_userword(struct kmem_cache *cachep, void *objp)
351{
352 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
353 return (void **)(objp + cachep->size - BYTES_PER_WORD);
354}
355
356#else
357
358#define obj_offset(x) 0
359#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
360#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
361#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
362
363#endif
364
365#ifdef CONFIG_DEBUG_SLAB_LEAK
366
367static inline bool is_store_user_clean(struct kmem_cache *cachep)
368{
369 return atomic_read(&cachep->store_user_clean) == 1;
370}
371
372static inline void set_store_user_clean(struct kmem_cache *cachep)
373{
374 atomic_set(&cachep->store_user_clean, 1);
375}
376
377static inline void set_store_user_dirty(struct kmem_cache *cachep)
378{
379 if (is_store_user_clean(cachep))
380 atomic_set(&cachep->store_user_clean, 0);
381}
382
383#else
384static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
385
386#endif
387
388
389
390
391
392#define SLAB_MAX_ORDER_HI 1
393#define SLAB_MAX_ORDER_LO 0
394static int slab_max_order = SLAB_MAX_ORDER_LO;
395static bool slab_max_order_set __initdata;
396
397static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
398 unsigned int idx)
399{
400 return page->s_mem + cache->size * idx;
401}
402
403#define BOOT_CPUCACHE_ENTRIES 1
404
405static struct kmem_cache kmem_cache_boot = {
406 .batchcount = 1,
407 .limit = BOOT_CPUCACHE_ENTRIES,
408 .shared = 1,
409 .size = sizeof(struct kmem_cache),
410 .name = "kmem_cache",
411};
412
413static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
414
415static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
416{
417 return this_cpu_ptr(cachep->cpu_cache);
418}
419
420
421
422
423static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
424 slab_flags_t flags, size_t *left_over)
425{
426 unsigned int num;
427 size_t slab_size = PAGE_SIZE << gfporder;
428
	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a slab is
	 * used for:
	 *
	 *	- @buffer_size bytes for each object
	 *	- One freelist_idx_t for each object
	 *
	 * We don't need to consider the alignment of the freelist because
	 * the freelist will be at the end of the slab page. The objects
	 * will be at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all page aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
446 if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
447 num = slab_size / buffer_size;
448 *left_over = slab_size % buffer_size;
449 } else {
450 num = slab_size / (buffer_size + sizeof(freelist_idx_t));
451 *left_over = slab_size %
452 (buffer_size + sizeof(freelist_idx_t));
453 }
454
455 return num;
456}
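
/*
 * Worked example (hypothetical sizes, assuming PAGE_SIZE == 4096 and
 * sizeof(freelist_idx_t) == 1): for a gfporder-0 slab of 256-byte
 * buffers, cache_estimate() yields
 *
 *	off-slab / objfreelist:	num = 4096 / 256 = 16, left_over = 0
 *	on-slab freelist:	num = 4096 / (256 + 1) = 15,
 *				left_over = 4096 - 15 * 257 = 241
 *
 * i.e. keeping the freelist on the slab costs one object here and leaves
 * 241 bytes of slack for cache colouring.
 */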
457
458#if DEBUG
459#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
460
461static void __slab_error(const char *function, struct kmem_cache *cachep,
462 char *msg)
463{
464 pr_err("slab error in %s(): cache `%s': %s\n",
465 function, cachep->name, msg);
466 dump_stack();
467 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
468}
469#endif
470
/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */
479static int use_alien_caches __read_mostly = 1;
480static int __init noaliencache_setup(char *s)
481{
482 use_alien_caches = 0;
483 return 1;
484}
485__setup("noaliencache", noaliencache_setup);
486
487static int __init slab_max_order_setup(char *str)
488{
489 get_option(&str, &slab_max_order);
490 slab_max_order = slab_max_order < 0 ? 0 :
491 min(slab_max_order, MAX_ORDER - 1);
492 slab_max_order_set = true;
493
494 return 1;
495}
496__setup("slab_max_order=", slab_max_order_setup);
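
/*
 * Example boot-time usage (hypothetical value):
 *
 *	slab_max_order=2
 *
 * caps the page order tried by calculate_slab_order() at 2.  Negative
 * values are clamped to 0 and values above MAX_ORDER - 1 are clamped
 * down by the parser above.
 */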
497
498#ifdef CONFIG_NUMA
499
500
501
502
503
504
505static DEFINE_PER_CPU(unsigned long, slab_reap_node);
506
507static void init_reap_node(int cpu)
508{
509 per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
510 node_online_map);
511}
512
513static void next_reap_node(void)
514{
515 int node = __this_cpu_read(slab_reap_node);
516
517 node = next_node_in(node, node_online_map);
518 __this_cpu_write(slab_reap_node, node);
519}
520
521#else
522#define init_reap_node(cpu) do { } while (0)
523#define next_reap_node(void) do { } while (0)
524#endif
525
526
527
528
529
530
531
532
533static void start_cpu_timer(int cpu)
534{
535 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
536
537 if (reap_work->work.func == NULL) {
538 init_reap_node(cpu);
539 INIT_DEFERRABLE_WORK(reap_work, cache_reap);
540 schedule_delayed_work_on(cpu, reap_work,
541 __round_jiffies_relative(HZ, cpu));
542 }
543}
544
545static void init_arraycache(struct array_cache *ac, int limit, int batch)
546{
547 if (ac) {
548 ac->avail = 0;
549 ac->limit = limit;
550 ac->batchcount = batch;
551 ac->touched = 0;
552 }
553}
554
555static struct array_cache *alloc_arraycache(int node, int entries,
556 int batchcount, gfp_t gfp)
557{
558 size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
559 struct array_cache *ac = NULL;
560
561 ac = kmalloc_node(memsize, gfp, node);
562
563
564
565
566
567
568
569 kmemleak_no_scan(ac);
570 init_arraycache(ac, entries, batchcount);
571 return ac;
572}
573
574static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
575 struct page *page, void *objp)
576{
577 struct kmem_cache_node *n;
578 int page_node;
579 LIST_HEAD(list);
580
581 page_node = page_to_nid(page);
582 n = get_node(cachep, page_node);
583
584 spin_lock(&n->list_lock);
585 free_block(cachep, &objp, 1, page_node, &list);
586 spin_unlock(&n->list_lock);
587
588 slabs_destroy(cachep, &list);
589}
590
591
592
593
594
595
596
597static int transfer_objects(struct array_cache *to,
598 struct array_cache *from, unsigned int max)
599{
600
601 int nr = min3(from->avail, max, to->limit - to->avail);
602
603 if (!nr)
604 return 0;
605
606 memcpy(to->entry + to->avail, from->entry + from->avail -nr,
607 sizeof(void *) *nr);
608
609 from->avail -= nr;
610 to->avail += nr;
611 return nr;
612}
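
/*
 * Illustrative sketch (hypothetical state): transfer_objects() moves the
 * most recently freed entries, i.e. the tail of @from, onto the tail of
 * @to, preserving LIFO order in both arrays.  With
 *
 *	from->avail = 5, to->avail = 2, to->limit = 4, max = 8
 *
 * nr = min3(5, 8, 4 - 2) = 2, so from->entry[3] and from->entry[4] are
 * copied to to->entry[2] and to->entry[3]; afterwards from->avail == 3
 * and to->avail == 4.
 */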
613
614
615static __always_inline void __free_one(struct array_cache *ac, void *objp)
616{
617
618 if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
619 WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
620 return;
621 ac->entry[ac->avail++] = objp;
622}
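
/*
 * Illustrative sketch: with CONFIG_SLAB_FREELIST_HARDENED, an immediate
 * double free is caught because the second free finds the same pointer
 * still on top of the LIFO array:
 *
 *	kmem_cache_free(cachep, objp);
 *	kmem_cache_free(cachep, objp);
 *
 * The second call trips the WARN_ON_ONCE() above since objp is still
 * ac->entry[ac->avail - 1].  Only back-to-back duplicates are detected;
 * a duplicate buried deeper in the array is not.
 */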
623
624#ifndef CONFIG_NUMA
625
626#define drain_alien_cache(cachep, alien) do { } while (0)
627#define reap_alien(cachep, n) do { } while (0)
628
629static inline struct alien_cache **alloc_alien_cache(int node,
630 int limit, gfp_t gfp)
631{
632 return NULL;
633}
634
635static inline void free_alien_cache(struct alien_cache **ac_ptr)
636{
637}
638
639static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
640{
641 return 0;
642}
643
644static inline void *alternate_node_alloc(struct kmem_cache *cachep,
645 gfp_t flags)
646{
647 return NULL;
648}
649
650static inline void *____cache_alloc_node(struct kmem_cache *cachep,
651 gfp_t flags, int nodeid)
652{
653 return NULL;
654}
655
656static inline gfp_t gfp_exact_node(gfp_t flags)
657{
658 return flags & ~__GFP_NOFAIL;
659}
660
661#else
662
663static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
664static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
665
666static struct alien_cache *__alloc_alien_cache(int node, int entries,
667 int batch, gfp_t gfp)
668{
669 size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
670 struct alien_cache *alc = NULL;
671
672 alc = kmalloc_node(memsize, gfp, node);
673 if (alc) {
674 kmemleak_no_scan(alc);
675 init_arraycache(&alc->ac, entries, batch);
676 spin_lock_init(&alc->lock);
677 }
678 return alc;
679}
680
681static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
682{
683 struct alien_cache **alc_ptr;
684 int i;
685
686 if (limit > 1)
687 limit = 12;
688 alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
689 if (!alc_ptr)
690 return NULL;
691
692 for_each_node(i) {
693 if (i == node || !node_online(i))
694 continue;
695 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
696 if (!alc_ptr[i]) {
697 for (i--; i >= 0; i--)
698 kfree(alc_ptr[i]);
699 kfree(alc_ptr);
700 return NULL;
701 }
702 }
703 return alc_ptr;
704}
705
706static void free_alien_cache(struct alien_cache **alc_ptr)
707{
708 int i;
709
710 if (!alc_ptr)
711 return;
712 for_each_node(i)
713 kfree(alc_ptr[i]);
714 kfree(alc_ptr);
715}
716
717static void __drain_alien_cache(struct kmem_cache *cachep,
718 struct array_cache *ac, int node,
719 struct list_head *list)
720{
721 struct kmem_cache_node *n = get_node(cachep, node);
722
723 if (ac->avail) {
724 spin_lock(&n->list_lock);
725
726
727
728
729
730 if (n->shared)
731 transfer_objects(n->shared, ac, ac->limit);
732
733 free_block(cachep, ac->entry, ac->avail, node, list);
734 ac->avail = 0;
735 spin_unlock(&n->list_lock);
736 }
737}
738
739
740
741
742static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
743{
744 int node = __this_cpu_read(slab_reap_node);
745
746 if (n->alien) {
747 struct alien_cache *alc = n->alien[node];
748 struct array_cache *ac;
749
750 if (alc) {
751 ac = &alc->ac;
752 if (ac->avail && spin_trylock_irq(&alc->lock)) {
753 LIST_HEAD(list);
754
755 __drain_alien_cache(cachep, ac, node, &list);
756 spin_unlock_irq(&alc->lock);
757 slabs_destroy(cachep, &list);
758 }
759 }
760 }
761}
762
763static void drain_alien_cache(struct kmem_cache *cachep,
764 struct alien_cache **alien)
765{
766 int i = 0;
767 struct alien_cache *alc;
768 struct array_cache *ac;
769 unsigned long flags;
770
771 for_each_online_node(i) {
772 alc = alien[i];
773 if (alc) {
774 LIST_HEAD(list);
775
776 ac = &alc->ac;
777 spin_lock_irqsave(&alc->lock, flags);
778 __drain_alien_cache(cachep, ac, i, &list);
779 spin_unlock_irqrestore(&alc->lock, flags);
780 slabs_destroy(cachep, &list);
781 }
782 }
783}
784
785static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
786 int node, int page_node)
787{
788 struct kmem_cache_node *n;
789 struct alien_cache *alien = NULL;
790 struct array_cache *ac;
791 LIST_HEAD(list);
792
793 n = get_node(cachep, node);
794 STATS_INC_NODEFREES(cachep);
795 if (n->alien && n->alien[page_node]) {
796 alien = n->alien[page_node];
797 ac = &alien->ac;
798 spin_lock(&alien->lock);
799 if (unlikely(ac->avail == ac->limit)) {
800 STATS_INC_ACOVERFLOW(cachep);
801 __drain_alien_cache(cachep, ac, page_node, &list);
802 }
803 __free_one(ac, objp);
804 spin_unlock(&alien->lock);
805 slabs_destroy(cachep, &list);
806 } else {
807 n = get_node(cachep, page_node);
808 spin_lock(&n->list_lock);
809 free_block(cachep, &objp, 1, page_node, &list);
810 spin_unlock(&n->list_lock);
811 slabs_destroy(cachep, &list);
812 }
813 return 1;
814}
815
816static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
817{
818 int page_node = page_to_nid(virt_to_page(objp));
819 int node = numa_mem_id();
820
821
822
823
824 if (likely(node == page_node))
825 return 0;
826
827 return __cache_free_alien(cachep, objp, node, page_node);
828}
829
830
831
832
833
834static inline gfp_t gfp_exact_node(gfp_t flags)
835{
836 return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
837}
838#endif
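
/*
 * Illustrative sketch of what the NUMA gfp_exact_node() above does to a
 * typical mask (flag composition taken from include/linux/gfp.h):
 *
 *	gfp_exact_node(GFP_KERNEL)
 *		== (GFP_KERNEL | __GFP_THISNODE | __GFP_NOWARN)
 *		   & ~(__GFP_RECLAIM | __GFP_NOFAIL)
 *
 * i.e. the allocation is pinned to the requested node, may not block for
 * reclaim, and may fail quietly, letting the caller fall back to another
 * node instead of stalling.
 */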
839
840static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
841{
842 struct kmem_cache_node *n;
843
844
845
846
847
848
849 n = get_node(cachep, node);
850 if (n) {
851 spin_lock_irq(&n->list_lock);
852 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
853 cachep->num;
854 spin_unlock_irq(&n->list_lock);
855
856 return 0;
857 }
858
859 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
860 if (!n)
861 return -ENOMEM;
862
863 kmem_cache_node_init(n);
864 n->next_reap = jiffies + REAPTIMEOUT_NODE +
865 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
866
867 n->free_limit =
868 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
869
870
871
872
873
874
875 cachep->node[node] = n;
876
877 return 0;
878}
879
880#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
881
882
883
884
885
886
887
888
889
890static int init_cache_node_node(int node)
891{
892 int ret;
893 struct kmem_cache *cachep;
894
895 list_for_each_entry(cachep, &slab_caches, list) {
896 ret = init_cache_node(cachep, node, GFP_KERNEL);
897 if (ret)
898 return ret;
899 }
900
901 return 0;
902}
903#endif
904
905static int setup_kmem_cache_node(struct kmem_cache *cachep,
906 int node, gfp_t gfp, bool force_change)
907{
908 int ret = -ENOMEM;
909 struct kmem_cache_node *n;
910 struct array_cache *old_shared = NULL;
911 struct array_cache *new_shared = NULL;
912 struct alien_cache **new_alien = NULL;
913 LIST_HEAD(list);
914
915 if (use_alien_caches) {
916 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
917 if (!new_alien)
918 goto fail;
919 }
920
921 if (cachep->shared) {
922 new_shared = alloc_arraycache(node,
923 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
924 if (!new_shared)
925 goto fail;
926 }
927
928 ret = init_cache_node(cachep, node, gfp);
929 if (ret)
930 goto fail;
931
932 n = get_node(cachep, node);
933 spin_lock_irq(&n->list_lock);
934 if (n->shared && force_change) {
935 free_block(cachep, n->shared->entry,
936 n->shared->avail, node, &list);
937 n->shared->avail = 0;
938 }
939
940 if (!n->shared || force_change) {
941 old_shared = n->shared;
942 n->shared = new_shared;
943 new_shared = NULL;
944 }
945
946 if (!n->alien) {
947 n->alien = new_alien;
948 new_alien = NULL;
949 }
950
951 spin_unlock_irq(&n->list_lock);
952 slabs_destroy(cachep, &list);
953
954
955
956
957
958
959
960 if (old_shared && force_change)
961 synchronize_rcu();
962
963fail:
964 kfree(old_shared);
965 kfree(new_shared);
966 free_alien_cache(new_alien);
967
968 return ret;
969}
970
971#ifdef CONFIG_SMP
972
973static void cpuup_canceled(long cpu)
974{
975 struct kmem_cache *cachep;
976 struct kmem_cache_node *n = NULL;
977 int node = cpu_to_mem(cpu);
978 const struct cpumask *mask = cpumask_of_node(node);
979
980 list_for_each_entry(cachep, &slab_caches, list) {
981 struct array_cache *nc;
982 struct array_cache *shared;
983 struct alien_cache **alien;
984 LIST_HEAD(list);
985
986 n = get_node(cachep, node);
987 if (!n)
988 continue;
989
990 spin_lock_irq(&n->list_lock);
991
992
993 n->free_limit -= cachep->batchcount;
994
995
996 nc = per_cpu_ptr(cachep->cpu_cache, cpu);
997 free_block(cachep, nc->entry, nc->avail, node, &list);
998 nc->avail = 0;
999
1000 if (!cpumask_empty(mask)) {
1001 spin_unlock_irq(&n->list_lock);
1002 goto free_slab;
1003 }
1004
1005 shared = n->shared;
1006 if (shared) {
1007 free_block(cachep, shared->entry,
1008 shared->avail, node, &list);
1009 n->shared = NULL;
1010 }
1011
1012 alien = n->alien;
1013 n->alien = NULL;
1014
1015 spin_unlock_irq(&n->list_lock);
1016
1017 kfree(shared);
1018 if (alien) {
1019 drain_alien_cache(cachep, alien);
1020 free_alien_cache(alien);
1021 }
1022
1023free_slab:
1024 slabs_destroy(cachep, &list);
1025 }
1026
1027
1028
1029
1030
1031 list_for_each_entry(cachep, &slab_caches, list) {
1032 n = get_node(cachep, node);
1033 if (!n)
1034 continue;
1035 drain_freelist(cachep, n, INT_MAX);
1036 }
1037}
1038
1039static int cpuup_prepare(long cpu)
1040{
1041 struct kmem_cache *cachep;
1042 int node = cpu_to_mem(cpu);
1043 int err;
1044
1045
1046
1047
1048
1049
1050
1051 err = init_cache_node_node(node);
1052 if (err < 0)
1053 goto bad;
1054
1055
1056
1057
1058
1059 list_for_each_entry(cachep, &slab_caches, list) {
1060 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1061 if (err)
1062 goto bad;
1063 }
1064
1065 return 0;
1066bad:
1067 cpuup_canceled(cpu);
1068 return -ENOMEM;
1069}
1070
1071int slab_prepare_cpu(unsigned int cpu)
1072{
1073 int err;
1074
1075 mutex_lock(&slab_mutex);
1076 err = cpuup_prepare(cpu);
1077 mutex_unlock(&slab_mutex);
1078 return err;
1079}
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091int slab_dead_cpu(unsigned int cpu)
1092{
1093 mutex_lock(&slab_mutex);
1094 cpuup_canceled(cpu);
1095 mutex_unlock(&slab_mutex);
1096 return 0;
1097}
1098#endif
1099
1100static int slab_online_cpu(unsigned int cpu)
1101{
1102 start_cpu_timer(cpu);
1103 return 0;
1104}
1105
1106static int slab_offline_cpu(unsigned int cpu)
1107{
1108
1109
1110
1111
1112
1113
1114 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1115
1116 per_cpu(slab_reap_work, cpu).work.func = NULL;
1117 return 0;
1118}
1119
1120#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1121
1122
1123
1124
1125
1126
1127
1128static int __meminit drain_cache_node_node(int node)
1129{
1130 struct kmem_cache *cachep;
1131 int ret = 0;
1132
1133 list_for_each_entry(cachep, &slab_caches, list) {
1134 struct kmem_cache_node *n;
1135
1136 n = get_node(cachep, node);
1137 if (!n)
1138 continue;
1139
1140 drain_freelist(cachep, n, INT_MAX);
1141
1142 if (!list_empty(&n->slabs_full) ||
1143 !list_empty(&n->slabs_partial)) {
1144 ret = -EBUSY;
1145 break;
1146 }
1147 }
1148 return ret;
1149}
1150
1151static int __meminit slab_memory_callback(struct notifier_block *self,
1152 unsigned long action, void *arg)
1153{
1154 struct memory_notify *mnb = arg;
1155 int ret = 0;
1156 int nid;
1157
1158 nid = mnb->status_change_nid;
1159 if (nid < 0)
1160 goto out;
1161
1162 switch (action) {
1163 case MEM_GOING_ONLINE:
1164 mutex_lock(&slab_mutex);
1165 ret = init_cache_node_node(nid);
1166 mutex_unlock(&slab_mutex);
1167 break;
1168 case MEM_GOING_OFFLINE:
1169 mutex_lock(&slab_mutex);
1170 ret = drain_cache_node_node(nid);
1171 mutex_unlock(&slab_mutex);
1172 break;
1173 case MEM_ONLINE:
1174 case MEM_OFFLINE:
1175 case MEM_CANCEL_ONLINE:
1176 case MEM_CANCEL_OFFLINE:
1177 break;
1178 }
1179out:
1180 return notifier_from_errno(ret);
1181}
1182#endif
1183
1184
1185
1186
1187static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1188 int nodeid)
1189{
1190 struct kmem_cache_node *ptr;
1191
1192 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1193 BUG_ON(!ptr);
1194
1195 memcpy(ptr, list, sizeof(struct kmem_cache_node));
1196
1197
1198
1199 spin_lock_init(&ptr->list_lock);
1200
1201 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1202 cachep->node[nodeid] = ptr;
1203}
1204
1205
1206
1207
1208
1209static void __init set_up_node(struct kmem_cache *cachep, int index)
1210{
1211 int node;
1212
1213 for_each_online_node(node) {
1214 cachep->node[node] = &init_kmem_cache_node[index + node];
1215 cachep->node[node]->next_reap = jiffies +
1216 REAPTIMEOUT_NODE +
1217 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1218 }
1219}
1220
1221
1222
1223
1224
1225void __init kmem_cache_init(void)
1226{
1227 int i;
1228
1229 kmem_cache = &kmem_cache_boot;
1230
1231 if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1232 use_alien_caches = 0;
1233
1234 for (i = 0; i < NUM_INIT_LISTS; i++)
1235 kmem_cache_node_init(&init_kmem_cache_node[i]);
1236
1237
1238
1239
1240
1241
1242 if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
1243 slab_max_order = SLAB_MAX_ORDER_HI;
1244
	/*
	 * Bootstrap is tricky, because several objects are allocated from
	 * caches that do not exist yet:
	 * 1) initialize the kmem_cache cache: it contains the struct
	 *    kmem_cache structures of all caches except kmem_cache itself;
	 *    kmem_cache is statically allocated, and its kmem_cache_node
	 *    structures initially come from the init_kmem_cache_node
	 *    __initdata area.
	 * 2) Create the kmalloc cache that backs struct kmem_cache_node
	 *    allocations (INDEX_NODE), again starting on __initdata nodes.
	 * 3) Replace the __initdata kmem_cache_node structures of the two
	 *    bootstrap caches with kmalloc'ed copies (see init_list()).
	 * 4) Create the remaining kmalloc caches.
	 */

	/* 1) create the kmem_cache */
1270 create_boot_cache(kmem_cache, "kmem_cache",
1271 offsetof(struct kmem_cache, node) +
1272 nr_node_ids * sizeof(struct kmem_cache_node *),
1273 SLAB_HWCACHE_ALIGN, 0, 0);
1274 list_add(&kmem_cache->list, &slab_caches);
1275 slab_state = PARTIAL;
1276
1277
1278
1279
1280
1281 kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
1282 kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
1283 kmalloc_info[INDEX_NODE].size,
1284 ARCH_KMALLOC_FLAGS, 0,
1285 kmalloc_info[INDEX_NODE].size);
1286 slab_state = PARTIAL_NODE;
1287 setup_kmalloc_cache_index_table();
1288
1289 slab_early_init = 0;
1290
1291
1292 {
1293 int nid;
1294
1295 for_each_online_node(nid) {
1296 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1297
1298 init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
1299 &init_kmem_cache_node[SIZE_NODE + nid], nid);
1300 }
1301 }
1302
1303 create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1304}
1305
1306void __init kmem_cache_init_late(void)
1307{
1308 struct kmem_cache *cachep;
1309
1310
1311 mutex_lock(&slab_mutex);
1312 list_for_each_entry(cachep, &slab_caches, list)
1313 if (enable_cpucache(cachep, GFP_NOWAIT))
1314 BUG();
1315 mutex_unlock(&slab_mutex);
1316
1317
1318 slab_state = FULL;
1319
1320#ifdef CONFIG_NUMA
1321
1322
1323
1324
1325 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1326#endif
1327
1328
1329
1330
1331
1332}
1333
1334static int __init cpucache_init(void)
1335{
1336 int ret;
1337
1338
1339
1340
1341 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
1342 slab_online_cpu, slab_offline_cpu);
1343 WARN_ON(ret < 0);
1344
1345 return 0;
1346}
1347__initcall(cpucache_init);
1348
1349static noinline void
1350slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1351{
1352#if DEBUG
1353 struct kmem_cache_node *n;
1354 unsigned long flags;
1355 int node;
1356 static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1357 DEFAULT_RATELIMIT_BURST);
1358
1359 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1360 return;
1361
1362 pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1363 nodeid, gfpflags, &gfpflags);
1364 pr_warn(" cache: %s, object size: %d, order: %d\n",
1365 cachep->name, cachep->size, cachep->gfporder);
1366
1367 for_each_kmem_cache_node(cachep, node, n) {
1368 unsigned long total_slabs, free_slabs, free_objs;
1369
1370 spin_lock_irqsave(&n->list_lock, flags);
1371 total_slabs = n->total_slabs;
1372 free_slabs = n->free_slabs;
1373 free_objs = n->free_objects;
1374 spin_unlock_irqrestore(&n->list_lock, flags);
1375
1376 pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
1377 node, total_slabs - free_slabs, total_slabs,
1378 (total_slabs * cachep->num) - free_objs,
1379 total_slabs * cachep->num);
1380 }
1381#endif
1382}
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1393 int nodeid)
1394{
1395 struct page *page;
1396
1397 flags |= cachep->allocflags;
1398
1399 page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1400 if (!page) {
1401 slab_out_of_memory(cachep, flags, nodeid);
1402 return NULL;
1403 }
1404
1405 account_slab_page(page, cachep->gfporder, cachep);
1406 __SetPageSlab(page);
1407
1408 if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1409 SetPageSlabPfmemalloc(page);
1410
1411 return page;
1412}
1413
1414
1415
1416
1417static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1418{
1419 int order = cachep->gfporder;
1420
1421 BUG_ON(!PageSlab(page));
1422 __ClearPageSlabPfmemalloc(page);
1423 __ClearPageSlab(page);
1424 page_mapcount_reset(page);
1425 page->mapping = NULL;
1426
1427 if (current->reclaim_state)
1428 current->reclaim_state->reclaimed_slab += 1 << order;
1429 unaccount_slab_page(page, order, cachep);
1430 __free_pages(page, order);
1431}
1432
1433static void kmem_rcu_free(struct rcu_head *head)
1434{
1435 struct kmem_cache *cachep;
1436 struct page *page;
1437
1438 page = container_of(head, struct page, rcu_head);
1439 cachep = page->slab_cache;
1440
1441 kmem_freepages(cachep, page);
1442}
1443
1444#if DEBUG
1445static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1446{
1447 if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
1448 (cachep->size % PAGE_SIZE) == 0)
1449 return true;
1450
1451 return false;
1452}
1453
1454#ifdef CONFIG_DEBUG_PAGEALLOC
1455static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1456 unsigned long caller)
1457{
1458 int size = cachep->object_size;
1459
1460 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1461
1462 if (size < 5 * sizeof(unsigned long))
1463 return;
1464
1465 *addr++ = 0x12345678;
1466 *addr++ = caller;
1467 *addr++ = smp_processor_id();
1468 size -= 3 * sizeof(unsigned long);
1469 {
1470 unsigned long *sptr = &caller;
1471 unsigned long svalue;
1472
1473 while (!kstack_end(sptr)) {
1474 svalue = *sptr++;
1475 if (kernel_text_address(svalue)) {
1476 *addr++ = svalue;
1477 size -= sizeof(unsigned long);
1478 if (size <= sizeof(unsigned long))
1479 break;
1480 }
1481 }
1482
1483 }
1484 *addr++ = 0x87654321;
1485}
1486
1487static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1488 int map, unsigned long caller)
1489{
1490 if (!is_debug_pagealloc_cache(cachep))
1491 return;
1492
1493 if (caller)
1494 store_stackinfo(cachep, objp, caller);
1495
1496 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1497}
1498
1499#else
1500static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1501 int map, unsigned long caller) {}
1502
1503#endif
1504
1505static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1506{
1507 int size = cachep->object_size;
1508 addr = &((char *)addr)[obj_offset(cachep)];
1509
1510 memset(addr, val, size);
1511 *(unsigned char *)(addr + size - 1) = POISON_END;
1512}
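
/*
 * Illustrative sketch (poison values from include/linux/poison.h): after
 * poison_obj(cachep, objp, POISON_FREE) a hypothetical 16-byte object
 * reads
 *
 *	6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5
 *
 * i.e. POISON_FREE (0x6b) in every byte except the last, which holds
 * POISON_END (0xa5).  check_poison_obj() below verifies exactly this
 * pattern.
 */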
1513
1514static void dump_line(char *data, int offset, int limit)
1515{
1516 int i;
1517 unsigned char error = 0;
1518 int bad_count = 0;
1519
1520 pr_err("%03x: ", offset);
1521 for (i = 0; i < limit; i++) {
1522 if (data[offset + i] != POISON_FREE) {
1523 error = data[offset + i];
1524 bad_count++;
1525 }
1526 }
1527 print_hex_dump(KERN_CONT, "", 0, 16, 1,
1528 &data[offset], limit, 1);
1529
1530 if (bad_count == 1) {
1531 error ^= POISON_FREE;
1532 if (!(error & (error - 1))) {
1533 pr_err("Single bit error detected. Probably bad RAM.\n");
1534#ifdef CONFIG_X86
1535 pr_err("Run memtest86+ or a similar memory test tool.\n");
1536#else
1537 pr_err("Run a memory test tool.\n");
1538#endif
1539 }
1540 }
1541}
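
/*
 * Illustrative sketch: the single-bit test above works because a one-bit
 * corruption XORed with the expected poison value leaves a power of two.
 * For a corrupted byte 0x6f where POISON_FREE (0x6b) was expected:
 *
 *	error = 0x6f ^ 0x6b = 0x04
 *	error & (error - 1) = 0x04 & 0x03 = 0
 *
 * so the "Probably bad RAM" hint is printed only when exactly one byte in
 * the line is wrong and only one bit of it has flipped.
 */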
1542#endif
1543
1544#if DEBUG
1545
1546static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1547{
1548 int i, size;
1549 char *realobj;
1550
1551 if (cachep->flags & SLAB_RED_ZONE) {
1552 pr_err("Redzone: 0x%llx/0x%llx\n",
1553 *dbg_redzone1(cachep, objp),
1554 *dbg_redzone2(cachep, objp));
1555 }
1556
1557 if (cachep->flags & SLAB_STORE_USER)
1558 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1559 realobj = (char *)objp + obj_offset(cachep);
1560 size = cachep->object_size;
1561 for (i = 0; i < size && lines; i += 16, lines--) {
1562 int limit;
1563 limit = 16;
1564 if (i + limit > size)
1565 limit = size - i;
1566 dump_line(realobj, i, limit);
1567 }
1568}
1569
1570static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1571{
1572 char *realobj;
1573 int size, i;
1574 int lines = 0;
1575
1576 if (is_debug_pagealloc_cache(cachep))
1577 return;
1578
1579 realobj = (char *)objp + obj_offset(cachep);
1580 size = cachep->object_size;
1581
1582 for (i = 0; i < size; i++) {
1583 char exp = POISON_FREE;
1584 if (i == size - 1)
1585 exp = POISON_END;
1586 if (realobj[i] != exp) {
1587 int limit;
1588
1589
1590 if (lines == 0) {
1591 pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1592 print_tainted(), cachep->name,
1593 realobj, size);
1594 print_objinfo(cachep, objp, 0);
1595 }
1596
1597 i = (i / 16) * 16;
1598 limit = 16;
1599 if (i + limit > size)
1600 limit = size - i;
1601 dump_line(realobj, i, limit);
1602 i += 16;
1603 lines++;
1604
1605 if (lines > 5)
1606 break;
1607 }
1608 }
1609 if (lines != 0) {
1610
1611
1612
1613 struct page *page = virt_to_head_page(objp);
1614 unsigned int objnr;
1615
1616 objnr = obj_to_index(cachep, page, objp);
1617 if (objnr) {
1618 objp = index_to_obj(cachep, page, objnr - 1);
1619 realobj = (char *)objp + obj_offset(cachep);
1620 pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
1621 print_objinfo(cachep, objp, 2);
1622 }
1623 if (objnr + 1 < cachep->num) {
1624 objp = index_to_obj(cachep, page, objnr + 1);
1625 realobj = (char *)objp + obj_offset(cachep);
1626 pr_err("Next obj: start=%px, len=%d\n", realobj, size);
1627 print_objinfo(cachep, objp, 2);
1628 }
1629 }
1630}
1631#endif
1632
1633#if DEBUG
1634static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1635 struct page *page)
1636{
1637 int i;
1638
1639 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1640 poison_obj(cachep, page->freelist - obj_offset(cachep),
1641 POISON_FREE);
1642 }
1643
1644 for (i = 0; i < cachep->num; i++) {
1645 void *objp = index_to_obj(cachep, page, i);
1646
1647 if (cachep->flags & SLAB_POISON) {
1648 check_poison_obj(cachep, objp);
1649 slab_kernel_map(cachep, objp, 1, 0);
1650 }
1651 if (cachep->flags & SLAB_RED_ZONE) {
1652 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1653 slab_error(cachep, "start of a freed object was overwritten");
1654 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1655 slab_error(cachep, "end of a freed object was overwritten");
1656 }
1657 }
1658}
1659#else
1660static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1661 struct page *page)
1662{
1663}
1664#endif
1665
/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page, and release the mem back to the
 * system. Before calling the slab page must have been unlinked from the
 * cache. The kmem_cache_node's list_lock is not held/needed.
 */
1675static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1676{
1677 void *freelist;
1678
1679 freelist = page->freelist;
1680 slab_destroy_debugcheck(cachep, page);
1681 if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1682 call_rcu(&page->rcu_head, kmem_rcu_free);
1683 else
1684 kmem_freepages(cachep, page);
1685
1686
1687
1688
1689
1690 if (OFF_SLAB(cachep))
1691 kmem_cache_free(cachep->freelist_cache, freelist);
1692}
1693
1694
1695
1696
1697
1698static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1699{
1700 struct page *page, *n;
1701
1702 list_for_each_entry_safe(page, n, list, slab_list) {
1703 list_del(&page->slab_list);
1704 slab_destroy(cachep, page);
1705 }
1706}
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722static size_t calculate_slab_order(struct kmem_cache *cachep,
1723 size_t size, slab_flags_t flags)
1724{
1725 size_t left_over = 0;
1726 int gfporder;
1727
1728 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1729 unsigned int num;
1730 size_t remainder;
1731
1732 num = cache_estimate(gfporder, size, flags, &remainder);
1733 if (!num)
1734 continue;
1735
1736
1737 if (num > SLAB_OBJ_MAX_NUM)
1738 break;
1739
1740 if (flags & CFLGS_OFF_SLAB) {
1741 struct kmem_cache *freelist_cache;
1742 size_t freelist_size;
1743
1744 freelist_size = num * sizeof(freelist_idx_t);
1745 freelist_cache = kmalloc_slab(freelist_size, 0u);
1746 if (!freelist_cache)
1747 continue;
1748
1749
1750
1751
1752
1753 if (OFF_SLAB(freelist_cache))
1754 continue;
1755
1756
1757 if (freelist_cache->size > cachep->size / 2)
1758 continue;
1759 }
1760
1761
1762 cachep->num = num;
1763 cachep->gfporder = gfporder;
1764 left_over = remainder;
1765
1766
1767
1768
1769
1770
1771 if (flags & SLAB_RECLAIM_ACCOUNT)
1772 break;
1773
1774
1775
1776
1777
1778 if (gfporder >= slab_max_order)
1779 break;
1780
1781
1782
1783
1784 if (left_over * 8 <= (PAGE_SIZE << gfporder))
1785 break;
1786 }
1787 return left_over;
1788}
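
/*
 * Worked example (hypothetical numbers: PAGE_SIZE == 4096,
 * sizeof(freelist_idx_t) == 1, slab_max_order == 1, no special flags):
 * for 700-byte objects calculate_slab_order() tries
 *
 *	order 0: num = 4096 / 701 = 5,  left_over = 591;
 *		 591 * 8 > 4096, too much waste, try the next order
 *	order 1: num = 8192 / 701 = 11, left_over = 481;
 *		 gfporder has reached slab_max_order, stop here
 *
 * leaving cachep->num == 11, cachep->gfporder == 1 and 481 bytes of slack
 * for cache colouring.
 */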
1789
1790static struct array_cache __percpu *alloc_kmem_cache_cpus(
1791 struct kmem_cache *cachep, int entries, int batchcount)
1792{
1793 int cpu;
1794 size_t size;
1795 struct array_cache __percpu *cpu_cache;
1796
1797 size = sizeof(void *) * entries + sizeof(struct array_cache);
1798 cpu_cache = __alloc_percpu(size, sizeof(void *));
1799
1800 if (!cpu_cache)
1801 return NULL;
1802
1803 for_each_possible_cpu(cpu) {
1804 init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1805 entries, batchcount);
1806 }
1807
1808 return cpu_cache;
1809}
1810
1811static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1812{
1813 if (slab_state >= FULL)
1814 return enable_cpucache(cachep, gfp);
1815
1816 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1817 if (!cachep->cpu_cache)
1818 return 1;
1819
1820 if (slab_state == DOWN) {
1821
1822 set_up_node(kmem_cache, CACHE_CACHE);
1823 } else if (slab_state == PARTIAL) {
1824
1825 set_up_node(cachep, SIZE_NODE);
1826 } else {
1827 int node;
1828
1829 for_each_online_node(node) {
1830 cachep->node[node] = kmalloc_node(
1831 sizeof(struct kmem_cache_node), gfp, node);
1832 BUG_ON(!cachep->node[node]);
1833 kmem_cache_node_init(cachep->node[node]);
1834 }
1835 }
1836
1837 cachep->node[numa_mem_id()]->next_reap =
1838 jiffies + REAPTIMEOUT_NODE +
1839 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1840
1841 cpu_cache_get(cachep)->avail = 0;
1842 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1843 cpu_cache_get(cachep)->batchcount = 1;
1844 cpu_cache_get(cachep)->touched = 0;
1845 cachep->batchcount = 1;
1846 cachep->limit = BOOT_CPUCACHE_ENTRIES;
1847 return 0;
1848}
1849
1850slab_flags_t kmem_cache_flags(unsigned int object_size,
1851 slab_flags_t flags, const char *name,
1852 void (*ctor)(void *))
1853{
1854 return flags;
1855}
1856
1857struct kmem_cache *
1858__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
1859 slab_flags_t flags, void (*ctor)(void *))
1860{
1861 struct kmem_cache *cachep;
1862
1863 cachep = find_mergeable(size, align, flags, name, ctor);
1864 if (cachep) {
1865 cachep->refcount++;
1866
1867
1868
1869
1870
1871 cachep->object_size = max_t(int, cachep->object_size, size);
1872 }
1873 return cachep;
1874}
1875
1876static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1877 size_t size, slab_flags_t flags)
1878{
1879 size_t left;
1880
1881 cachep->num = 0;
1882
1883
1884
1885
1886
1887
1888 if (unlikely(slab_want_init_on_free(cachep)))
1889 return false;
1890
1891 if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1892 return false;
1893
1894 left = calculate_slab_order(cachep, size,
1895 flags | CFLGS_OBJFREELIST_SLAB);
1896 if (!cachep->num)
1897 return false;
1898
1899 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1900 return false;
1901
1902 cachep->colour = left / cachep->colour_off;
1903
1904 return true;
1905}
1906
1907static bool set_off_slab_cache(struct kmem_cache *cachep,
1908 size_t size, slab_flags_t flags)
1909{
1910 size_t left;
1911
1912 cachep->num = 0;
1913
1914
1915
1916
1917
1918 if (flags & SLAB_NOLEAKTRACE)
1919 return false;
1920
1921
1922
1923
1924
1925 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1926 if (!cachep->num)
1927 return false;
1928
1929
1930
1931
1932
1933 if (left >= cachep->num * sizeof(freelist_idx_t))
1934 return false;
1935
1936 cachep->colour = left / cachep->colour_off;
1937
1938 return true;
1939}
1940
1941static bool set_on_slab_cache(struct kmem_cache *cachep,
1942 size_t size, slab_flags_t flags)
1943{
1944 size_t left;
1945
1946 cachep->num = 0;
1947
1948 left = calculate_slab_order(cachep, size, flags);
1949 if (!cachep->num)
1950 return false;
1951
1952 cachep->colour = left / cachep->colour_off;
1953
1954 return true;
1955}
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
1981{
1982 size_t ralign = BYTES_PER_WORD;
1983 gfp_t gfp;
1984 int err;
1985 unsigned int size = cachep->size;
1986
1987#if DEBUG
1988#if FORCED_DEBUG
1989
1990
1991
1992
1993
1994
1995 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
1996 2 * sizeof(unsigned long long)))
1997 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1998 if (!(flags & SLAB_TYPESAFE_BY_RCU))
1999 flags |= SLAB_POISON;
2000#endif
2001#endif
2002
2003
2004
2005
2006
2007
2008 size = ALIGN(size, BYTES_PER_WORD);
2009
2010 if (flags & SLAB_RED_ZONE) {
2011 ralign = REDZONE_ALIGN;
2012
2013
2014 size = ALIGN(size, REDZONE_ALIGN);
2015 }
2016
2017
2018 if (ralign < cachep->align) {
2019 ralign = cachep->align;
2020 }
2021
2022 if (ralign > __alignof__(unsigned long long))
2023 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2024
2025
2026
2027 cachep->align = ralign;
2028 cachep->colour_off = cache_line_size();
2029
2030 if (cachep->colour_off < cachep->align)
2031 cachep->colour_off = cachep->align;
2032
2033 if (slab_is_available())
2034 gfp = GFP_KERNEL;
2035 else
2036 gfp = GFP_NOWAIT;
2037
2038#if DEBUG
2039
2040
2041
2042
2043
2044 if (flags & SLAB_RED_ZONE) {
2045
2046 cachep->obj_offset += sizeof(unsigned long long);
2047 size += 2 * sizeof(unsigned long long);
2048 }
2049 if (flags & SLAB_STORE_USER) {
2050
2051
2052
2053
2054 if (flags & SLAB_RED_ZONE)
2055 size += REDZONE_ALIGN;
2056 else
2057 size += BYTES_PER_WORD;
2058 }
2059#endif
2060
2061 kasan_cache_create(cachep, &size, &flags);
2062
2063 size = ALIGN(size, cachep->align);
2064
2065
2066
2067
2068 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2069 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2070
2071#if DEBUG
2072
2073
2074
2075
2076
2077
2078
2079 if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
2080 size >= 256 && cachep->object_size > cache_line_size()) {
2081 if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2082 size_t tmp_size = ALIGN(size, PAGE_SIZE);
2083
2084 if (set_off_slab_cache(cachep, tmp_size, flags)) {
2085 flags |= CFLGS_OFF_SLAB;
2086 cachep->obj_offset += tmp_size - size;
2087 size = tmp_size;
2088 goto done;
2089 }
2090 }
2091 }
2092#endif
2093
2094 if (set_objfreelist_slab_cache(cachep, size, flags)) {
2095 flags |= CFLGS_OBJFREELIST_SLAB;
2096 goto done;
2097 }
2098
2099 if (set_off_slab_cache(cachep, size, flags)) {
2100 flags |= CFLGS_OFF_SLAB;
2101 goto done;
2102 }
2103
2104 if (set_on_slab_cache(cachep, size, flags))
2105 goto done;
2106
2107 return -E2BIG;
2108
2109done:
2110 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2111 cachep->flags = flags;
2112 cachep->allocflags = __GFP_COMP;
2113 if (flags & SLAB_CACHE_DMA)
2114 cachep->allocflags |= GFP_DMA;
2115 if (flags & SLAB_CACHE_DMA32)
2116 cachep->allocflags |= GFP_DMA32;
2117 if (flags & SLAB_RECLAIM_ACCOUNT)
2118 cachep->allocflags |= __GFP_RECLAIMABLE;
2119 cachep->size = size;
2120 cachep->reciprocal_buffer_size = reciprocal_value(size);
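	/*
	 * Sketch of how the reciprocal value is used (obj_to_index() is
	 * defined outside this file): the division of an offset within the
	 * slab by the object size becomes a multiply-and-shift.  For a
	 * hypothetical size of 256 and an object starting 1280 bytes into
	 * s_mem:
	 *
	 *	reciprocal_divide(1280, cachep->reciprocal_buffer_size) == 5
	 *
	 * which avoids an integer division in the hot alloc/free paths.
	 */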
2121
2122#if DEBUG
2123
2124
2125
2126
2127
2128 if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2129 (cachep->flags & SLAB_POISON) &&
2130 is_debug_pagealloc_cache(cachep))
2131 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2132#endif
2133
2134 if (OFF_SLAB(cachep)) {
2135 cachep->freelist_cache =
2136 kmalloc_slab(cachep->freelist_size, 0u);
2137 }
2138
2139 err = setup_cpu_cache(cachep, gfp);
2140 if (err) {
2141 __kmem_cache_release(cachep);
2142 return err;
2143 }
2144
2145 return 0;
2146}
2147
2148#if DEBUG
2149static void check_irq_off(void)
2150{
2151 BUG_ON(!irqs_disabled());
2152}
2153
2154static void check_irq_on(void)
2155{
2156 BUG_ON(irqs_disabled());
2157}
2158
2159static void check_mutex_acquired(void)
2160{
2161 BUG_ON(!mutex_is_locked(&slab_mutex));
2162}
2163
2164static void check_spinlock_acquired(struct kmem_cache *cachep)
2165{
2166#ifdef CONFIG_SMP
2167 check_irq_off();
2168 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2169#endif
2170}
2171
2172static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2173{
2174#ifdef CONFIG_SMP
2175 check_irq_off();
2176 assert_spin_locked(&get_node(cachep, node)->list_lock);
2177#endif
2178}
2179
2180#else
2181#define check_irq_off() do { } while(0)
2182#define check_irq_on() do { } while(0)
2183#define check_mutex_acquired() do { } while(0)
2184#define check_spinlock_acquired(x) do { } while(0)
2185#define check_spinlock_acquired_node(x, y) do { } while(0)
2186#endif
2187
2188static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2189 int node, bool free_all, struct list_head *list)
2190{
2191 int tofree;
2192
2193 if (!ac || !ac->avail)
2194 return;
2195
2196 tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2197 if (tofree > ac->avail)
2198 tofree = (ac->avail + 1) / 2;
2199
2200 free_block(cachep, ac->entry, tofree, node, list);
2201 ac->avail -= tofree;
2202 memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2203}
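
/*
 * Illustrative sketch (hypothetical sizes): with free_all == false the
 * drain above is deliberately gentle, e.g.
 *
 *	ac->limit = 120, ac->avail = 100:  tofree = (120 + 4) / 5 = 24
 *	ac->limit = 120, ac->avail = 10:   tofree = 24, capped to
 *					   (10 + 1) / 2 = 5
 *
 * so periodic reaping releases roughly a fifth of the array (or half of
 * what is actually there), while a full drain frees everything.
 */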
2204
2205static void do_drain(void *arg)
2206{
2207 struct kmem_cache *cachep = arg;
2208 struct array_cache *ac;
2209 int node = numa_mem_id();
2210 struct kmem_cache_node *n;
2211 LIST_HEAD(list);
2212
2213 check_irq_off();
2214 ac = cpu_cache_get(cachep);
2215 n = get_node(cachep, node);
2216 spin_lock(&n->list_lock);
2217 free_block(cachep, ac->entry, ac->avail, node, &list);
2218 spin_unlock(&n->list_lock);
2219 ac->avail = 0;
2220 slabs_destroy(cachep, &list);
2221}
2222
2223static void drain_cpu_caches(struct kmem_cache *cachep)
2224{
2225 struct kmem_cache_node *n;
2226 int node;
2227 LIST_HEAD(list);
2228
2229 on_each_cpu(do_drain, cachep, 1);
2230 check_irq_on();
2231 for_each_kmem_cache_node(cachep, node, n)
2232 if (n->alien)
2233 drain_alien_cache(cachep, n->alien);
2234
2235 for_each_kmem_cache_node(cachep, node, n) {
2236 spin_lock_irq(&n->list_lock);
2237 drain_array_locked(cachep, n->shared, node, true, &list);
2238 spin_unlock_irq(&n->list_lock);
2239
2240 slabs_destroy(cachep, &list);
2241 }
2242}
2243
2244
2245
2246
2247
2248
2249
2250static int drain_freelist(struct kmem_cache *cache,
2251 struct kmem_cache_node *n, int tofree)
2252{
2253 struct list_head *p;
2254 int nr_freed;
2255 struct page *page;
2256
2257 nr_freed = 0;
2258 while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2259
2260 spin_lock_irq(&n->list_lock);
2261 p = n->slabs_free.prev;
2262 if (p == &n->slabs_free) {
2263 spin_unlock_irq(&n->list_lock);
2264 goto out;
2265 }
2266
2267 page = list_entry(p, struct page, slab_list);
2268 list_del(&page->slab_list);
2269 n->free_slabs--;
2270 n->total_slabs--;
2271
2272
2273
2274
2275 n->free_objects -= cache->num;
2276 spin_unlock_irq(&n->list_lock);
2277 slab_destroy(cache, page);
2278 nr_freed++;
2279 }
2280out:
2281 return nr_freed;
2282}
2283
2284bool __kmem_cache_empty(struct kmem_cache *s)
2285{
2286 int node;
2287 struct kmem_cache_node *n;
2288
2289 for_each_kmem_cache_node(s, node, n)
2290 if (!list_empty(&n->slabs_full) ||
2291 !list_empty(&n->slabs_partial))
2292 return false;
2293 return true;
2294}
2295
2296int __kmem_cache_shrink(struct kmem_cache *cachep)
2297{
2298 int ret = 0;
2299 int node;
2300 struct kmem_cache_node *n;
2301
2302 drain_cpu_caches(cachep);
2303
2304 check_irq_on();
2305 for_each_kmem_cache_node(cachep, node, n) {
2306 drain_freelist(cachep, n, INT_MAX);
2307
2308 ret += !list_empty(&n->slabs_full) ||
2309 !list_empty(&n->slabs_partial);
2310 }
2311 return (ret ? 1 : 0);
2312}
2313
2314int __kmem_cache_shutdown(struct kmem_cache *cachep)
2315{
2316 return __kmem_cache_shrink(cachep);
2317}
2318
2319void __kmem_cache_release(struct kmem_cache *cachep)
2320{
2321 int i;
2322 struct kmem_cache_node *n;
2323
2324 cache_random_seq_destroy(cachep);
2325
2326 free_percpu(cachep->cpu_cache);
2327
2328
2329 for_each_kmem_cache_node(cachep, i, n) {
2330 kfree(n->shared);
2331 free_alien_cache(n->alien);
2332 kfree(n);
2333 cachep->node[i] = NULL;
2334 }
2335}
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351static void *alloc_slabmgmt(struct kmem_cache *cachep,
2352 struct page *page, int colour_off,
2353 gfp_t local_flags, int nodeid)
2354{
2355 void *freelist;
2356 void *addr = page_address(page);
2357
2358 page->s_mem = addr + colour_off;
2359 page->active = 0;
2360
2361 if (OBJFREELIST_SLAB(cachep))
2362 freelist = NULL;
2363 else if (OFF_SLAB(cachep)) {
2364
2365 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2366 local_flags, nodeid);
2367 if (!freelist)
2368 return NULL;
2369 } else {
2370
2371 freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2372 cachep->freelist_size;
2373 }
2374
2375 return freelist;
2376}
2377
2378static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2379{
2380 return ((freelist_idx_t *)page->freelist)[idx];
2381}
2382
2383static inline void set_free_obj(struct page *page,
2384 unsigned int idx, freelist_idx_t val)
2385{
2386 ((freelist_idx_t *)(page->freelist))[idx] = val;
2387}
2388
2389static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2390{
2391#if DEBUG
2392 int i;
2393
2394 for (i = 0; i < cachep->num; i++) {
2395 void *objp = index_to_obj(cachep, page, i);
2396
2397 if (cachep->flags & SLAB_STORE_USER)
2398 *dbg_userword(cachep, objp) = NULL;
2399
2400 if (cachep->flags & SLAB_RED_ZONE) {
2401 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2402 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2403 }
2404
2405
2406
2407
2408
2409 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2410 kasan_unpoison_object_data(cachep,
2411 objp + obj_offset(cachep));
2412 cachep->ctor(objp + obj_offset(cachep));
2413 kasan_poison_object_data(
2414 cachep, objp + obj_offset(cachep));
2415 }
2416
2417 if (cachep->flags & SLAB_RED_ZONE) {
2418 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2419 slab_error(cachep, "constructor overwrote the end of an object");
2420 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2421 slab_error(cachep, "constructor overwrote the start of an object");
2422 }
2423
2424 if (cachep->flags & SLAB_POISON) {
2425 poison_obj(cachep, objp, POISON_FREE);
2426 slab_kernel_map(cachep, objp, 0, 0);
2427 }
2428 }
2429#endif
2430}
2431
2432#ifdef CONFIG_SLAB_FREELIST_RANDOM
2433
2434union freelist_init_state {
2435 struct {
2436 unsigned int pos;
2437 unsigned int *list;
2438 unsigned int count;
2439 };
2440 struct rnd_state rnd_state;
2441};
2442
2443
2444
2445
2446
2447static bool freelist_state_initialize(union freelist_init_state *state,
2448 struct kmem_cache *cachep,
2449 unsigned int count)
2450{
2451 bool ret;
2452 unsigned int rand;
2453
2454
2455 rand = get_random_int();
2456
2457
2458 if (!cachep->random_seq) {
2459 prandom_seed_state(&state->rnd_state, rand);
2460 ret = false;
2461 } else {
2462 state->list = cachep->random_seq;
2463 state->count = count;
2464 state->pos = rand % count;
2465 ret = true;
2466 }
2467 return ret;
2468}
2469
2470
2471static freelist_idx_t next_random_slot(union freelist_init_state *state)
2472{
2473 if (state->pos >= state->count)
2474 state->pos = 0;
2475 return state->list[state->pos++];
2476}
2477
2478
2479static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2480{
2481 swap(((freelist_idx_t *)page->freelist)[a],
2482 ((freelist_idx_t *)page->freelist)[b]);
2483}
2484
2485
2486
2487
2488
2489static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2490{
2491 unsigned int objfreelist = 0, i, rand, count = cachep->num;
2492 union freelist_init_state state;
2493 bool precomputed;
2494
2495 if (count < 2)
2496 return false;
2497
2498 precomputed = freelist_state_initialize(&state, cachep, count);
2499
2500
2501 if (OBJFREELIST_SLAB(cachep)) {
2502 if (!precomputed)
2503 objfreelist = count - 1;
2504 else
2505 objfreelist = next_random_slot(&state);
2506 page->freelist = index_to_obj(cachep, page, objfreelist) +
2507 obj_offset(cachep);
2508 count--;
2509 }
2510
2511
2512
2513
2514
2515 if (!precomputed) {
2516 for (i = 0; i < count; i++)
2517 set_free_obj(page, i, i);
2518
2519
2520 for (i = count - 1; i > 0; i--) {
2521 rand = prandom_u32_state(&state.rnd_state);
2522 rand %= (i + 1);
2523 swap_free_obj(page, i, rand);
2524 }
2525 } else {
2526 for (i = 0; i < count; i++)
2527 set_free_obj(page, i, next_random_slot(&state));
2528 }
2529
2530 if (OBJFREELIST_SLAB(cachep))
2531 set_free_obj(page, cachep->num - 1, objfreelist);
2532
2533 return true;
2534}
2535#else
2536static inline bool shuffle_freelist(struct kmem_cache *cachep,
2537 struct page *page)
2538{
2539 return false;
2540}
2541#endif
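
/*
 * Minimal sketch of the fallback shuffle used above when no precomputed
 * random sequence exists: a Fisher-Yates pass over the index array, shown
 * for a hypothetical 4-object slab (indices 0..3):
 *
 *	start:	freelist = {0, 1, 2, 3}
 *	i = 3:	rand %= 4; swap slots 3 and rand
 *	i = 2:	rand %= 3; swap slots 2 and rand
 *	i = 1:	rand %= 2; swap slots 1 and rand
 *
 * Given a uniform prandom_u32_state(), every permutation is equally
 * likely, so the order in which a fresh slab hands out objects is
 * unpredictable.
 */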
2542
2543static void cache_init_objs(struct kmem_cache *cachep,
2544 struct page *page)
2545{
2546 int i;
2547 void *objp;
2548 bool shuffled;
2549
2550 cache_init_objs_debug(cachep, page);
2551
2552
2553 shuffled = shuffle_freelist(cachep, page);
2554
2555 if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2556 page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2557 obj_offset(cachep);
2558 }
2559
2560 for (i = 0; i < cachep->num; i++) {
2561 objp = index_to_obj(cachep, page, i);
2562 objp = kasan_init_slab_obj(cachep, objp);
2563
2564
2565 if (DEBUG == 0 && cachep->ctor) {
2566 kasan_unpoison_object_data(cachep, objp);
2567 cachep->ctor(objp);
2568 kasan_poison_object_data(cachep, objp);
2569 }
2570
2571 if (!shuffled)
2572 set_free_obj(page, i, i);
2573 }
2574}
2575
2576static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2577{
2578 void *objp;
2579
2580 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2581 page->active++;
2582
2583#if DEBUG
2584 if (cachep->flags & SLAB_STORE_USER)
2585 set_store_user_dirty(cachep);
2586#endif
2587
2588 return objp;
2589}
2590
2591static void slab_put_obj(struct kmem_cache *cachep,
2592 struct page *page, void *objp)
2593{
2594 unsigned int objnr = obj_to_index(cachep, page, objp);
2595#if DEBUG
2596 unsigned int i;
2597
2598
2599 for (i = page->active; i < cachep->num; i++) {
2600 if (get_free_obj(page, i) == objnr) {
2601 pr_err("slab: double free detected in cache '%s', objp %px\n",
2602 cachep->name, objp);
2603 BUG();
2604 }
2605 }
2606#endif
2607 page->active--;
2608 if (!page->freelist)
2609 page->freelist = objp + obj_offset(cachep);
2610
2611 set_free_obj(page, page->active, objnr);
2612}
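
/*
 * Illustrative sketch: the freelist array acts as a stack of free object
 * indices with page->active as the partition point.  For a hypothetical
 * 4-object slab:
 *
 *	freelist = {2, 0, 3, 1}, active = 0	all four objects free
 *	slab_get_obj() twice			hands out objects 2 then 0,
 *						active = 2
 *	slab_put_obj(object 2)			active = 1, freelist[1] = 2
 *
 * Entries below page->active are stale "allocated" slots; entries at or
 * above it name the objects that are still free.
 */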
2613
2614
2615
2616
2617
2618
2619static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2620 void *freelist)
2621{
2622 page->slab_cache = cache;
2623 page->freelist = freelist;
2624}
2625
2626
2627
2628
2629
2630static struct page *cache_grow_begin(struct kmem_cache *cachep,
2631 gfp_t flags, int nodeid)
2632{
2633 void *freelist;
2634 size_t offset;
2635 gfp_t local_flags;
2636 int page_node;
2637 struct kmem_cache_node *n;
2638 struct page *page;
2639
2640
2641
2642
2643
2644 if (unlikely(flags & GFP_SLAB_BUG_MASK))
2645 flags = kmalloc_fix_flags(flags);
2646
2647 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2648 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2649
2650 check_irq_off();
2651 if (gfpflags_allow_blocking(local_flags))
2652 local_irq_enable();
2653
	/*
	 * Get mem for the objs. Attempt to allocate a physical page from
	 * 'nodeid'.
	 */
2658 page = kmem_getpages(cachep, local_flags, nodeid);
2659 if (!page)
2660 goto failed;
2661
2662 page_node = page_to_nid(page);
2663 n = get_node(cachep, page_node);
2664
	/* Get the colour for this slab, and calculate the next value. */
2666 n->colour_next++;
2667 if (n->colour_next >= cachep->colour)
2668 n->colour_next = 0;
2669
2670 offset = n->colour_next;
2671 if (offset >= cachep->colour)
2672 offset = 0;
2673
2674 offset *= cachep->colour_off;
2675
	/*
	 * Call kasan_poison_slab() before calling alloc_slabmgmt(), so that
	 * page_address() in the latter returns a non-tagged pointer,
	 * as it should be for slab pages.
	 */
2681 kasan_poison_slab(page);
2682
	/* Get slab management. */
2684 freelist = alloc_slabmgmt(cachep, page, offset,
2685 local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2686 if (OFF_SLAB(cachep) && !freelist)
2687 goto opps1;
2688
2689 slab_map_pages(cachep, page, freelist);
2690
2691 cache_init_objs(cachep, page);
2692
2693 if (gfpflags_allow_blocking(local_flags))
2694 local_irq_disable();
2695
2696 return page;
2697
2698opps1:
2699 kmem_freepages(cachep, page);
2700failed:
2701 if (gfpflags_allow_blocking(local_flags))
2702 local_irq_disable();
2703 return NULL;
2704}
2705
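/*
 * Second half of slab growth: with interrupts disabled again, add the new
 * slab to the node lists and account its free objects.
 */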
2706static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2707{
2708 struct kmem_cache_node *n;
2709 void *list = NULL;
2710
2711 check_irq_off();
2712
2713 if (!page)
2714 return;
2715
2716 INIT_LIST_HEAD(&page->slab_list);
2717 n = get_node(cachep, page_to_nid(page));
2718
2719 spin_lock(&n->list_lock);
2720 n->total_slabs++;
2721 if (!page->active) {
2722 list_add_tail(&page->slab_list, &n->slabs_free);
2723 n->free_slabs++;
2724 } else
2725 fixup_slab_list(cachep, n, page, &list);
2726
2727 STATS_INC_GROWN(cachep);
2728 n->free_objects += cachep->num - page->active;
2729 spin_unlock(&n->list_lock);
2730
2731 fixup_objfreelist_debug(cachep, &list);
2732}
2733
2734#if DEBUG
2735
/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
2741static void kfree_debugcheck(const void *objp)
2742{
2743 if (!virt_addr_valid(objp)) {
2744 pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2745 (unsigned long)objp);
2746 BUG();
2747 }
2748}
2749
2750static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2751{
2752 unsigned long long redzone1, redzone2;
2753
2754 redzone1 = *dbg_redzone1(cache, obj);
2755 redzone2 = *dbg_redzone2(cache, obj);
2756
	/*
	 * Both redzones still active: the redzone is ok.
	 */
2760 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2761 return;
2762
2763 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2764 slab_error(cache, "double free detected");
2765 else
2766 slab_error(cache, "memory outside object was overwritten");
2767
2768 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2769 obj, redzone1, redzone2);
2770}
2771
2772static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2773 unsigned long caller)
2774{
2775 unsigned int objnr;
2776 struct page *page;
2777
2778 BUG_ON(virt_to_cache(objp) != cachep);
2779
2780 objp -= obj_offset(cachep);
2781 kfree_debugcheck(objp);
2782 page = virt_to_head_page(objp);
2783
2784 if (cachep->flags & SLAB_RED_ZONE) {
2785 verify_redzone_free(cachep, objp);
2786 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2787 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2788 }
2789 if (cachep->flags & SLAB_STORE_USER) {
2790 set_store_user_dirty(cachep);
2791 *dbg_userword(cachep, objp) = (void *)caller;
2792 }
2793
2794 objnr = obj_to_index(cachep, page, objp);
2795
2796 BUG_ON(objnr >= cachep->num);
2797 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2798
2799 if (cachep->flags & SLAB_POISON) {
2800 poison_obj(cachep, objp, POISON_FREE);
2801 slab_kernel_map(cachep, objp, 0, caller);
2802 }
2803 return objp;
2804}
2805
2806#else
2807#define kfree_debugcheck(x) do { } while(0)
2808#define cache_free_debugcheck(x,objp,z) (objp)
2809#endif
2810
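/*
 * Under DEBUG, fixup_slab_list() chains objects that held their slab's
 * freelist onto *list; re-poison them here, after the node lock is dropped.
 */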
2811static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2812 void **list)
2813{
2814#if DEBUG
2815 void *next = *list;
2816 void *objp;
2817
2818 while (next) {
2819 objp = next - obj_offset(cachep);
2820 next = *(void **)next;
2821 poison_obj(cachep, objp, POISON_FREE);
2822 }
2823#endif
2824}
2825
2826static inline void fixup_slab_list(struct kmem_cache *cachep,
2827 struct kmem_cache_node *n, struct page *page,
2828 void **list)
2829{
	/* Move the slab to the correct slab list. */
2831 list_del(&page->slab_list);
2832 if (page->active == cachep->num) {
2833 list_add(&page->slab_list, &n->slabs_full);
2834 if (OBJFREELIST_SLAB(cachep)) {
2835#if DEBUG
			/* Poisoning will be done without holding the lock */
2837 if (cachep->flags & SLAB_POISON) {
2838 void **objp = page->freelist;
2839
2840 *objp = *list;
2841 *list = objp;
2842 }
2843#endif
2844 page->freelist = NULL;
2845 }
2846 } else
2847 list_add(&page->slab_list, &n->slabs_partial);
2848}
2849
/* Try to find a non-pfmemalloc slab if needed. */
2851static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2852 struct page *page, bool pfmemalloc)
2853{
2854 if (!page)
2855 return NULL;
2856
2857 if (pfmemalloc)
2858 return page;
2859
2860 if (!PageSlabPfmemalloc(page))
2861 return page;
2862
	/* No need to keep the pfmemalloc slab if we have enough free objects */
2864 if (n->free_objects > n->free_limit) {
2865 ClearPageSlabPfmemalloc(page);
2866 return page;
2867 }
2868
	/* Move the pfmemalloc slab to the end of the list to speed up the next search */
2870 list_del(&page->slab_list);
2871 if (!page->active) {
2872 list_add_tail(&page->slab_list, &n->slabs_free);
2873 n->free_slabs++;
2874 } else
2875 list_add_tail(&page->slab_list, &n->slabs_partial);
2876
2877 list_for_each_entry(page, &n->slabs_partial, slab_list) {
2878 if (!PageSlabPfmemalloc(page))
2879 return page;
2880 }
2881
2882 n->free_touched = 1;
2883 list_for_each_entry(page, &n->slabs_free, slab_list) {
2884 if (!PageSlabPfmemalloc(page)) {
2885 n->free_slabs--;
2886 return page;
2887 }
2888 }
2889
2890 return NULL;
2891}
2892
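/*
 * Pick the first slab to allocate from: partial slabs first, then free ones,
 * filtering out pfmemalloc slabs when memory reserves are in use.
 */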
2893static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2894{
2895 struct page *page;
2896
2897 assert_spin_locked(&n->list_lock);
2898 page = list_first_entry_or_null(&n->slabs_partial, struct page,
2899 slab_list);
2900 if (!page) {
2901 n->free_touched = 1;
2902 page = list_first_entry_or_null(&n->slabs_free, struct page,
2903 slab_list);
2904 if (page)
2905 n->free_slabs--;
2906 }
2907
2908 if (sk_memalloc_socks())
2909 page = get_valid_first_slab(n, page, pfmemalloc);
2910
2911 return page;
2912}
2913
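/*
 * Allocate a single object directly from a pfmemalloc slab, for requests
 * that are allowed to dip into the memory reserves.
 */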
2914static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2915 struct kmem_cache_node *n, gfp_t flags)
2916{
2917 struct page *page;
2918 void *obj;
2919 void *list = NULL;
2920
2921 if (!gfp_pfmemalloc_allowed(flags))
2922 return NULL;
2923
2924 spin_lock(&n->list_lock);
2925 page = get_first_slab(n, true);
2926 if (!page) {
2927 spin_unlock(&n->list_lock);
2928 return NULL;
2929 }
2930
2931 obj = slab_get_obj(cachep, page);
2932 n->free_objects--;
2933
2934 fixup_slab_list(cachep, n, page, &list);
2935
2936 spin_unlock(&n->list_lock);
2937 fixup_objfreelist_debug(cachep, &list);
2938
2939 return obj;
2940}
2941
/*
 * The slab list should be fixed up by fixup_slab_list() for an existing slab
 * or by cache_grow_end() for a new slab.
 */
2946static __always_inline int alloc_block(struct kmem_cache *cachep,
2947 struct array_cache *ac, struct page *page, int batchcount)
2948{
	/*
	 * There must be at least one object available for
	 * allocation.
	 */
2953 BUG_ON(page->active >= cachep->num);
2954
2955 while (page->active < cachep->num && batchcount--) {
2956 STATS_INC_ALLOCED(cachep);
2957 STATS_INC_ACTIVE(cachep);
2958 STATS_SET_HIGH(cachep);
2959
2960 ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2961 }
2962
2963 return batchcount;
2964}
2965
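/*
 * Refill the per-cpu array_cache when it is empty: take objects from the
 * node's shared array or from partial/free slabs, growing the cache if needed.
 */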
2966static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2967{
2968 int batchcount;
2969 struct kmem_cache_node *n;
2970 struct array_cache *ac, *shared;
2971 int node;
2972 void *list = NULL;
2973 struct page *page;
2974
2975 check_irq_off();
2976 node = numa_mem_id();
2977
2978 ac = cpu_cache_get(cachep);
2979 batchcount = ac->batchcount;
2980 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
		 */
2986 batchcount = BATCHREFILL_LIMIT;
2987 }
2988 n = get_node(cachep, node);
2989
2990 BUG_ON(ac->avail > 0 || !n);
2991 shared = READ_ONCE(n->shared);
2992 if (!n->free_objects && (!shared || !shared->avail))
2993 goto direct_grow;
2994
2995 spin_lock(&n->list_lock);
2996 shared = READ_ONCE(n->shared);
2997
	/* See if we can refill from the shared array */
2999 if (shared && transfer_objects(ac, shared, batchcount)) {
3000 shared->touched = 1;
3001 goto alloc_done;
3002 }
3003
3004 while (batchcount > 0) {
		/* Get the slab the allocation is to come from. */
3006 page = get_first_slab(n, false);
3007 if (!page)
3008 goto must_grow;
3009
3010 check_spinlock_acquired(cachep);
3011
3012 batchcount = alloc_block(cachep, ac, page, batchcount);
3013 fixup_slab_list(cachep, n, page, &list);
3014 }
3015
3016must_grow:
3017 n->free_objects -= ac->avail;
3018alloc_done:
3019 spin_unlock(&n->list_lock);
3020 fixup_objfreelist_debug(cachep, &list);
3021
3022direct_grow:
3023 if (unlikely(!ac->avail)) {
		/* Check if we can use an object from a pfmemalloc slab */
3025 if (sk_memalloc_socks()) {
3026 void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
3027
3028 if (obj)
3029 return obj;
3030 }
3031
3032 page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
3033
		/*
		 * cache_grow_begin() can reenable interrupts,
		 * then ac could change.
		 */
3038 ac = cpu_cache_get(cachep);
3039 if (!ac->avail && page)
3040 alloc_block(cachep, ac, page, batchcount);
3041 cache_grow_end(cachep, page);
3042
3043 if (!ac->avail)
3044 return NULL;
3045 }
3046 ac->touched = 1;
3047
3048 return ac->entry[--ac->avail];
3049}
3050
3051static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3052 gfp_t flags)
3053{
3054 might_sleep_if(gfpflags_allow_blocking(flags));
3055}
3056
3057#if DEBUG
3058static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3059 gfp_t flags, void *objp, unsigned long caller)
3060{
3061 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
3062 if (!objp)
3063 return objp;
3064 if (cachep->flags & SLAB_POISON) {
3065 check_poison_obj(cachep, objp);
3066 slab_kernel_map(cachep, objp, 1, 0);
3067 poison_obj(cachep, objp, POISON_INUSE);
3068 }
3069 if (cachep->flags & SLAB_STORE_USER)
3070 *dbg_userword(cachep, objp) = (void *)caller;
3071
3072 if (cachep->flags & SLAB_RED_ZONE) {
3073 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3074 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3075 slab_error(cachep, "double free, or memory outside object was overwritten");
3076 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3077 objp, *dbg_redzone1(cachep, objp),
3078 *dbg_redzone2(cachep, objp));
3079 }
3080 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3081 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3082 }
3083
3084 objp += obj_offset(cachep);
3085 if (cachep->ctor && cachep->flags & SLAB_POISON)
3086 cachep->ctor(objp);
3087 if (ARCH_SLAB_MINALIGN &&
3088 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3089 pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3090 objp, (int)ARCH_SLAB_MINALIGN);
3091 }
3092 return objp;
3093}
3094#else
3095#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3096#endif
3097
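/* Fast path: allocate from the per-cpu array_cache, refilling it on a miss. */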
3098static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3099{
3100 void *objp;
3101 struct array_cache *ac;
3102
3103 check_irq_off();
3104
3105 ac = cpu_cache_get(cachep);
3106 if (likely(ac->avail)) {
3107 ac->touched = 1;
3108 objp = ac->entry[--ac->avail];
3109
3110 STATS_INC_ALLOCHIT(cachep);
3111 goto out;
3112 }
3113
3114 STATS_INC_ALLOCMISS(cachep);
3115 objp = cache_alloc_refill(cachep, flags);
3116
	/*
	 * The 'ac' may be updated by cache_alloc_refill(), and
	 * kmemleak_erase() requires its correct value.
	 */
3120 ac = cpu_cache_get(cachep);
3121
3122out:
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
3128 if (objp)
3129 kmemleak_erase(&ac->entry[ac->avail]);
3130 return objp;
3131}
3132
3133#ifdef CONFIG_NUMA
3134
/*
 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
3140static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3141{
3142 int nid_alloc, nid_here;
3143
3144 if (in_interrupt() || (flags & __GFP_THISNODE))
3145 return NULL;
3146 nid_alloc = nid_here = numa_mem_id();
3147 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3148 nid_alloc = cpuset_slab_spread_node();
3149 else if (current->mempolicy)
3150 nid_alloc = mempolicy_slab_node();
3151 if (nid_alloc != nid_here)
3152 return ____cache_alloc_node(cachep, flags, nid_alloc);
3153 return NULL;
3154}
3155
/*
 * Fallback function if there was no memory available and no objects on a
 * certain node and fall back is permitted. First we scan all the
 * available nodes for available objects. If that fails then we
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
 */
3164static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3165{
3166 struct zonelist *zonelist;
3167 struct zoneref *z;
3168 struct zone *zone;
3169 enum zone_type high_zoneidx = gfp_zone(flags);
3170 void *obj = NULL;
3171 struct page *page;
3172 int nid;
3173 unsigned int cpuset_mems_cookie;
3174
3175 if (flags & __GFP_THISNODE)
3176 return NULL;
3177
3178retry_cpuset:
3179 cpuset_mems_cookie = read_mems_allowed_begin();
3180 zonelist = node_zonelist(mempolicy_slab_node(), flags);
3181
3182retry:
	/*
	 * Look through the allowed nodes for objects that are
	 * already available.
	 */
3187 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3188 nid = zone_to_nid(zone);
3189
3190 if (cpuset_zone_allowed(zone, flags) &&
3191 get_node(cache, nid) &&
3192 get_node(cache, nid)->free_objects) {
3193 obj = ____cache_alloc_node(cache,
3194 gfp_exact_node(flags), nid);
3195 if (obj)
3196 break;
3197 }
3198 }
3199
3200 if (!obj) {
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
3207 page = cache_grow_begin(cache, flags, numa_mem_id());
3208 cache_grow_end(cache, page);
3209 if (page) {
3210 nid = page_to_nid(page);
3211 obj = ____cache_alloc_node(cache,
3212 gfp_exact_node(flags), nid);
3213
			/*
			 * Another processor may allocate the objects in
			 * the slab since we are not holding any locks.
			 */
3218 if (!obj)
3219 goto retry;
3220 }
3221 }
3222
3223 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3224 goto retry_cpuset;
3225 return obj;
3226}
3227
/*
 * An interface to enable slab creation on nodeid
 */
3231static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3232 int nodeid)
3233{
3234 struct page *page;
3235 struct kmem_cache_node *n;
3236 void *obj = NULL;
3237 void *list = NULL;
3238
3239 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3240 n = get_node(cachep, nodeid);
3241 BUG_ON(!n);
3242
3243 check_irq_off();
3244 spin_lock(&n->list_lock);
3245 page = get_first_slab(n, false);
3246 if (!page)
3247 goto must_grow;
3248
3249 check_spinlock_acquired_node(cachep, nodeid);
3250
3251 STATS_INC_NODEALLOCS(cachep);
3252 STATS_INC_ACTIVE(cachep);
3253 STATS_SET_HIGH(cachep);
3254
3255 BUG_ON(page->active == cachep->num);
3256
3257 obj = slab_get_obj(cachep, page);
3258 n->free_objects--;
3259
3260 fixup_slab_list(cachep, n, page, &list);
3261
3262 spin_unlock(&n->list_lock);
3263 fixup_objfreelist_debug(cachep, &list);
3264 return obj;
3265
3266must_grow:
3267 spin_unlock(&n->list_lock);
3268 page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3269 if (page) {
		/* This slab isn't counted yet so don't update free_objects */
3271 obj = slab_get_obj(cachep, page);
3272 }
3273 cache_grow_end(cachep, page);
3274
3275 return obj ? obj : fallback_alloc(cachep, flags);
3276}
3277
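/*
 * NUMA-aware entry point: allocate an object, preferring @nodeid but falling
 * back to the local node or to fallback_alloc() when necessary.
 */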
3278static __always_inline void *
3279slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3280 unsigned long caller)
3281{
3282 unsigned long save_flags;
3283 void *ptr;
3284 int slab_node = numa_mem_id();
3285 struct obj_cgroup *objcg = NULL;
3286
3287 flags &= gfp_allowed_mask;
3288 cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3289 if (unlikely(!cachep))
3290 return NULL;
3291
3292 cache_alloc_debugcheck_before(cachep, flags);
3293 local_irq_save(save_flags);
3294
3295 if (nodeid == NUMA_NO_NODE)
3296 nodeid = slab_node;
3297
3298 if (unlikely(!get_node(cachep, nodeid))) {
		/* Node not bootstrapped yet */
3300 ptr = fallback_alloc(cachep, flags);
3301 goto out;
3302 }
3303
3304 if (nodeid == slab_node) {
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
3311 ptr = ____cache_alloc(cachep, flags);
3312 if (ptr)
3313 goto out;
3314 }
3315
3316 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3317 out:
3318 local_irq_restore(save_flags);
3319 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3320
3321 if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
3322 memset(ptr, 0, cachep->object_size);
3323
3324 slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
3325 return ptr;
3326}
3327
3328static __always_inline void *
3329__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3330{
3331 void *objp;
3332
3333 if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3334 objp = alternate_node_alloc(cache, flags);
3335 if (objp)
3336 goto out;
3337 }
3338 objp = ____cache_alloc(cache, flags);
3339
	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes.
	 */
3344 if (!objp)
3345 objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3346
3347 out:
3348 return objp;
3349}
3350#else
3351
3352static __always_inline void *
3353__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3354{
3355 return ____cache_alloc(cachep, flags);
3356}
3357
3358#endif
3359
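/* Common allocation entry point used by kmem_cache_alloc() and kmalloc(). */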
3360static __always_inline void *
3361slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3362{
3363 unsigned long save_flags;
3364 void *objp;
3365 struct obj_cgroup *objcg = NULL;
3366
3367 flags &= gfp_allowed_mask;
3368 cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3369 if (unlikely(!cachep))
3370 return NULL;
3371
3372 cache_alloc_debugcheck_before(cachep, flags);
3373 local_irq_save(save_flags);
3374 objp = __do_cache_alloc(cachep, flags);
3375 local_irq_restore(save_flags);
3376 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3377 prefetchw(objp);
3378
3379 if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
3380 memset(objp, 0, cachep->object_size);
3381
3382 slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
3383 return objp;
3384}
3385
/*
 * Caller needs to acquire the correct kmem_cache_node's list_lock.
 * @list: list of detached, freeable slabs; to be destroyed by the caller
 */
3390static void free_block(struct kmem_cache *cachep, void **objpp,
3391 int nr_objects, int node, struct list_head *list)
3392{
3393 int i;
3394 struct kmem_cache_node *n = get_node(cachep, node);
3395 struct page *page;
3396
3397 n->free_objects += nr_objects;
3398
3399 for (i = 0; i < nr_objects; i++) {
3400 void *objp;
3401 struct page *page;
3402
3403 objp = objpp[i];
3404
3405 page = virt_to_head_page(objp);
3406 list_del(&page->slab_list);
3407 check_spinlock_acquired_node(cachep, node);
3408 slab_put_obj(cachep, page, objp);
3409 STATS_DEC_ACTIVE(cachep);
3410
		/* Fix up the slab chains. */
3412 if (page->active == 0) {
3413 list_add(&page->slab_list, &n->slabs_free);
3414 n->free_slabs++;
3415 } else {
			/*
			 * Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
3420 list_add_tail(&page->slab_list, &n->slabs_partial);
3421 }
3422 }
3423
3424 while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3425 n->free_objects -= cachep->num;
3426
3427 page = list_last_entry(&n->slabs_free, struct page, slab_list);
3428 list_move(&page->slab_list, list);
3429 n->free_slabs--;
3430 n->total_slabs--;
3431 }
3432}
3433
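/*
 * Flush a batch of objects from a full per-cpu array_cache back to the
 * node's shared array or to the slabs themselves.
 */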
3434static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3435{
3436 int batchcount;
3437 struct kmem_cache_node *n;
3438 int node = numa_mem_id();
3439 LIST_HEAD(list);
3440
3441 batchcount = ac->batchcount;
3442
3443 check_irq_off();
3444 n = get_node(cachep, node);
3445 spin_lock(&n->list_lock);
3446 if (n->shared) {
3447 struct array_cache *shared_array = n->shared;
3448 int max = shared_array->limit - shared_array->avail;
3449 if (max) {
3450 if (batchcount > max)
3451 batchcount = max;
3452 memcpy(&(shared_array->entry[shared_array->avail]),
3453 ac->entry, sizeof(void *) * batchcount);
3454 shared_array->avail += batchcount;
3455 goto free_done;
3456 }
3457 }
3458
3459 free_block(cachep, ac->entry, batchcount, node, &list);
3460free_done:
3461#if STATS
3462 {
3463 int i = 0;
3464 struct page *page;
3465
3466 list_for_each_entry(page, &n->slabs_free, slab_list) {
3467 BUG_ON(page->active);
3468
3469 i++;
3470 }
3471 STATS_SET_FREEABLE(cachep, i);
3472 }
3473#endif
3474 spin_unlock(&n->list_lock);
3475 ac->avail -= batchcount;
3476 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3477 slabs_destroy(cachep, &list);
3478}
3479
/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
3484static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
3485 unsigned long caller)
3486{
	/* Put the object into the quarantine, don't touch it for now. */
3488 if (kasan_slab_free(cachep, objp, _RET_IP_))
3489 return;
3490
3491 ___cache_free(cachep, objp, caller);
3492}
3493
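/* Free one object into the per-cpu array_cache, flushing it when full. */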
3494void ___cache_free(struct kmem_cache *cachep, void *objp,
3495 unsigned long caller)
3496{
3497 struct array_cache *ac = cpu_cache_get(cachep);
3498
3499 check_irq_off();
3500 if (unlikely(slab_want_init_on_free(cachep)))
3501 memset(objp, 0, cachep->object_size);
3502 kmemleak_free_recursive(objp, cachep->flags);
3503 objp = cache_free_debugcheck(cachep, objp, caller);
3504 memcg_slab_free_hook(cachep, &objp, 1);
3505
	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This will avoid cache misses that happen while accessing slabp
	 * (which is per-page memory reference) to get the nodeid. Instead
	 * use a global variable to skip the call, which is most likely to
	 * be present in the cache.
	 */
3513 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3514 return;
3515
3516 if (ac->avail < ac->limit) {
3517 STATS_INC_FREEHIT(cachep);
3518 } else {
3519 STATS_INC_FREEMISS(cachep);
3520 cache_flusharray(cachep, ac);
3521 }
3522
3523 if (sk_memalloc_socks()) {
3524 struct page *page = virt_to_head_page(objp);
3525
3526 if (unlikely(PageSlabPfmemalloc(page))) {
3527 cache_free_pfmemalloc(cachep, page, objp);
3528 return;
3529 }
3530 }
3531
3532 __free_one(ac, objp);
3533}
3534
/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
3545void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3546{
3547 void *ret = slab_alloc(cachep, flags, _RET_IP_);
3548
3549 trace_kmem_cache_alloc(_RET_IP_, ret,
3550 cachep->object_size, cachep->size, flags);
3551
3552 return ret;
3553}
3554EXPORT_SYMBOL(kmem_cache_alloc);
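/*
 * Illustrative usage sketch (not part of this file); 'struct foo' and
 * 'foo_cache' are hypothetical caller-side names:
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 */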
3555
3556static __always_inline void
3557cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3558 size_t size, void **p, unsigned long caller)
3559{
3560 size_t i;
3561
3562 for (i = 0; i < size; i++)
3563 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3564}
3565
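/*
 * Bulk allocation: fill @p with @size objects, or free whatever was
 * allocated and return 0 if any single allocation fails.
 */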
3566int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3567 void **p)
3568{
3569 size_t i;
3570 struct obj_cgroup *objcg = NULL;
3571
3572 s = slab_pre_alloc_hook(s, &objcg, size, flags);
3573 if (!s)
3574 return 0;
3575
3576 cache_alloc_debugcheck_before(s, flags);
3577
3578 local_irq_disable();
3579 for (i = 0; i < size; i++) {
3580 void *objp = __do_cache_alloc(s, flags);
3581
3582 if (unlikely(!objp))
3583 goto error;
3584 p[i] = objp;
3585 }
3586 local_irq_enable();
3587
3588 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3589
	/* Clear memory outside the IRQ-disabled section */
3591 if (unlikely(slab_want_init_on_alloc(flags, s)))
3592 for (i = 0; i < size; i++)
3593 memset(p[i], 0, s->object_size);
3594
3595 slab_post_alloc_hook(s, objcg, flags, size, p);
3596
3597 return size;
3598error:
3599 local_irq_enable();
3600 cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3601 slab_post_alloc_hook(s, objcg, flags, i, p);
3602 __kmem_cache_free_bulk(s, i, p);
3603 return 0;
3604}
3605EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3606
3607#ifdef CONFIG_TRACING
3608void *
3609kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3610{
3611 void *ret;
3612
3613 ret = slab_alloc(cachep, flags, _RET_IP_);
3614
3615 ret = kasan_kmalloc(cachep, ret, size, flags);
3616 trace_kmalloc(_RET_IP_, ret,
3617 size, cachep->size, flags);
3618 return ret;
3619}
3620EXPORT_SYMBOL(kmem_cache_alloc_trace);
3621#endif
3622
3623#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other nodes is possible if __GFP_THISNODE is not set.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
3637void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3638{
3639 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3640
3641 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3642 cachep->object_size, cachep->size,
3643 flags, nodeid);
3644
3645 return ret;
3646}
3647EXPORT_SYMBOL(kmem_cache_alloc_node);
3648
3649#ifdef CONFIG_TRACING
3650void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3651 gfp_t flags,
3652 int nodeid,
3653 size_t size)
3654{
3655 void *ret;
3656
3657 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3658
3659 ret = kasan_kmalloc(cachep, ret, size, flags);
3660 trace_kmalloc_node(_RET_IP_, ret,
3661 size, cachep->size,
3662 flags, nodeid);
3663 return ret;
3664}
3665EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3666#endif
3667
3668static __always_inline void *
3669__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3670{
3671 struct kmem_cache *cachep;
3672 void *ret;
3673
3674 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3675 return NULL;
3676 cachep = kmalloc_slab(size, flags);
3677 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3678 return cachep;
3679 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3680 ret = kasan_kmalloc(cachep, ret, size, flags);
3681
3682 return ret;
3683}
3684
3685void *__kmalloc_node(size_t size, gfp_t flags, int node)
3686{
3687 return __do_kmalloc_node(size, flags, node, _RET_IP_);
3688}
3689EXPORT_SYMBOL(__kmalloc_node);
3690
3691void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3692 int node, unsigned long caller)
3693{
3694 return __do_kmalloc_node(size, flags, node, caller);
3695}
3696EXPORT_SYMBOL(__kmalloc_node_track_caller);
3697#endif
3698
/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
3707static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3708 unsigned long caller)
3709{
3710 struct kmem_cache *cachep;
3711 void *ret;
3712
3713 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3714 return NULL;
3715 cachep = kmalloc_slab(size, flags);
3716 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3717 return cachep;
3718 ret = slab_alloc(cachep, flags, caller);
3719
3720 ret = kasan_kmalloc(cachep, ret, size, flags);
3721 trace_kmalloc(caller, ret,
3722 size, cachep->size, flags);
3723
3724 return ret;
3725}
3726
3727void *__kmalloc(size_t size, gfp_t flags)
3728{
3729 return __do_kmalloc(size, flags, _RET_IP_);
3730}
3731EXPORT_SYMBOL(__kmalloc);
3732
3733void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3734{
3735 return __do_kmalloc(size, flags, caller);
3736}
3737EXPORT_SYMBOL(__kmalloc_track_caller);
3738
/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
3747void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3748{
3749 unsigned long flags;
3750 cachep = cache_from_obj(cachep, objp);
3751 if (!cachep)
3752 return;
3753
3754 local_irq_save(flags);
3755 debug_check_no_locks_freed(objp, cachep->object_size);
3756 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3757 debug_check_no_obj_freed(objp, cachep->object_size);
3758 __cache_free(cachep, objp, _RET_IP_);
3759 local_irq_restore(flags);
3760
3761 trace_kmem_cache_free(_RET_IP_, objp);
3762}
3763EXPORT_SYMBOL(kmem_cache_free);
3764
3765void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3766{
3767 struct kmem_cache *s;
3768 size_t i;
3769
3770 local_irq_disable();
3771 for (i = 0; i < size; i++) {
3772 void *objp = p[i];
3773
3774 if (!orig_s)
3775 s = virt_to_cache(objp);
3776 else
3777 s = cache_from_obj(orig_s, objp);
3778 if (!s)
3779 continue;
3780
3781 debug_check_no_locks_freed(objp, s->object_size);
3782 if (!(s->flags & SLAB_DEBUG_OBJECTS))
3783 debug_check_no_obj_freed(objp, s->object_size);
3784
3785 __cache_free(s, objp, _RET_IP_);
3786 }
3787 local_irq_enable();
3788
	/* Note: unlike kmem_cache_free(), bulk free is not traced. */
3790}
3791EXPORT_SYMBOL(kmem_cache_free_bulk);
3792
/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
3802void kfree(const void *objp)
3803{
3804 struct kmem_cache *c;
3805 unsigned long flags;
3806
3807 trace_kfree(_RET_IP_, objp);
3808
3809 if (unlikely(ZERO_OR_NULL_PTR(objp)))
3810 return;
3811 local_irq_save(flags);
3812 kfree_debugcheck(objp);
3813 c = virt_to_cache(objp);
3814 if (!c) {
3815 local_irq_restore(flags);
3816 return;
3817 }
3818 debug_check_no_locks_freed(objp, c->object_size);
3819
3820 debug_check_no_obj_freed(objp, c->object_size);
3821 __cache_free(c, (void *)objp, _RET_IP_);
3822 local_irq_restore(flags);
3823}
3824EXPORT_SYMBOL(kfree);
3825
/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
3829static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3830{
3831 int ret;
3832 int node;
3833 struct kmem_cache_node *n;
3834
3835 for_each_online_node(node) {
3836 ret = setup_kmem_cache_node(cachep, node, gfp, true);
3837 if (ret)
3838 goto fail;
3839
3840 }
3841
3842 return 0;
3843
3844fail:
3845 if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did. */
3847 node--;
3848 while (node >= 0) {
3849 n = get_node(cachep, node);
3850 if (n) {
3851 kfree(n->shared);
3852 free_alien_cache(n->alien);
3853 kfree(n);
3854 cachep->node[node] = NULL;
3855 }
3856 node--;
3857 }
3858 }
3859 return -ENOMEM;
3860}
3861
/* Always called with the slab_mutex held */
3863static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3864 int batchcount, int shared, gfp_t gfp)
3865{
3866 struct array_cache __percpu *cpu_cache, *prev;
3867 int cpu;
3868
3869 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3870 if (!cpu_cache)
3871 return -ENOMEM;
3872
3873 prev = cachep->cpu_cache;
3874 cachep->cpu_cache = cpu_cache;
3875
	/*
	 * Without a previous cpu_cache there's no need to synchronize remote
	 * cpus, so skip the IPIs.
	 */
3879 if (prev)
3880 kick_all_cpus_sync();
3881
3882 check_irq_on();
3883 cachep->batchcount = batchcount;
3884 cachep->limit = limit;
3885 cachep->shared = shared;
3886
3887 if (!prev)
3888 goto setup_node;
3889
3890 for_each_online_cpu(cpu) {
3891 LIST_HEAD(list);
3892 int node;
3893 struct kmem_cache_node *n;
3894 struct array_cache *ac = per_cpu_ptr(prev, cpu);
3895
3896 node = cpu_to_mem(cpu);
3897 n = get_node(cachep, node);
3898 spin_lock_irq(&n->list_lock);
3899 free_block(cachep, ac->entry, ac->avail, node, &list);
3900 spin_unlock_irq(&n->list_lock);
3901 slabs_destroy(cachep, &list);
3902 }
3903 free_percpu(prev);
3904
3905setup_node:
3906 return setup_kmem_cache_nodes(cachep, gfp);
3907}
3908
/* Always called with the slab_mutex held */
3910static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3911{
3912 int err;
3913 int limit = 0;
3914 int shared = 0;
3915 int batchcount = 0;
3916
3917 err = cache_random_seq_create(cachep, cachep->num, gfp);
3918 if (err)
3919 goto end;
3920
3921 if (limit && shared && batchcount)
3922 goto skip_setup;
3923
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed; we should auto-tune as described by Bonwick.
	 */
3932 if (cachep->size > 131072)
3933 limit = 1;
3934 else if (cachep->size > PAGE_SIZE)
3935 limit = 8;
3936 else if (cachep->size > 1024)
3937 limit = 24;
3938 else if (cachep->size > 256)
3939 limit = 54;
3940 else
3941 limit = 120;
3942
	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
3952 shared = 0;
3953 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3954 shared = 8;
3955
3956#if DEBUG
	/*
	 * With debugging enabled, large batchcounts lead to excessively long
	 * periods with disabled local interrupts. Limit the batchcount.
	 */
3961 if (limit > 32)
3962 limit = 32;
3963#endif
3964 batchcount = (limit + 1) / 2;
3965skip_setup:
3966 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3967end:
3968 if (err)
3969 pr_err("enable_cpucache failed for %s, error %d\n",
3970 cachep->name, -err);
3971 return err;
3972}
3973
/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
3979static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3980 struct array_cache *ac, int node)
3981{
3982 LIST_HEAD(list);
3983
	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
3985 check_mutex_acquired();
3986
3987 if (!ac || !ac->avail)
3988 return;
3989
3990 if (ac->touched) {
3991 ac->touched = 0;
3992 return;
3993 }
3994
3995 spin_lock_irq(&n->list_lock);
3996 drain_array_locked(cachep, ac, node, false, &list);
3997 spin_unlock_irq(&n->list_lock);
3998
3999 slabs_destroy(cachep, &list);
4000}
4001
/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
4014static void cache_reap(struct work_struct *w)
4015{
4016 struct kmem_cache *searchp;
4017 struct kmem_cache_node *n;
4018 int node = numa_mem_id();
4019 struct delayed_work *work = to_delayed_work(w);
4020
4021 if (!mutex_trylock(&slab_mutex))
		/* Give up. Set up the next iteration. */
4023 goto out;
4024
4025 list_for_each_entry(searchp, &slab_caches, list) {
4026 check_irq_on();
4027
		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
4033 n = get_node(searchp, node);
4034
4035 reap_alien(searchp, n);
4036
4037 drain_array(searchp, n, cpu_cache_get(searchp), node);
4038
		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
4043 if (time_after(n->next_reap, jiffies))
4044 goto next;
4045
4046 n->next_reap = jiffies + REAPTIMEOUT_NODE;
4047
4048 drain_array(searchp, n, n->shared, node);
4049
4050 if (n->free_touched)
4051 n->free_touched = 0;
4052 else {
4053 int freed;
4054
4055 freed = drain_freelist(searchp, n, (n->free_limit +
4056 5 * searchp->num - 1) / (5 * searchp->num));
4057 STATS_ADD_REAPED(searchp, freed);
4058 }
4059next:
4060 cond_resched();
4061 }
4062 check_irq_on();
4063 mutex_unlock(&slab_mutex);
4064 next_reap_node();
4065out:
	/* Set up the next iteration */
4067 schedule_delayed_work_on(smp_processor_id(), work,
4068 round_jiffies_relative(REAPTIMEOUT_AC));
4069}
4070
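/* Collect per-node slab counters into @sinfo for /proc/slabinfo. */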
4071void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4072{
4073 unsigned long active_objs, num_objs, active_slabs;
4074 unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
4075 unsigned long free_slabs = 0;
4076 int node;
4077 struct kmem_cache_node *n;
4078
4079 for_each_kmem_cache_node(cachep, node, n) {
4080 check_irq_on();
4081 spin_lock_irq(&n->list_lock);
4082
4083 total_slabs += n->total_slabs;
4084 free_slabs += n->free_slabs;
4085 free_objs += n->free_objects;
4086
4087 if (n->shared)
4088 shared_avail += n->shared->avail;
4089
4090 spin_unlock_irq(&n->list_lock);
4091 }
4092 num_objs = total_slabs * cachep->num;
4093 active_slabs = total_slabs - free_slabs;
4094 active_objs = num_objs - free_objs;
4095
4096 sinfo->active_objs = active_objs;
4097 sinfo->num_objs = num_objs;
4098 sinfo->active_slabs = active_slabs;
4099 sinfo->num_slabs = total_slabs;
4100 sinfo->shared_avail = shared_avail;
4101 sinfo->limit = cachep->limit;
4102 sinfo->batchcount = cachep->batchcount;
4103 sinfo->shared = cachep->shared;
4104 sinfo->objects_per_slab = cachep->num;
4105 sinfo->cache_order = cachep->gfporder;
4106}
4107
4108void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4109{
4110#if STATS
4111 {
4112 unsigned long high = cachep->high_mark;
4113 unsigned long allocs = cachep->num_allocations;
4114 unsigned long grown = cachep->grown;
4115 unsigned long reaped = cachep->reaped;
4116 unsigned long errors = cachep->errors;
4117 unsigned long max_freeable = cachep->max_freeable;
4118 unsigned long node_allocs = cachep->node_allocs;
4119 unsigned long node_frees = cachep->node_frees;
4120 unsigned long overflows = cachep->node_overflow;
4121
4122 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
4123 allocs, high, grown,
4124 reaped, errors, max_freeable, node_allocs,
4125 node_frees, overflows);
4126 }
4127
4128 {
4129 unsigned long allochit = atomic_read(&cachep->allochit);
4130 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4131 unsigned long freehit = atomic_read(&cachep->freehit);
4132 unsigned long freemiss = atomic_read(&cachep->freemiss);
4133
4134 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4135 allochit, allocmiss, freehit, freemiss);
4136 }
4137#endif
4138}
4139
4140#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 *
 * Return: %0 on success, negative error code otherwise.
 */
4150ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4151 size_t count, loff_t *ppos)
4152{
4153 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4154 int limit, batchcount, shared, res;
4155 struct kmem_cache *cachep;
4156
4157 if (count > MAX_SLABINFO_WRITE)
4158 return -EINVAL;
4159 if (copy_from_user(&kbuf, buffer, count))
4160 return -EFAULT;
4161 kbuf[MAX_SLABINFO_WRITE] = '\0';
4162
4163 tmp = strchr(kbuf, ' ');
4164 if (!tmp)
4165 return -EINVAL;
4166 *tmp = '\0';
4167 tmp++;
4168 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4169 return -EINVAL;
4170
	/* Find the cache in the chain of caches. */
4172 mutex_lock(&slab_mutex);
4173 res = -EINVAL;
4174 list_for_each_entry(cachep, &slab_caches, list) {
4175 if (!strcmp(cachep->name, kbuf)) {
4176 if (limit < 1 || batchcount < 1 ||
4177 batchcount > limit || shared < 0) {
4178 res = 0;
4179 } else {
4180 res = do_tune_cpucache(cachep, limit,
4181 batchcount, shared,
4182 GFP_KERNEL);
4183 }
4184 break;
4185 }
4186 }
4187 mutex_unlock(&slab_mutex);
4188 if (res >= 0)
4189 res = count;
4190 return res;
4191}
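/*
 * Illustrative tuning write (hedged example; "dentry" is just a sample cache
 * name): the accepted format is "<cache name> <limit> <batchcount> <shared>",
 * e.g.
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 */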
4192
4193#ifdef CONFIG_DEBUG_SLAB_LEAK
4194
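/*
 * Record allocation caller @v in the table at @n: n[0] is the capacity,
 * n[1] the number of entries, followed by (address, count) pairs kept
 * sorted by address. Returns 0 once the table is full.
 */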
4195static inline int add_caller(unsigned long *n, unsigned long v)
4196{
4197 unsigned long *p;
4198 int l;
4199 if (!v)
4200 return 1;
4201 l = n[1];
4202 p = n + 2;
4203 while (l) {
4204 int i = l/2;
4205 unsigned long *q = p + 2 * i;
4206 if (*q == v) {
4207 q[1]++;
4208 return 1;
4209 }
4210 if (*q > v) {
4211 l = i;
4212 } else {
4213 p = q + 2;
4214 l -= i + 1;
4215 }
4216 }
4217 if (++n[1] == n[0])
4218 return 0;
4219 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4220 p[0] = v;
4221 p[1] = 1;
4222 return 1;
4223}
4224
4225static void handle_slab(unsigned long *n, struct kmem_cache *c,
4226 struct page *page)
4227{
4228 void *p;
4229 int i, j;
4230 unsigned long v;
4231
4232 if (n[0] == n[1])
4233 return;
4234 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4235 bool active = true;
4236
4237 for (j = page->active; j < c->num; j++) {
4238 if (get_free_obj(page, j) == i) {
4239 active = false;
4240 break;
4241 }
4242 }
4243
4244 if (!active)
4245 continue;
4246
		/*
		 * probe_kernel_read() is used for DEBUG_PAGEALLOC. The page
		 * table mapping is only established at object allocation
		 * time, so we could otherwise access an unmapped object in
		 * the cpu cache.
		 */
4253 if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
4254 continue;
4255
4256 if (!add_caller(n, v))
4257 return;
4258 }
4259}
4260
4261static void show_symbol(struct seq_file *m, unsigned long address)
4262{
4263#ifdef CONFIG_KALLSYMS
4264 unsigned long offset, size;
4265 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4266
4267 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4268 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4269 if (modname[0])
4270 seq_printf(m, " [%s]", modname);
4271 return;
4272 }
4273#endif
4274 seq_printf(m, "%px", (void *)address);
4275}
4276
4277static int leaks_show(struct seq_file *m, void *p)
4278{
4279 struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
4280 root_caches_node);
4281 struct page *page;
4282 struct kmem_cache_node *n;
4283 const char *name;
4284 unsigned long *x = m->private;
4285 int node;
4286 int i;
4287
4288 if (!(cachep->flags & SLAB_STORE_USER))
4289 return 0;
4290 if (!(cachep->flags & SLAB_RED_ZONE))
4291 return 0;
4292
	/*
	 * Set store_user_clean and start to grab stored user information
	 * for all objects on this cache. If some alloc/free requests come in
	 * during the processing, the information would be wrong, so restart
	 * the whole processing.
	 */
4299 do {
4300 drain_cpu_caches(cachep);
4301
		/*
		 * drain_cpu_caches() could make kmemleak_object and
		 * debug_objects_cache dirty, so reset afterwards.
		 */
4305 set_store_user_clean(cachep);
4306
4307 x[1] = 0;
4308
4309 for_each_kmem_cache_node(cachep, node, n) {
4310
4311 check_irq_on();
4312 spin_lock_irq(&n->list_lock);
4313
4314 list_for_each_entry(page, &n->slabs_full, slab_list)
4315 handle_slab(x, cachep, page);
4316 list_for_each_entry(page, &n->slabs_partial, slab_list)
4317 handle_slab(x, cachep, page);
4318 spin_unlock_irq(&n->list_lock);
4319 }
4320 } while (!is_store_user_clean(cachep));
4321
4322 name = cachep->name;
4323 if (x[0] == x[1]) {
		/* Increase the buffer size */
4325 mutex_unlock(&slab_mutex);
4326 m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
4327 GFP_KERNEL);
4328 if (!m->private) {
			/* Too bad, we are really out */
4330 m->private = x;
4331 mutex_lock(&slab_mutex);
4332 return -ENOMEM;
4333 }
4334 *(unsigned long *)m->private = x[0] * 2;
4335 kfree(x);
4336 mutex_lock(&slab_mutex);
		/* Now make sure this entry will be retried */
4338 m->count = m->size;
4339 return 0;
4340 }
4341 for (i = 0; i < x[1]; i++) {
4342 seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4343 show_symbol(m, x[2*i+2]);
4344 seq_putc(m, '\n');
4345 }
4346
4347 return 0;
4348}
4349
4350static const struct seq_operations slabstats_op = {
4351 .start = slab_start,
4352 .next = slab_next,
4353 .stop = slab_stop,
4354 .show = leaks_show,
4355};
4356
4357static int slabstats_open(struct inode *inode, struct file *file)
4358{
4359 unsigned long *n;
4360
4361 n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
4362 if (!n)
4363 return -ENOMEM;
4364
4365 *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4366
4367 return 0;
4368}
4369
4370static const struct file_operations proc_slabstats_operations = {
4371 .open = slabstats_open,
4372 .read = seq_read,
4373 .llseek = seq_lseek,
4374 .release = seq_release_private,
4375};
4376#endif
4377
4378static int __init slab_proc_init(void)
4379{
4380#ifdef CONFIG_DEBUG_SLAB_LEAK
4381 proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4382#endif
4383 return 0;
4384}
4385module_init(slab_proc_init);
4386
4387#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.
 *
 * Warns (with usercopy_fallback) or aborts the copy when the check fails.
 */
4396void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4397 bool to_user)
4398{
4399 struct kmem_cache *cachep;
4400 unsigned int objnr;
4401 unsigned long offset;
4402
4403 ptr = kasan_reset_tag(ptr);
4404
	/* Find and validate the object. */
4406 cachep = page->slab_cache;
4407 objnr = obj_to_index(cachep, page, (void *)ptr);
4408 BUG_ON(objnr >= cachep->num);
4409
	/* Find the offset within the object. */
4411 offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4412
	/* Allow address ranges falling entirely within the usercopy region. */
4414 if (offset >= cachep->useroffset &&
4415 offset - cachep->useroffset <= cachep->usersize &&
4416 n <= cachep->useroffset - offset + cachep->usersize)
4417 return;
4418
	/*
	 * If the copy is still within the allocated object, produce
	 * a warning instead of rejecting the copy. This is intended
	 * to be a temporary method to find any missing usercopy
	 * whitelists.
	 */
4425 if (usercopy_fallback &&
4426 offset <= cachep->object_size &&
4427 n <= cachep->object_size - offset) {
4428 usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
4429 return;
4430 }
4431
4432 usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
4433}
4434#endif
4435
/**
 * __ksize -- Uninstrumented ksize.
 * @objp: pointer to the object
 *
 * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
 * safety checks as ksize() with KASAN instrumentation enabled.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
4445size_t __ksize(const void *objp)
4446{
4447 struct kmem_cache *c;
4448 size_t size;
4449
4450 BUG_ON(!objp);
4451 if (unlikely(objp == ZERO_SIZE_PTR))
4452 return 0;
4453
4454 c = virt_to_cache(objp);
4455 size = c ? c->object_size : 0;
4456
4457 return size;
4458}
4459EXPORT_SYMBOL(__ksize);
4460