/*
 * linux/mm/slab.c
 *
 * SLAB allocator: an object-caching allocator layered on top of the page
 * allocator.  Each kmem_cache carves pages ("slabs") into equally sized
 * objects and keeps per-CPU array caches plus per-node lists of full,
 * partial and free slabs.
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/memory.h>
#include <linux/prefetch.h>
#include <linux/sched/task_stack.h>

#include <net/sock.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"
132
/*
 * DEBUG	- 1 for kmem_cache_create() to honour SLAB_RED_ZONE and
 *		  SLAB_POISON; 0 for faster, smaller code.
 * STATS	- 1 to collect per-cache statistics for /proc/slabinfo.
 * FORCED_DEBUG	- 1 to enable red zoning and last-user tracking on all
 *		  caches where the object layout permits it.
 */
143#ifdef CONFIG_DEBUG_SLAB
144#define DEBUG 1
145#define STATS 1
146#define FORCED_DEBUG 1
147#else
148#define DEBUG 0
149#define STATS 0
150#define FORCED_DEBUG 0
151#endif
152
154#define BYTES_PER_WORD sizeof(void *)
155#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
156
157#ifndef ARCH_KMALLOC_FLAGS
158#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
159#endif
160
161#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
162 <= SLAB_OBJ_MIN_SIZE) ? 1 : 0)
163
164#if FREELIST_BYTE_INDEX
165typedef unsigned char freelist_idx_t;
166#else
167typedef unsigned short freelist_idx_t;
168#endif
169
170#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
/*
 * struct array_cache - per-CPU cache of free objects.
 *
 * Allocation and free fast paths operate on this array; objects are only
 * moved to or from the per-node lists in batches of @batchcount when the
 * array runs dry or overflows.  @touched is set on allocation so that the
 * cache reaper leaves recently used arrays alone.
 */
184struct array_cache {
185 unsigned int avail;
186 unsigned int limit;
187 unsigned int batchcount;
188 unsigned int touched;
189 void *entry[];
	/*
	 * entry[] is sized at allocation time: @limit object pointers are
	 * placed directly after this header (see alloc_arraycache()).
	 */
194};
195
196struct alien_cache {
197 spinlock_t lock;
198 struct array_cache ac;
199};
/*
 * Statically allocated kmem_cache_node structures used while bootstrapping:
 * one set for the kmem_cache cache itself and one for the kmem_cache_node
 * kmalloc cache, for every possible node.
 */
204#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
205static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
206#define CACHE_CACHE 0
207#define SIZE_NODE (MAX_NUMNODES)
208
209static int drain_freelist(struct kmem_cache *cache,
210 struct kmem_cache_node *n, int tofree);
211static void free_block(struct kmem_cache *cachep, void **objpp, int len,
212 int node, struct list_head *list);
213static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
214static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
215static void cache_reap(struct work_struct *unused);
216
217static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
218 void **list);
219static inline void fixup_slab_list(struct kmem_cache *cachep,
220 struct kmem_cache_node *n, struct page *page,
221 void **list);
222static int slab_early_init = 1;
223
224#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
225
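/* Reset a kmem_cache_node to a completely empty, unused state. */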
226static void kmem_cache_node_init(struct kmem_cache_node *parent)
227{
228 INIT_LIST_HEAD(&parent->slabs_full);
229 INIT_LIST_HEAD(&parent->slabs_partial);
230 INIT_LIST_HEAD(&parent->slabs_free);
231 parent->total_slabs = 0;
232 parent->free_slabs = 0;
233 parent->shared = NULL;
234 parent->alien = NULL;
235 parent->colour_next = 0;
236 spin_lock_init(&parent->list_lock);
237 parent->free_objects = 0;
238 parent->free_touched = 0;
239}
240
241#define MAKE_LIST(cachep, listp, slab, nodeid) \
242 do { \
243 INIT_LIST_HEAD(listp); \
244 list_splice(&get_node(cachep, nodeid)->slab, listp); \
245 } while (0)
246
247#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
248 do { \
249 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
250 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
251 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
252 } while (0)
253
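/*
 * Internal cache flags: the slab freelist may be stored in one of the
 * slab's own objects (CFLGS_OBJFREELIST_SLAB) or in a separate kmalloc
 * allocation (CFLGS_OFF_SLAB) rather than at the end of the slab page.
 */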
254#define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000U)
255#define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000U)
256#define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB)
257#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
258
259#define BATCHREFILL_LIMIT 16
260
/*
 * Reap intervals: how often the per-CPU array caches (REAPTIMEOUT_AC) and
 * the per-node lists (REAPTIMEOUT_NODE) are trimmed.  Reaping less often
 * avoids needless drain/refill cycles, but idle caches then pin freeable
 * memory for longer.
 */
267#define REAPTIMEOUT_AC (2*HZ)
268#define REAPTIMEOUT_NODE (4*HZ)
269
270#if STATS
271#define STATS_INC_ACTIVE(x) ((x)->num_active++)
272#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
273#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
274#define STATS_INC_GROWN(x) ((x)->grown++)
275#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
276#define STATS_SET_HIGH(x) \
277 do { \
278 if ((x)->num_active > (x)->high_mark) \
279 (x)->high_mark = (x)->num_active; \
280 } while (0)
281#define STATS_INC_ERR(x) ((x)->errors++)
282#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
283#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
284#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
285#define STATS_SET_FREEABLE(x, i) \
286 do { \
287 if ((x)->max_freeable < i) \
288 (x)->max_freeable = i; \
289 } while (0)
290#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
291#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
292#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
293#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
294#else
295#define STATS_INC_ACTIVE(x) do { } while (0)
296#define STATS_DEC_ACTIVE(x) do { } while (0)
297#define STATS_INC_ALLOCED(x) do { } while (0)
298#define STATS_INC_GROWN(x) do { } while (0)
299#define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0)
300#define STATS_SET_HIGH(x) do { } while (0)
301#define STATS_INC_ERR(x) do { } while (0)
302#define STATS_INC_NODEALLOCS(x) do { } while (0)
303#define STATS_INC_NODEFREES(x) do { } while (0)
304#define STATS_INC_ACOVERFLOW(x) do { } while (0)
305#define STATS_SET_FREEABLE(x, i) do { } while (0)
306#define STATS_INC_ALLOCHIT(x) do { } while (0)
307#define STATS_INC_ALLOCMISS(x) do { } while (0)
308#define STATS_INC_FREEHIT(x) do { } while (0)
309#define STATS_INC_FREEMISS(x) do { } while (0)
310#endif
311
312#if DEBUG
/*
 * Layout of an object when debugging is enabled:
 *
 *	[red zone 1][object proper][red zone 2][last-user address]
 *
 * cachep->obj_offset is the distance from the start of the allocation to
 * the object proper (non-zero only with SLAB_RED_ZONE), and cachep->size
 * covers the object plus all debug metadata.  The helpers below locate the
 * individual fields.
 */
327static int obj_offset(struct kmem_cache *cachep)
328{
329 return cachep->obj_offset;
330}
331
332static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
333{
334 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
335 return (unsigned long long*) (objp + obj_offset(cachep) -
336 sizeof(unsigned long long));
337}
338
339static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
340{
341 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
342 if (cachep->flags & SLAB_STORE_USER)
343 return (unsigned long long *)(objp + cachep->size -
344 sizeof(unsigned long long) -
345 REDZONE_ALIGN);
346 return (unsigned long long *) (objp + cachep->size -
347 sizeof(unsigned long long));
348}
349
350static void **dbg_userword(struct kmem_cache *cachep, void *objp)
351{
352 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
353 return (void **)(objp + cachep->size - BYTES_PER_WORD);
354}
355
356#else
357
358#define obj_offset(x) 0
359#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
360#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
361#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
362
363#endif
/*
 * Maximum page order for slabs: stay at order 0 unless objects do not fit,
 * the machine has plenty of memory, or the order is overridden with the
 * "slab_max_order=" boot parameter.
 */
369#define SLAB_MAX_ORDER_HI 1
370#define SLAB_MAX_ORDER_LO 0
371static int slab_max_order = SLAB_MAX_ORDER_LO;
372static bool slab_max_order_set __initdata;
373
374static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
375 unsigned int idx)
376{
377 return page->s_mem + cache->size * idx;
378}
379
380#define BOOT_CPUCACHE_ENTRIES 1
381
382static struct kmem_cache kmem_cache_boot = {
383 .batchcount = 1,
384 .limit = BOOT_CPUCACHE_ENTRIES,
385 .shared = 1,
386 .size = sizeof(struct kmem_cache),
387 .name = "kmem_cache",
388};
389
390static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
391
392static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
393{
394 return this_cpu_ptr(cachep->cpu_cache);
395}
396
/*
 * Calculate how many objects of @buffer_size fit into a slab of
 * 2^@gfporder pages and how many bytes are left over.
 */
400static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
401 slab_flags_t flags, size_t *left_over)
402{
403 unsigned int num;
404 size_t slab_size = PAGE_SIZE << gfporder;
405
	/*
	 * The slab management structure can be off-slab, stored in one of
	 * the objects (OBJFREELIST), or placed at the end of the slab page.
	 * In the first two cases the whole page is available for objects;
	 * otherwise each object additionally consumes one freelist_idx_t of
	 * on-slab freelist space.  freelist_idx_t needs no extra alignment
	 * because the freelist sits at the very end of the page.
	 */
423 if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
424 num = slab_size / buffer_size;
425 *left_over = slab_size % buffer_size;
426 } else {
427 num = slab_size / (buffer_size + sizeof(freelist_idx_t));
428 *left_over = slab_size %
429 (buffer_size + sizeof(freelist_idx_t));
430 }
431
432 return num;
433}
434
435#if DEBUG
436#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
437
438static void __slab_error(const char *function, struct kmem_cache *cachep,
439 char *msg)
440{
441 pr_err("slab error in %s(): cache `%s': %s\n",
442 function, cachep->name, msg);
443 dump_stack();
444 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
445}
446#endif
447
/*
 * On NUMA, objects freed on a node other than the one they were allocated
 * from are staged in per-node "alien" caches.  This behaviour can be
 * disabled with the "noaliencache" boot parameter.
 */
456static int use_alien_caches __read_mostly = 1;
457static int __init noaliencache_setup(char *s)
458{
459 use_alien_caches = 0;
460 return 1;
461}
462__setup("noaliencache", noaliencache_setup);
463
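/* "slab_max_order=" boot parameter: cap the page order used for slab pages. */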
464static int __init slab_max_order_setup(char *str)
465{
466 get_option(&str, &slab_max_order);
467 slab_max_order = slab_max_order < 0 ? 0 :
468 min(slab_max_order, MAX_ORDER - 1);
469 slab_max_order_set = true;
470
471 return 1;
472}
473__setup("slab_max_order=", slab_max_order_setup);
474
475#ifdef CONFIG_NUMA
/*
 * Per-CPU rotor used by cache_reap() on NUMA systems: each reap pass drains
 * the alien cache of the next online node, round robin.
 */
482static DEFINE_PER_CPU(unsigned long, slab_reap_node);
483
484static void init_reap_node(int cpu)
485{
486 per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
487 node_online_map);
488}
489
490static void next_reap_node(void)
491{
492 int node = __this_cpu_read(slab_reap_node);
493
494 node = next_node_in(node, node_online_map);
495 __this_cpu_write(slab_reap_node, node);
496}
497
498#else
499#define init_reap_node(cpu) do { } while (0)
500#define next_reap_node(void) do { } while (0)
501#endif
502
/*
 * Start the per-CPU reap timer.  The work is deferrable and the expiry is
 * rounded per CPU so that the reapers do not run in lockstep and contend
 * on the same locks.
 */
510static void start_cpu_timer(int cpu)
511{
512 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
513
514 if (reap_work->work.func == NULL) {
515 init_reap_node(cpu);
516 INIT_DEFERRABLE_WORK(reap_work, cache_reap);
517 schedule_delayed_work_on(cpu, reap_work,
518 __round_jiffies_relative(HZ, cpu));
519 }
520}
521
522static void init_arraycache(struct array_cache *ac, int limit, int batch)
523{
524 if (ac) {
525 ac->avail = 0;
526 ac->limit = limit;
527 ac->batchcount = batch;
528 ac->touched = 0;
529 }
530}
531
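/* Allocate an array_cache (header plus @entries object pointers) on @node. */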
532static struct array_cache *alloc_arraycache(int node, int entries,
533 int batchcount, gfp_t gfp)
534{
535 size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
536 struct array_cache *ac = NULL;
537
538 ac = kmalloc_node(memsize, gfp, node);
539
	/*
	 * The array_cache holds pointers to free objects.  Those pointers
	 * would look like valid references during a kmemleak scan, so tell
	 * kmemleak not to scan this structure.
	 */
546 kmemleak_no_scan(ac);
547 init_arraycache(ac, entries, batchcount);
548 return ac;
549}
550
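/*
 * Free an object straight back to its slab's node lists, bypassing the
 * per-CPU array cache.  Used for objects on pfmemalloc reserve pages.
 */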
551static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
552 struct page *page, void *objp)
553{
554 struct kmem_cache_node *n;
555 int page_node;
556 LIST_HEAD(list);
557
558 page_node = page_to_nid(page);
559 n = get_node(cachep, page_node);
560
561 spin_lock(&n->list_lock);
562 free_block(cachep, &objp, 1, page_node, &list);
563 spin_unlock(&n->list_lock);
564
565 slabs_destroy(cachep, &list);
566}
567
/*
 * Transfer up to @max objects from one array_cache to another.
 * Locking must be handled by the caller.  Returns the number moved.
 */
574static int transfer_objects(struct array_cache *to,
575 struct array_cache *from, unsigned int max)
576{
577
578 int nr = min3(from->avail, max, to->limit - to->avail);
579
580 if (!nr)
581 return 0;
582
583 memcpy(to->entry + to->avail, from->entry + from->avail -nr,
584 sizeof(void *) *nr);
585
586 from->avail -= nr;
587 to->avail += nr;
588 return nr;
589}
590
591
592static __always_inline void __free_one(struct array_cache *ac, void *objp)
593{
	/* Reject an immediate double-free of the same object (freelist hardening). */
595 if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
596 WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
597 return;
598 ac->entry[ac->avail++] = objp;
599}
600
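/* Without CONFIG_NUMA the alien-cache machinery reduces to no-op stubs. */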
601#ifndef CONFIG_NUMA
602
603#define drain_alien_cache(cachep, alien) do { } while (0)
604#define reap_alien(cachep, n) do { } while (0)
605
606static inline struct alien_cache **alloc_alien_cache(int node,
607 int limit, gfp_t gfp)
608{
609 return NULL;
610}
611
612static inline void free_alien_cache(struct alien_cache **ac_ptr)
613{
614}
615
616static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
617{
618 return 0;
619}
620
621static inline void *alternate_node_alloc(struct kmem_cache *cachep,
622 gfp_t flags)
623{
624 return NULL;
625}
626
627static inline void *____cache_alloc_node(struct kmem_cache *cachep,
628 gfp_t flags, int nodeid)
629{
630 return NULL;
631}
632
633static inline gfp_t gfp_exact_node(gfp_t flags)
634{
635 return flags & ~__GFP_NOFAIL;
636}
637
638#else
639
640static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
641static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
642
643static struct alien_cache *__alloc_alien_cache(int node, int entries,
644 int batch, gfp_t gfp)
645{
646 size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
647 struct alien_cache *alc = NULL;
648
649 alc = kmalloc_node(memsize, gfp, node);
650 if (alc) {
651 kmemleak_no_scan(alc);
652 init_arraycache(&alc->ac, entries, batch);
653 spin_lock_init(&alc->lock);
654 }
655 return alc;
656}
657
658static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
659{
660 struct alien_cache **alc_ptr;
661 int i;
662
663 if (limit > 1)
664 limit = 12;
665 alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
666 if (!alc_ptr)
667 return NULL;
668
669 for_each_node(i) {
670 if (i == node || !node_online(i))
671 continue;
672 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
673 if (!alc_ptr[i]) {
674 for (i--; i >= 0; i--)
675 kfree(alc_ptr[i]);
676 kfree(alc_ptr);
677 return NULL;
678 }
679 }
680 return alc_ptr;
681}
682
683static void free_alien_cache(struct alien_cache **alc_ptr)
684{
685 int i;
686
687 if (!alc_ptr)
688 return;
689 for_each_node(i)
690 kfree(alc_ptr[i]);
691 kfree(alc_ptr);
692}
693
694static void __drain_alien_cache(struct kmem_cache *cachep,
695 struct array_cache *ac, int node,
696 struct list_head *list)
697{
698 struct kmem_cache_node *n = get_node(cachep, node);
699
700 if (ac->avail) {
701 spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the node's shared array first so they
		 * can be reallocated cheaply instead of going all the way
		 * back to the slab lists.
		 */
707 if (n->shared)
708 transfer_objects(n->shared, ac, ac->limit);
709
710 free_block(cachep, ac->entry, ac->avail, node, list);
711 ac->avail = 0;
712 spin_unlock(&n->list_lock);
713 }
714}
715
/*
 * Called from cache_reap() to drain the alien caches round robin.
 */
719static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
720{
721 int node = __this_cpu_read(slab_reap_node);
722
723 if (n->alien) {
724 struct alien_cache *alc = n->alien[node];
725 struct array_cache *ac;
726
727 if (alc) {
728 ac = &alc->ac;
729 if (ac->avail && spin_trylock_irq(&alc->lock)) {
730 LIST_HEAD(list);
731
732 __drain_alien_cache(cachep, ac, node, &list);
733 spin_unlock_irq(&alc->lock);
734 slabs_destroy(cachep, &list);
735 }
736 }
737 }
738}
739
740static void drain_alien_cache(struct kmem_cache *cachep,
741 struct alien_cache **alien)
742{
743 int i = 0;
744 struct alien_cache *alc;
745 struct array_cache *ac;
746 unsigned long flags;
747
748 for_each_online_node(i) {
749 alc = alien[i];
750 if (alc) {
751 LIST_HEAD(list);
752
753 ac = &alc->ac;
754 spin_lock_irqsave(&alc->lock, flags);
755 __drain_alien_cache(cachep, ac, i, &list);
756 spin_unlock_irqrestore(&alc->lock, flags);
757 slabs_destroy(cachep, &list);
758 }
759 }
760}
761
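/* Free an object whose slab lives on a different node than the freeing CPU. */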
762static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
763 int node, int page_node)
764{
765 struct kmem_cache_node *n;
766 struct alien_cache *alien = NULL;
767 struct array_cache *ac;
768 LIST_HEAD(list);
769
770 n = get_node(cachep, node);
771 STATS_INC_NODEFREES(cachep);
772 if (n->alien && n->alien[page_node]) {
773 alien = n->alien[page_node];
774 ac = &alien->ac;
775 spin_lock(&alien->lock);
776 if (unlikely(ac->avail == ac->limit)) {
777 STATS_INC_ACOVERFLOW(cachep);
778 __drain_alien_cache(cachep, ac, page_node, &list);
779 }
780 __free_one(ac, objp);
781 spin_unlock(&alien->lock);
782 slabs_destroy(cachep, &list);
783 } else {
784 n = get_node(cachep, page_node);
785 spin_lock(&n->list_lock);
786 free_block(cachep, &objp, 1, page_node, &list);
787 spin_unlock(&n->list_lock);
788 slabs_destroy(cachep, &list);
789 }
790 return 1;
791}
792
793static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
794{
795 int page_node = page_to_nid(virt_to_page(objp));
796 int node = numa_mem_id();
	/*
	 * Objects freed on the node they belong to stay on the fast path;
	 * only remote frees go through the alien cache machinery.
	 */
801 if (likely(node == page_node))
802 return 0;
803
804 return __cache_free_alien(cachep, objp, node, page_node);
805}
806
/*
 * Build a gfp mask for allocating from a specific node, without reclaim
 * and without warning or insisting on success.
 */
811static inline gfp_t gfp_exact_node(gfp_t flags)
812{
813 return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
814}
815#endif
816
817static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
818{
819 struct kmem_cache_node *n;
820
	/*
	 * Set up the kmem_cache_node for this node before anything else;
	 * another CPU on the same node may already have allocated it.
	 */
826 n = get_node(cachep, node);
827 if (n) {
828 spin_lock_irq(&n->list_lock);
829 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
830 cachep->num;
831 spin_unlock_irq(&n->list_lock);
832
833 return 0;
834 }
835
836 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
837 if (!n)
838 return -ENOMEM;
839
840 kmem_cache_node_init(n);
841 n->next_reap = jiffies + REAPTIMEOUT_NODE +
842 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
843
844 n->free_limit =
845 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
846
	/*
	 * The kmem_cache_node structures don't come and go as CPUs do;
	 * slab_mutex provides sufficient protection here.
	 */
852 cachep->node[node] = n;
853
854 return 0;
855}
856
857#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
/*
 * Allocate and initialise the kmem_cache_node for @node on every cache.
 * Used for both CPU and memory hotplug; existing nodes are left in place.
 *
 * Must hold slab_mutex.
 */
867static int init_cache_node_node(int node)
868{
869 int ret;
870 struct kmem_cache *cachep;
871
872 list_for_each_entry(cachep, &slab_caches, list) {
873 ret = init_cache_node(cachep, node, GFP_KERNEL);
874 if (ret)
875 return ret;
876 }
877
878 return 0;
879}
880#endif
881
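/*
 * (Re)build the per-node structures of @cachep for @node: the shared array
 * cache, the alien caches and the kmem_cache_node itself.  With
 * @force_change an existing shared array is drained and replaced.
 */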
882static int setup_kmem_cache_node(struct kmem_cache *cachep,
883 int node, gfp_t gfp, bool force_change)
884{
885 int ret = -ENOMEM;
886 struct kmem_cache_node *n;
887 struct array_cache *old_shared = NULL;
888 struct array_cache *new_shared = NULL;
889 struct alien_cache **new_alien = NULL;
890 LIST_HEAD(list);
891
892 if (use_alien_caches) {
893 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
894 if (!new_alien)
895 goto fail;
896 }
897
898 if (cachep->shared) {
899 new_shared = alloc_arraycache(node,
900 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
901 if (!new_shared)
902 goto fail;
903 }
904
905 ret = init_cache_node(cachep, node, gfp);
906 if (ret)
907 goto fail;
908
909 n = get_node(cachep, node);
910 spin_lock_irq(&n->list_lock);
911 if (n->shared && force_change) {
912 free_block(cachep, n->shared->entry,
913 n->shared->avail, node, &list);
914 n->shared->avail = 0;
915 }
916
917 if (!n->shared || force_change) {
918 old_shared = n->shared;
919 n->shared = new_shared;
920 new_shared = NULL;
921 }
922
923 if (!n->alien) {
924 n->alien = new_alien;
925 new_alien = NULL;
926 }
927
928 spin_unlock_irq(&n->list_lock);
929 slabs_destroy(cachep, &list);
930
	/*
	 * n->shared may be read locklessly with interrupts disabled, so an
	 * old shared array that was replaced above must only be freed after
	 * a grace period.
	 */
937 if (old_shared && force_change)
938 synchronize_rcu();
939
940fail:
941 kfree(old_shared);
942 kfree(new_shared);
943 free_alien_cache(new_alien);
944
945 return ret;
946}
947
948#ifdef CONFIG_SMP
949
950static void cpuup_canceled(long cpu)
951{
952 struct kmem_cache *cachep;
953 struct kmem_cache_node *n = NULL;
954 int node = cpu_to_mem(cpu);
955 const struct cpumask *mask = cpumask_of_node(node);
956
957 list_for_each_entry(cachep, &slab_caches, list) {
958 struct array_cache *nc;
959 struct array_cache *shared;
960 struct alien_cache **alien;
961 LIST_HEAD(list);
962
963 n = get_node(cachep, node);
964 if (!n)
965 continue;
966
967 spin_lock_irq(&n->list_lock);
968
969
970 n->free_limit -= cachep->batchcount;
971
972
973 nc = per_cpu_ptr(cachep->cpu_cache, cpu);
974 free_block(cachep, nc->entry, nc->avail, node, &list);
975 nc->avail = 0;
976
977 if (!cpumask_empty(mask)) {
978 spin_unlock_irq(&n->list_lock);
979 goto free_slab;
980 }
981
982 shared = n->shared;
983 if (shared) {
984 free_block(cachep, shared->entry,
985 shared->avail, node, &list);
986 n->shared = NULL;
987 }
988
989 alien = n->alien;
990 n->alien = NULL;
991
992 spin_unlock_irq(&n->list_lock);
993
994 kfree(shared);
995 if (alien) {
996 drain_alien_cache(cachep, alien);
997 free_alien_cache(alien);
998 }
999
1000free_slab:
1001 slabs_destroy(cachep, &list);
1002 }
	/*
	 * The loop above returned all cached objects to the slab lists;
	 * now the completely free slabs on each node can be released.
	 */
1008 list_for_each_entry(cachep, &slab_caches, list) {
1009 n = get_node(cachep, node);
1010 if (!n)
1011 continue;
1012 drain_freelist(cachep, n, INT_MAX);
1013 }
1014}
1015
1016static int cpuup_prepare(long cpu)
1017{
1018 struct kmem_cache *cachep;
1019 int node = cpu_to_mem(cpu);
1020 int err;
1021
	/*
	 * Make sure every cache has a kmem_cache_node for this CPU's node
	 * first: the array and alien cache allocations below will be placed
	 * on that node's lists.
	 */
1028 err = init_cache_node_node(node);
1029 if (err < 0)
1030 goto bad;
1031
1032
1033
1034
1035
1036 list_for_each_entry(cachep, &slab_caches, list) {
1037 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1038 if (err)
1039 goto bad;
1040 }
1041
1042 return 0;
1043bad:
1044 cpuup_canceled(cpu);
1045 return -ENOMEM;
1046}
1047
1048int slab_prepare_cpu(unsigned int cpu)
1049{
1050 int err;
1051
1052 mutex_lock(&slab_mutex);
1053 err = cpuup_prepare(cpu);
1054 mutex_unlock(&slab_mutex);
1055 return err;
1056}
1057
/*
 * Called for a failed online attempt and for a successful offline.
 *
 * Even if all CPUs of a node are down, the node's kmem_cache_node is not
 * freed: that avoids races with allocations from other CPUs targeting the
 * memory of the node that is going down.
 */
1068int slab_dead_cpu(unsigned int cpu)
1069{
1070 mutex_lock(&slab_mutex);
1071 cpuup_canceled(cpu);
1072 mutex_unlock(&slab_mutex);
1073 return 0;
1074}
1075#endif
1076
1077static int slab_online_cpu(unsigned int cpu)
1078{
1079 start_cpu_timer(cpu);
1080 return 0;
1081}
1082
1083static int slab_offline_cpu(unsigned int cpu)
1084{
	/*
	 * Shut down the cache reaper for this CPU and wait for a running
	 * cache_reap() to finish before clearing the work function below.
	 */
1091 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1092
1093 per_cpu(slab_reap_work, cpu).work.func = NULL;
1094 return 0;
1095}
1096
1097#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
/*
 * Drain the free lists of @node on every cache; used for memory hot-remove.
 * Returns -EBUSY if objects are still in use so the node cannot be removed.
 *
 * Must hold slab_mutex.
 */
1105static int __meminit drain_cache_node_node(int node)
1106{
1107 struct kmem_cache *cachep;
1108 int ret = 0;
1109
1110 list_for_each_entry(cachep, &slab_caches, list) {
1111 struct kmem_cache_node *n;
1112
1113 n = get_node(cachep, node);
1114 if (!n)
1115 continue;
1116
1117 drain_freelist(cachep, n, INT_MAX);
1118
1119 if (!list_empty(&n->slabs_full) ||
1120 !list_empty(&n->slabs_partial)) {
1121 ret = -EBUSY;
1122 break;
1123 }
1124 }
1125 return ret;
1126}
1127
1128static int __meminit slab_memory_callback(struct notifier_block *self,
1129 unsigned long action, void *arg)
1130{
1131 struct memory_notify *mnb = arg;
1132 int ret = 0;
1133 int nid;
1134
1135 nid = mnb->status_change_nid;
1136 if (nid < 0)
1137 goto out;
1138
1139 switch (action) {
1140 case MEM_GOING_ONLINE:
1141 mutex_lock(&slab_mutex);
1142 ret = init_cache_node_node(nid);
1143 mutex_unlock(&slab_mutex);
1144 break;
1145 case MEM_GOING_OFFLINE:
1146 mutex_lock(&slab_mutex);
1147 ret = drain_cache_node_node(nid);
1148 mutex_unlock(&slab_mutex);
1149 break;
1150 case MEM_ONLINE:
1151 case MEM_OFFLINE:
1152 case MEM_CANCEL_ONLINE:
1153 case MEM_CANCEL_OFFLINE:
1154 break;
1155 }
1156out:
1157 return notifier_from_errno(ret);
1158}
1159#endif
1160
/*
 * Swap a bootstrap (static __initdata) kmem_cache_node for one allocated
 * with kmalloc.
 */
1164static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1165 int nodeid)
1166{
1167 struct kmem_cache_node *ptr;
1168
1169 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1170 BUG_ON(!ptr);
1171
1172 memcpy(ptr, list, sizeof(struct kmem_cache_node));
1173
1174
1175
1176 spin_lock_init(&ptr->list_lock);
1177
1178 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1179 cachep->node[nodeid] = ptr;
1180}
1181
/*
 * Point every online node of @cachep at one of the statically allocated
 * bootstrap kmem_cache_node structures, starting at @index.
 */
1186static void __init set_up_node(struct kmem_cache *cachep, int index)
1187{
1188 int node;
1189
1190 for_each_online_node(node) {
1191 cachep->node[node] = &init_kmem_cache_node[index + node];
1192 cachep->node[node]->next_reap = jiffies +
1193 REAPTIMEOUT_NODE +
1194 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1195 }
1196}
1197
/*
 * Initialisation.  Called after the page allocator has been initialised and
 * before smp_init().
 */
1202void __init kmem_cache_init(void)
1203{
1204 int i;
1205
1206 kmem_cache = &kmem_cache_boot;
1207
1208 if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1209 use_alien_caches = 0;
1210
1211 for (i = 0; i < NUM_INIT_LISTS; i++)
1212 kmem_cache_node_init(&init_kmem_cache_node[i]);
1213
	/*
	 * Fragmentation resistance on low memory: only use larger page
	 * orders on machines with more than 32MB of memory, unless
	 * overridden on the command line.
	 */
1219 if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
1220 slab_max_order = SLAB_MAX_ORDER_HI;
1221
	/*
	 * Bootstrap is tricky because several objects are allocated from
	 * caches that do not exist yet:
	 *
	 * 1) Initialise kmem_cache, the cache of struct kmem_cache.  It is
	 *    statically allocated and initially uses the __initdata
	 *    kmem_cache_node structures above.
	 * 2) Create the kmalloc cache that backs struct kmem_cache_node
	 *    allocations, still using bootstrap node structures.
	 * 3) Replace the bootstrap kmem_cache_node structures with properly
	 *    kmalloc'ed ones (init_list() below).
	 * 4) Create the remaining kmalloc caches; from then on everything is
	 *    allocated normally, and kmem_cache_init_late() resizes the
	 *    per-CPU array caches to their final sizes.
	 */

	/* 1) create the kmem_cache */
1247 create_boot_cache(kmem_cache, "kmem_cache",
1248 offsetof(struct kmem_cache, node) +
1249 nr_node_ids * sizeof(struct kmem_cache_node *),
1250 SLAB_HWCACHE_ALIGN, 0, 0);
1251 list_add(&kmem_cache->list, &slab_caches);
1252 slab_state = PARTIAL;
1253
	/*
	 * Create the kmalloc cache that backs struct kmem_cache_node
	 * allocations first; without it further caches cannot be set up.
	 */
1258 kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
1259 kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
1260 kmalloc_info[INDEX_NODE].size,
1261 ARCH_KMALLOC_FLAGS, 0,
1262 kmalloc_info[INDEX_NODE].size);
1263 slab_state = PARTIAL_NODE;
1264 setup_kmalloc_cache_index_table();
1265
1266 slab_early_init = 0;
1267
1268
1269 {
1270 int nid;
1271
1272 for_each_online_node(nid) {
1273 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1274
1275 init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE],
1276 &init_kmem_cache_node[SIZE_NODE + nid], nid);
1277 }
1278 }
1279
1280 create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1281}
1282
1283void __init kmem_cache_init_late(void)
1284{
1285 struct kmem_cache *cachep;
1286
1287
1288 mutex_lock(&slab_mutex);
1289 list_for_each_entry(cachep, &slab_caches, list)
1290 if (enable_cpucache(cachep, GFP_NOWAIT))
1291 BUG();
1292 mutex_unlock(&slab_mutex);
1293
1294
1295 slab_state = FULL;
1296
1297#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initialises and frees
	 * per-node structures.
	 */
1302 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1303#endif
1304
	/*
	 * The reap timers are started later, from a module init call; that
	 * part of the kernel is not yet operational.
	 */
1309}
1310
1311static int __init cpucache_init(void)
1312{
1313 int ret;
1314
1315
1316
1317
1318 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
1319 slab_online_cpu, slab_offline_cpu);
1320 WARN_ON(ret < 0);
1321
1322 return 0;
1323}
1324__initcall(cpucache_init);
1325
1326static noinline void
1327slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1328{
1329#if DEBUG
1330 struct kmem_cache_node *n;
1331 unsigned long flags;
1332 int node;
1333 static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1334 DEFAULT_RATELIMIT_BURST);
1335
1336 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1337 return;
1338
1339 pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1340 nodeid, gfpflags, &gfpflags);
1341 pr_warn(" cache: %s, object size: %d, order: %d\n",
1342 cachep->name, cachep->size, cachep->gfporder);
1343
1344 for_each_kmem_cache_node(cachep, node, n) {
1345 unsigned long total_slabs, free_slabs, free_objs;
1346
1347 spin_lock_irqsave(&n->list_lock, flags);
1348 total_slabs = n->total_slabs;
1349 free_slabs = n->free_slabs;
1350 free_objs = n->free_objects;
1351 spin_unlock_irqrestore(&n->list_lock, flags);
1352
1353 pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
1354 node, total_slabs - free_slabs, total_slabs,
1355 (total_slabs * cachep->num) - free_objs,
1356 total_slabs * cachep->num);
1357 }
1358#endif
1359}
1360
/*
 * Interface to the page allocator: allocate the pages backing one slab and
 * account/mark them as slab pages.  No kmem_cache_node ->list_lock is
 * needed here.
 */
1369static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1370 int nodeid)
1371{
1372 struct page *page;
1373
1374 flags |= cachep->allocflags;
1375
1376 page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1377 if (!page) {
1378 slab_out_of_memory(cachep, flags, nodeid);
1379 return NULL;
1380 }
1381
1382 account_slab_page(page, cachep->gfporder, cachep);
1383 __SetPageSlab(page);
1384
1385 if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1386 SetPageSlabPfmemalloc(page);
1387
1388 return page;
1389}
1390
/*
 * Interface to the page allocator: release the pages backing one slab.
 */
1394static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1395{
1396 int order = cachep->gfporder;
1397
1398 BUG_ON(!PageSlab(page));
1399 __ClearPageSlabPfmemalloc(page);
1400 __ClearPageSlab(page);
1401 page_mapcount_reset(page);
1402 page->mapping = NULL;
1403
1404 if (current->reclaim_state)
1405 current->reclaim_state->reclaimed_slab += 1 << order;
1406 unaccount_slab_page(page, order, cachep);
1407 __free_pages(page, order);
1408}
1409
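/* RCU callback: release a SLAB_TYPESAFE_BY_RCU slab's pages after a grace period. */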
1410static void kmem_rcu_free(struct rcu_head *head)
1411{
1412 struct kmem_cache *cachep;
1413 struct page *page;
1414
1415 page = container_of(head, struct page, rcu_head);
1416 cachep = page->slab_cache;
1417
1418 kmem_freepages(cachep, page);
1419}
1420
1421#if DEBUG
1422static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1423{
1424 if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
1425 (cachep->size % PAGE_SIZE) == 0)
1426 return true;
1427
1428 return false;
1429}
1430
1431#ifdef CONFIG_DEBUG_PAGEALLOC
1432static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
1433{
1434 if (!is_debug_pagealloc_cache(cachep))
1435 return;
1436
1437 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1438}
1439
1440#else
1441static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1442 int map) {}
1443
1444#endif
1445
1446static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1447{
1448 int size = cachep->object_size;
1449 addr = &((char *)addr)[obj_offset(cachep)];
1450
1451 memset(addr, val, size);
1452 *(unsigned char *)(addr + size - 1) = POISON_END;
1453}
1454
1455static void dump_line(char *data, int offset, int limit)
1456{
1457 int i;
1458 unsigned char error = 0;
1459 int bad_count = 0;
1460
1461 pr_err("%03x: ", offset);
1462 for (i = 0; i < limit; i++) {
1463 if (data[offset + i] != POISON_FREE) {
1464 error = data[offset + i];
1465 bad_count++;
1466 }
1467 }
1468 print_hex_dump(KERN_CONT, "", 0, 16, 1,
1469 &data[offset], limit, 1);
1470
1471 if (bad_count == 1) {
1472 error ^= POISON_FREE;
1473 if (!(error & (error - 1))) {
1474 pr_err("Single bit error detected. Probably bad RAM.\n");
1475#ifdef CONFIG_X86
1476 pr_err("Run memtest86+ or a similar memory test tool.\n");
1477#else
1478 pr_err("Run a memory test tool.\n");
1479#endif
1480 }
1481 }
1482}
1483#endif
1484
1485#if DEBUG
1486
1487static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1488{
1489 int i, size;
1490 char *realobj;
1491
1492 if (cachep->flags & SLAB_RED_ZONE) {
1493 pr_err("Redzone: 0x%llx/0x%llx\n",
1494 *dbg_redzone1(cachep, objp),
1495 *dbg_redzone2(cachep, objp));
1496 }
1497
1498 if (cachep->flags & SLAB_STORE_USER)
1499 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1500 realobj = (char *)objp + obj_offset(cachep);
1501 size = cachep->object_size;
1502 for (i = 0; i < size && lines; i += 16, lines--) {
1503 int limit;
1504 limit = 16;
1505 if (i + limit > size)
1506 limit = size - i;
1507 dump_line(realobj, i, limit);
1508 }
1509}
1510
1511static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1512{
1513 char *realobj;
1514 int size, i;
1515 int lines = 0;
1516
1517 if (is_debug_pagealloc_cache(cachep))
1518 return;
1519
1520 realobj = (char *)objp + obj_offset(cachep);
1521 size = cachep->object_size;
1522
1523 for (i = 0; i < size; i++) {
1524 char exp = POISON_FREE;
1525 if (i == size - 1)
1526 exp = POISON_END;
1527 if (realobj[i] != exp) {
1528 int limit;
1529
1530
1531 if (lines == 0) {
1532 pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1533 print_tainted(), cachep->name,
1534 realobj, size);
1535 print_objinfo(cachep, objp, 0);
1536 }
1537
1538 i = (i / 16) * 16;
1539 limit = 16;
1540 if (i + limit > size)
1541 limit = size - i;
1542 dump_line(realobj, i, limit);
1543 i += 16;
1544 lines++;
1545
1546 if (lines > 5)
1547 break;
1548 }
1549 }
1550 if (lines != 0) {
1551
1552
1553
1554 struct page *page = virt_to_head_page(objp);
1555 unsigned int objnr;
1556
1557 objnr = obj_to_index(cachep, page, objp);
1558 if (objnr) {
1559 objp = index_to_obj(cachep, page, objnr - 1);
1560 realobj = (char *)objp + obj_offset(cachep);
1561 pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
1562 print_objinfo(cachep, objp, 2);
1563 }
1564 if (objnr + 1 < cachep->num) {
1565 objp = index_to_obj(cachep, page, objnr + 1);
1566 realobj = (char *)objp + obj_offset(cachep);
1567 pr_err("Next obj: start=%px, len=%d\n", realobj, size);
1568 print_objinfo(cachep, objp, 2);
1569 }
1570 }
1571}
1572#endif
1573
1574#if DEBUG
1575static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1576 struct page *page)
1577{
1578 int i;
1579
1580 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1581 poison_obj(cachep, page->freelist - obj_offset(cachep),
1582 POISON_FREE);
1583 }
1584
1585 for (i = 0; i < cachep->num; i++) {
1586 void *objp = index_to_obj(cachep, page, i);
1587
1588 if (cachep->flags & SLAB_POISON) {
1589 check_poison_obj(cachep, objp);
1590 slab_kernel_map(cachep, objp, 1);
1591 }
1592 if (cachep->flags & SLAB_RED_ZONE) {
1593 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1594 slab_error(cachep, "start of a freed object was overwritten");
1595 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1596 slab_error(cachep, "end of a freed object was overwritten");
1597 }
1598 }
1599}
1600#else
1601static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1602 struct page *page)
1603{
1604}
1605#endif
1606
/**
 * slab_destroy - destroy and release all objects in a slab
 * @cachep: cache pointer being destroyed
 * @page: page pointer being destroyed
 *
 * Destroy all the objs in a slab page and release the memory back to the
 * system.  The page must already be unlinked from the cache; the
 * kmem_cache_node ->list_lock is not held/needed.
 */
1616static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1617{
1618 void *freelist;
1619
1620 freelist = page->freelist;
1621 slab_destroy_debugcheck(cachep, page);
1622 if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
1623 call_rcu(&page->rcu_head, kmem_rcu_free);
1624 else
1625 kmem_freepages(cachep, page);
1626
1627
1628
1629
1630
1631 if (OFF_SLAB(cachep))
1632 kmem_cache_free(cachep->freelist_cache, freelist);
1633}
1634
/*
 * Destroy every slab page on @list.  Called without any list locks held.
 */
1639static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1640{
1641 struct page *page, *n;
1642
1643 list_for_each_entry_safe(page, n, list, slab_list) {
1644 list_del(&page->slab_list);
1645 slab_destroy(cachep, page);
1646 }
1647}
1648
/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.  Higher order pages are
 * avoided unless the objects do not fit otherwise.
 *
 * Return: number of left-over bytes in a slab
 */
1663static size_t calculate_slab_order(struct kmem_cache *cachep,
1664 size_t size, slab_flags_t flags)
1665{
1666 size_t left_over = 0;
1667 int gfporder;
1668
1669 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1670 unsigned int num;
1671 size_t remainder;
1672
1673 num = cache_estimate(gfporder, size, flags, &remainder);
1674 if (!num)
1675 continue;
1676
1677
1678 if (num > SLAB_OBJ_MAX_NUM)
1679 break;
1680
1681 if (flags & CFLGS_OFF_SLAB) {
1682 struct kmem_cache *freelist_cache;
1683 size_t freelist_size;
1684
1685 freelist_size = num * sizeof(freelist_idx_t);
1686 freelist_cache = kmalloc_slab(freelist_size, 0u);
1687 if (!freelist_cache)
1688 continue;
1689
			/*
			 * The cache used for the off-slab freelist must not
			 * itself be off-slab, to avoid a possible looping
			 * condition in cache_grow_begin().
			 */
1694 if (OFF_SLAB(freelist_cache))
1695 continue;
1696
1697
1698 if (freelist_cache->size > cachep->size / 2)
1699 continue;
1700 }
1701
1702
1703 cachep->num = num;
1704 cachep->gfporder = gfporder;
1705 left_over = remainder;
1706
		/*
		 * A reclaimable slab tends to be allocated with GFP_NOFS;
		 * keep the order as low as possible so such allocations do
		 * not depend on higher-order pages being available.
		 */
1712 if (flags & SLAB_RECLAIM_ACCOUNT)
1713 break;
1714
1715
1716
1717
1718
1719 if (gfporder >= slab_max_order)
1720 break;
1721
1722
1723
1724
1725 if (left_over * 8 <= (PAGE_SIZE << gfporder))
1726 break;
1727 }
1728 return left_over;
1729}
1730
1731static struct array_cache __percpu *alloc_kmem_cache_cpus(
1732 struct kmem_cache *cachep, int entries, int batchcount)
1733{
1734 int cpu;
1735 size_t size;
1736 struct array_cache __percpu *cpu_cache;
1737
1738 size = sizeof(void *) * entries + sizeof(struct array_cache);
1739 cpu_cache = __alloc_percpu(size, sizeof(void *));
1740
1741 if (!cpu_cache)
1742 return NULL;
1743
1744 for_each_possible_cpu(cpu) {
1745 init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1746 entries, batchcount);
1747 }
1748
1749 return cpu_cache;
1750}
1751
1752static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1753{
1754 if (slab_state >= FULL)
1755 return enable_cpucache(cachep, gfp);
1756
1757 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1758 if (!cachep->cpu_cache)
1759 return 1;
1760
1761 if (slab_state == DOWN) {
1762
1763 set_up_node(kmem_cache, CACHE_CACHE);
1764 } else if (slab_state == PARTIAL) {
1765
1766 set_up_node(cachep, SIZE_NODE);
1767 } else {
1768 int node;
1769
1770 for_each_online_node(node) {
1771 cachep->node[node] = kmalloc_node(
1772 sizeof(struct kmem_cache_node), gfp, node);
1773 BUG_ON(!cachep->node[node]);
1774 kmem_cache_node_init(cachep->node[node]);
1775 }
1776 }
1777
1778 cachep->node[numa_mem_id()]->next_reap =
1779 jiffies + REAPTIMEOUT_NODE +
1780 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1781
1782 cpu_cache_get(cachep)->avail = 0;
1783 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1784 cpu_cache_get(cachep)->batchcount = 1;
1785 cpu_cache_get(cachep)->touched = 0;
1786 cachep->batchcount = 1;
1787 cachep->limit = BOOT_CPUCACHE_ENTRIES;
1788 return 0;
1789}
1790
1791slab_flags_t kmem_cache_flags(unsigned int object_size,
1792 slab_flags_t flags, const char *name,
1793 void (*ctor)(void *))
1794{
1795 return flags;
1796}
1797
1798struct kmem_cache *
1799__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
1800 slab_flags_t flags, void (*ctor)(void *))
1801{
1802 struct kmem_cache *cachep;
1803
1804 cachep = find_mergeable(size, align, flags, name, ctor);
1805 if (cachep) {
1806 cachep->refcount++;
1807
1808
1809
1810
1811
1812 cachep->object_size = max_t(int, cachep->object_size, size);
1813 }
1814 return cachep;
1815}
1816
1817static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1818 size_t size, slab_flags_t flags)
1819{
1820 size_t left;
1821
1822 cachep->num = 0;
1823
1824
1825
1826
1827
1828
1829 if (unlikely(slab_want_init_on_free(cachep)))
1830 return false;
1831
1832 if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1833 return false;
1834
1835 left = calculate_slab_order(cachep, size,
1836 flags | CFLGS_OBJFREELIST_SLAB);
1837 if (!cachep->num)
1838 return false;
1839
1840 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1841 return false;
1842
1843 cachep->colour = left / cachep->colour_off;
1844
1845 return true;
1846}
1847
1848static bool set_off_slab_cache(struct kmem_cache *cachep,
1849 size_t size, slab_flags_t flags)
1850{
1851 size_t left;
1852
1853 cachep->num = 0;
1854
1855
1856
1857
1858
1859 if (flags & SLAB_NOLEAKTRACE)
1860 return false;
1861
1862
1863
1864
1865
1866 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1867 if (!cachep->num)
1868 return false;
1869
1870
1871
1872
1873
1874 if (left >= cachep->num * sizeof(freelist_idx_t))
1875 return false;
1876
1877 cachep->colour = left / cachep->colour_off;
1878
1879 return true;
1880}
1881
1882static bool set_on_slab_cache(struct kmem_cache *cachep,
1883 size_t size, slab_flags_t flags)
1884{
1885 size_t left;
1886
1887 cachep->num = 0;
1888
1889 left = calculate_slab_order(cachep, size, flags);
1890 if (!cachep->num)
1891 return false;
1892
1893 cachep->colour = left / cachep->colour_off;
1894
1895 return true;
1896}
1897
/**
 * __kmem_cache_create - Create a cache.
 * @cachep: cache management descriptor
 * @flags: SLAB flags
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The cache's constructor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.
 *
 * Return: 0 on success, a negative errno on failure.
 */
1921int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
1922{
1923 size_t ralign = BYTES_PER_WORD;
1924 gfp_t gfp;
1925 int err;
1926 unsigned int size = cachep->size;
1927
1928#if DEBUG
1929#if FORCED_DEBUG
	/*
	 * Enable redzoning and last-user accounting, except for caches with
	 * large objects where the extra metadata would push the object size
	 * past the next power of two (such caches already suffer significant
	 * internal fragmentation).
	 */
1936 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
1937 2 * sizeof(unsigned long long)))
1938 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1939 if (!(flags & SLAB_TYPESAFE_BY_RCU))
1940 flags |= SLAB_POISON;
1941#endif
1942#endif
1943
	/*
	 * Check that size is in terms of words.  This is needed to avoid
	 * unaligned accesses for some archs when redzoning is used, and makes
	 * sure any on-slab freelist indexes are also correctly aligned.
	 */
1949 size = ALIGN(size, BYTES_PER_WORD);
1950
1951 if (flags & SLAB_RED_ZONE) {
1952 ralign = REDZONE_ALIGN;
1953
1954
1955 size = ALIGN(size, REDZONE_ALIGN);
1956 }
1957
1958
1959 if (ralign < cachep->align) {
1960 ralign = cachep->align;
1961 }
1962
1963 if (ralign > __alignof__(unsigned long long))
1964 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
1965
1966
1967
1968 cachep->align = ralign;
1969 cachep->colour_off = cache_line_size();
1970
1971 if (cachep->colour_off < cachep->align)
1972 cachep->colour_off = cachep->align;
1973
1974 if (slab_is_available())
1975 gfp = GFP_KERNEL;
1976 else
1977 gfp = GFP_NOWAIT;
1978
1979#if DEBUG
	/*
	 * Both debugging options require word-alignment, which is calculated
	 * into the alignment above.
	 */
1985 if (flags & SLAB_RED_ZONE) {
1986
1987 cachep->obj_offset += sizeof(unsigned long long);
1988 size += 2 * sizeof(unsigned long long);
1989 }
1990 if (flags & SLAB_STORE_USER) {
		/*
		 * The user-store word lives behind the end of the real
		 * object.  If the second red zone must be 64-bit aligned,
		 * reserve that much space instead of a single word.
		 */
1995 if (flags & SLAB_RED_ZONE)
1996 size += REDZONE_ALIGN;
1997 else
1998 size += BYTES_PER_WORD;
1999 }
2000#endif
2001
2002 kasan_cache_create(cachep, &size, &flags);
2003
2004 size = ALIGN(size, cachep->align);
2005
2006
2007
2008
2009 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2010 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2011
2012#if DEBUG
	/*
	 * Debug pagealloc requires off-slab management.  During early
	 * initialisation the small kmalloc caches that would hold the
	 * off-slab freelists do not exist yet, so only consider this for
	 * sizes of at least 256 bytes.
	 */
2020 if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
2021 size >= 256 && cachep->object_size > cache_line_size()) {
2022 if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2023 size_t tmp_size = ALIGN(size, PAGE_SIZE);
2024
2025 if (set_off_slab_cache(cachep, tmp_size, flags)) {
2026 flags |= CFLGS_OFF_SLAB;
2027 cachep->obj_offset += tmp_size - size;
2028 size = tmp_size;
2029 goto done;
2030 }
2031 }
2032 }
2033#endif
2034
2035 if (set_objfreelist_slab_cache(cachep, size, flags)) {
2036 flags |= CFLGS_OBJFREELIST_SLAB;
2037 goto done;
2038 }
2039
2040 if (set_off_slab_cache(cachep, size, flags)) {
2041 flags |= CFLGS_OFF_SLAB;
2042 goto done;
2043 }
2044
2045 if (set_on_slab_cache(cachep, size, flags))
2046 goto done;
2047
2048 return -E2BIG;
2049
2050done:
2051 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2052 cachep->flags = flags;
2053 cachep->allocflags = __GFP_COMP;
2054 if (flags & SLAB_CACHE_DMA)
2055 cachep->allocflags |= GFP_DMA;
2056 if (flags & SLAB_CACHE_DMA32)
2057 cachep->allocflags |= GFP_DMA32;
2058 if (flags & SLAB_RECLAIM_ACCOUNT)
2059 cachep->allocflags |= __GFP_RECLAIMABLE;
2060 cachep->size = size;
2061 cachep->reciprocal_buffer_size = reciprocal_value(size);
2062
2063#if DEBUG
	/*
	 * If the generic kernel_map_pages() poisoning is going to be used,
	 * it would smash the red zone and user word anyway, so switch them
	 * off for this cache.
	 */
2069 if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2070 (cachep->flags & SLAB_POISON) &&
2071 is_debug_pagealloc_cache(cachep))
2072 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2073#endif
2074
2075 if (OFF_SLAB(cachep)) {
2076 cachep->freelist_cache =
2077 kmalloc_slab(cachep->freelist_size, 0u);
2078 }
2079
2080 err = setup_cpu_cache(cachep, gfp);
2081 if (err) {
2082 __kmem_cache_release(cachep);
2083 return err;
2084 }
2085
2086 return 0;
2087}
2088
2089#if DEBUG
2090static void check_irq_off(void)
2091{
2092 BUG_ON(!irqs_disabled());
2093}
2094
2095static void check_irq_on(void)
2096{
2097 BUG_ON(irqs_disabled());
2098}
2099
2100static void check_mutex_acquired(void)
2101{
2102 BUG_ON(!mutex_is_locked(&slab_mutex));
2103}
2104
2105static void check_spinlock_acquired(struct kmem_cache *cachep)
2106{
2107#ifdef CONFIG_SMP
2108 check_irq_off();
2109 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2110#endif
2111}
2112
2113static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2114{
2115#ifdef CONFIG_SMP
2116 check_irq_off();
2117 assert_spin_locked(&get_node(cachep, node)->list_lock);
2118#endif
2119}
2120
2121#else
2122#define check_irq_off() do { } while(0)
2123#define check_irq_on() do { } while(0)
2124#define check_mutex_acquired() do { } while(0)
2125#define check_spinlock_acquired(x) do { } while(0)
2126#define check_spinlock_acquired_node(x, y) do { } while(0)
2127#endif
2128
2129static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2130 int node, bool free_all, struct list_head *list)
2131{
2132 int tofree;
2133
2134 if (!ac || !ac->avail)
2135 return;
2136
2137 tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2138 if (tofree > ac->avail)
2139 tofree = (ac->avail + 1) / 2;
2140
2141 free_block(cachep, ac->entry, tofree, node, list);
2142 ac->avail -= tofree;
2143 memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2144}
2145
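/* Runs on each CPU via on_each_cpu(): flush that CPU's array cache for @arg. */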
2146static void do_drain(void *arg)
2147{
2148 struct kmem_cache *cachep = arg;
2149 struct array_cache *ac;
2150 int node = numa_mem_id();
2151 struct kmem_cache_node *n;
2152 LIST_HEAD(list);
2153
2154 check_irq_off();
2155 ac = cpu_cache_get(cachep);
2156 n = get_node(cachep, node);
2157 spin_lock(&n->list_lock);
2158 free_block(cachep, ac->entry, ac->avail, node, &list);
2159 spin_unlock(&n->list_lock);
2160 ac->avail = 0;
2161 slabs_destroy(cachep, &list);
2162}
2163
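/* Flush all per-CPU, shared and alien caches of @cachep back to the slab lists. */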
2164static void drain_cpu_caches(struct kmem_cache *cachep)
2165{
2166 struct kmem_cache_node *n;
2167 int node;
2168 LIST_HEAD(list);
2169
2170 on_each_cpu(do_drain, cachep, 1);
2171 check_irq_on();
2172 for_each_kmem_cache_node(cachep, node, n)
2173 if (n->alien)
2174 drain_alien_cache(cachep, n->alien);
2175
2176 for_each_kmem_cache_node(cachep, node, n) {
2177 spin_lock_irq(&n->list_lock);
2178 drain_array_locked(cachep, n->shared, node, true, &list);
2179 spin_unlock_irq(&n->list_lock);
2180
2181 slabs_destroy(cachep, &list);
2182 }
2183}
2184
/*
 * Remove slabs from the list of free slabs.  Specify the number of slabs
 * to drain in @tofree.  Returns the actual number of slabs released.
 */
2191static int drain_freelist(struct kmem_cache *cache,
2192 struct kmem_cache_node *n, int tofree)
2193{
2194 struct list_head *p;
2195 int nr_freed;
2196 struct page *page;
2197
2198 nr_freed = 0;
2199 while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2200
2201 spin_lock_irq(&n->list_lock);
2202 p = n->slabs_free.prev;
2203 if (p == &n->slabs_free) {
2204 spin_unlock_irq(&n->list_lock);
2205 goto out;
2206 }
2207
2208 page = list_entry(p, struct page, slab_list);
2209 list_del(&page->slab_list);
2210 n->free_slabs--;
2211 n->total_slabs--;
2212
2213
2214
2215
2216 n->free_objects -= cache->num;
2217 spin_unlock_irq(&n->list_lock);
2218 slab_destroy(cache, page);
2219 nr_freed++;
2220 }
2221out:
2222 return nr_freed;
2223}
2224
2225bool __kmem_cache_empty(struct kmem_cache *s)
2226{
2227 int node;
2228 struct kmem_cache_node *n;
2229
2230 for_each_kmem_cache_node(s, node, n)
2231 if (!list_empty(&n->slabs_full) ||
2232 !list_empty(&n->slabs_partial))
2233 return false;
2234 return true;
2235}
2236
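/*
 * Free as many slabs as possible.  Returns non-zero if some objects are
 * still allocated (full or partial slabs remain).
 */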
2237int __kmem_cache_shrink(struct kmem_cache *cachep)
2238{
2239 int ret = 0;
2240 int node;
2241 struct kmem_cache_node *n;
2242
2243 drain_cpu_caches(cachep);
2244
2245 check_irq_on();
2246 for_each_kmem_cache_node(cachep, node, n) {
2247 drain_freelist(cachep, n, INT_MAX);
2248
2249 ret += !list_empty(&n->slabs_full) ||
2250 !list_empty(&n->slabs_partial);
2251 }
2252 return (ret ? 1 : 0);
2253}
2254
2255int __kmem_cache_shutdown(struct kmem_cache *cachep)
2256{
2257 return __kmem_cache_shrink(cachep);
2258}
2259
2260void __kmem_cache_release(struct kmem_cache *cachep)
2261{
2262 int i;
2263 struct kmem_cache_node *n;
2264
2265 cache_random_seq_destroy(cachep);
2266
2267 free_percpu(cachep->cpu_cache);
2268
2269
2270 for_each_kmem_cache_node(cachep, i, n) {
2271 kfree(n->shared);
2272 free_alien_cache(n->alien);
2273 kfree(n);
2274 cachep->node[i] = NULL;
2275 }
2276}
2277
2278
/*
 * Get the memory for a slab management object.
 *
 * For caches with an off-slab descriptor, the freelist cannot come from the
 * cache that is currently being grown: it is taken from the appropriately
 * sized kmalloc cache instead (cachep->freelist_cache).  OBJFREELIST caches
 * store the freelist in one of their own (currently unused) objects, and
 * on-slab caches keep it in the last bytes of the slab page.
 */
2292static void *alloc_slabmgmt(struct kmem_cache *cachep,
2293 struct page *page, int colour_off,
2294 gfp_t local_flags, int nodeid)
2295{
2296 void *freelist;
2297 void *addr = page_address(page);
2298
2299 page->s_mem = addr + colour_off;
2300 page->active = 0;
2301
2302 if (OBJFREELIST_SLAB(cachep))
2303 freelist = NULL;
2304 else if (OFF_SLAB(cachep)) {
2305
2306 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2307 local_flags, nodeid);
2308 } else {
2309
2310 freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2311 cachep->freelist_size;
2312 }
2313
2314 return freelist;
2315}
2316
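/*
 * The on-slab freelist is an array of object indexes; entries at positions
 * >= page->active refer to objects that are currently free.
 */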
2317static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2318{
2319 return ((freelist_idx_t *)page->freelist)[idx];
2320}
2321
2322static inline void set_free_obj(struct page *page,
2323 unsigned int idx, freelist_idx_t val)
2324{
2325 ((freelist_idx_t *)(page->freelist))[idx] = val;
2326}
2327
2328static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2329{
2330#if DEBUG
2331 int i;
2332
2333 for (i = 0; i < cachep->num; i++) {
2334 void *objp = index_to_obj(cachep, page, i);
2335
2336 if (cachep->flags & SLAB_STORE_USER)
2337 *dbg_userword(cachep, objp) = NULL;
2338
2339 if (cachep->flags & SLAB_RED_ZONE) {
2340 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2341 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2342 }
2343
		/*
		 * Constructors are not allowed to allocate memory from the
		 * same cache which they are a constructor for; that would
		 * deadlock.
		 */
2348 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2349 kasan_unpoison_object_data(cachep,
2350 objp + obj_offset(cachep));
2351 cachep->ctor(objp + obj_offset(cachep));
2352 kasan_poison_object_data(
2353 cachep, objp + obj_offset(cachep));
2354 }
2355
2356 if (cachep->flags & SLAB_RED_ZONE) {
2357 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2358 slab_error(cachep, "constructor overwrote the end of an object");
2359 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2360 slab_error(cachep, "constructor overwrote the start of an object");
2361 }
2362
2363 if (cachep->flags & SLAB_POISON) {
2364 poison_obj(cachep, objp, POISON_FREE);
2365 slab_kernel_map(cachep, objp, 0);
2366 }
2367 }
2368#endif
2369}
2370
2371#ifdef CONFIG_SLAB_FREELIST_RANDOM
2372
2373union freelist_init_state {
2374 struct {
2375 unsigned int pos;
2376 unsigned int *list;
2377 unsigned int count;
2378 };
2379 struct rnd_state rnd_state;
2380};
2381
/*
 * Initialise the freelist randomisation state.  Returns true if the
 * pre-computed list is available, false otherwise.
 */
2386static bool freelist_state_initialize(union freelist_init_state *state,
2387 struct kmem_cache *cachep,
2388 unsigned int count)
2389{
2390 bool ret;
2391 unsigned int rand;
2392
2393
2394 rand = get_random_int();
2395
2396
2397 if (!cachep->random_seq) {
2398 prandom_seed_state(&state->rnd_state, rand);
2399 ret = false;
2400 } else {
2401 state->list = cachep->random_seq;
2402 state->count = count;
2403 state->pos = rand % count;
2404 ret = true;
2405 }
2406 return ret;
2407}
2408
2409
2410static freelist_idx_t next_random_slot(union freelist_init_state *state)
2411{
2412 if (state->pos >= state->count)
2413 state->pos = 0;
2414 return state->list[state->pos++];
2415}
2416
2417
2418static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2419{
2420 swap(((freelist_idx_t *)page->freelist)[a],
2421 ((freelist_idx_t *)page->freelist)[b]);
2422}
2423
/*
 * Shuffle the slab's freelist, using the pre-computed list when available.
 * Returns true if the freelist was randomised, false otherwise.
 */
2428static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2429{
2430 unsigned int objfreelist = 0, i, rand, count = cachep->num;
2431 union freelist_init_state state;
2432 bool precomputed;
2433
2434 if (count < 2)
2435 return false;
2436
2437 precomputed = freelist_state_initialize(&state, cachep, count);
2438
2439
2440 if (OBJFREELIST_SLAB(cachep)) {
2441 if (!precomputed)
2442 objfreelist = count - 1;
2443 else
2444 objfreelist = next_random_slot(&state);
2445 page->freelist = index_to_obj(cachep, page, objfreelist) +
2446 obj_offset(cachep);
2447 count--;
2448 }
2449
	/*
	 * On early boot the list is generated dynamically; later a
	 * pre-computed list is used for speed.
	 */
2454 if (!precomputed) {
2455 for (i = 0; i < count; i++)
2456 set_free_obj(page, i, i);
2457
		/* Fisher-Yates shuffle of the index array. */
2459 for (i = count - 1; i > 0; i--) {
2460 rand = prandom_u32_state(&state.rnd_state);
2461 rand %= (i + 1);
2462 swap_free_obj(page, i, rand);
2463 }
2464 } else {
2465 for (i = 0; i < count; i++)
2466 set_free_obj(page, i, next_random_slot(&state));
2467 }
2468
2469 if (OBJFREELIST_SLAB(cachep))
2470 set_free_obj(page, cachep->num - 1, objfreelist);
2471
2472 return true;
2473}
2474#else
2475static inline bool shuffle_freelist(struct kmem_cache *cachep,
2476 struct page *page)
2477{
2478 return false;
2479}
2480#endif
2481
2482static void cache_init_objs(struct kmem_cache *cachep,
2483 struct page *page)
2484{
2485 int i;
2486 void *objp;
2487 bool shuffled;
2488
2489 cache_init_objs_debug(cachep, page);
2490
2491
2492 shuffled = shuffle_freelist(cachep, page);
2493
2494 if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2495 page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2496 obj_offset(cachep);
2497 }
2498
2499 for (i = 0; i < cachep->num; i++) {
2500 objp = index_to_obj(cachep, page, i);
2501 objp = kasan_init_slab_obj(cachep, objp);
2502
2503
2504 if (DEBUG == 0 && cachep->ctor) {
2505 kasan_unpoison_object_data(cachep, objp);
2506 cachep->ctor(objp);
2507 kasan_poison_object_data(cachep, objp);
2508 }
2509
2510 if (!shuffled)
2511 set_free_obj(page, i, i);
2512 }
2513}
2514
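/* Take the next free object off the slab's freelist and bump page->active. */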
2515static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2516{
2517 void *objp;
2518
2519 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2520 page->active++;
2521
2522 return objp;
2523}
2524
2525static void slab_put_obj(struct kmem_cache *cachep,
2526 struct page *page, void *objp)
2527{
2528 unsigned int objnr = obj_to_index(cachep, page, objp);
2529#if DEBUG
2530 unsigned int i;
2531
2532
2533 for (i = page->active; i < cachep->num; i++) {
2534 if (get_free_obj(page, i) == objnr) {
2535 pr_err("slab: double free detected in cache '%s', objp %px\n",
2536 cachep->name, objp);
2537 BUG();
2538 }
2539 }
2540#endif
2541 page->active--;
2542 if (!page->freelist)
2543 page->freelist = objp + obj_offset(cachep);
2544
2545 set_free_obj(page, page->active, objnr);
2546}
2547
2548
/*
 * Associate the slab page with its cache and freelist.  This is what makes
 * virt-to-cache lookups (kfree() and friends) work for the objects on it.
 */
2553static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2554 void *freelist)
2555{
2556 page->slab_cache = cache;
2557 page->freelist = freelist;
2558}
2559
/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */
2564static struct page *cache_grow_begin(struct kmem_cache *cachep,
2565 gfp_t flags, int nodeid)
2566{
2567 void *freelist;
2568 size_t offset;
2569 gfp_t local_flags;
2570 int page_node;
2571 struct kmem_cache_node *n;
2572 struct page *page;
2573
	/*
	 * Be lazy and only check for valid flags here, keeping it out of the
	 * critical path in kmem_cache_alloc().
	 */
2578 if (unlikely(flags & GFP_SLAB_BUG_MASK))
2579 flags = kmalloc_fix_flags(flags);
2580
2581 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2582 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2583
2584 check_irq_off();
2585 if (gfpflags_allow_blocking(local_flags))
2586 local_irq_enable();
2587
	/*
	 * Get memory for the objects: attempt to allocate the backing pages
	 * from @nodeid.
	 */
2592 page = kmem_getpages(cachep, local_flags, nodeid);
2593 if (!page)
2594 goto failed;
2595
2596 page_node = page_to_nid(page);
2597 n = get_node(cachep, page_node);
2598
2599
2600 n->colour_next++;
2601 if (n->colour_next >= cachep->colour)
2602 n->colour_next = 0;
2603
2604 offset = n->colour_next;
2605 if (offset >= cachep->colour)
2606 offset = 0;
2607
2608 offset *= cachep->colour_off;
2609
	/*
	 * Poison the slab for KASAN before the freelist and objects are set
	 * up in it.
	 */
2615 kasan_poison_slab(page);
2616
2617
2618 freelist = alloc_slabmgmt(cachep, page, offset,
2619 local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2620 if (OFF_SLAB(cachep) && !freelist)
2621 goto opps1;
2622
2623 slab_map_pages(cachep, page, freelist);
2624
2625 cache_init_objs(cachep, page);
2626
2627 if (gfpflags_allow_blocking(local_flags))
2628 local_irq_disable();
2629
2630 return page;
2631
2632opps1:
2633 kmem_freepages(cachep, page);
2634failed:
2635 if (gfpflags_allow_blocking(local_flags))
2636 local_irq_disable();
2637 return NULL;
2638}
2639
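/*
 * Second half of growing a cache: with interrupts off again, link the newly
 * initialised page into the node's slab lists and update the counters.
 */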
2640static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2641{
2642 struct kmem_cache_node *n;
2643 void *list = NULL;
2644
2645 check_irq_off();
2646
2647 if (!page)
2648 return;
2649
2650 INIT_LIST_HEAD(&page->slab_list);
2651 n = get_node(cachep, page_to_nid(page));
2652
2653 spin_lock(&n->list_lock);
2654 n->total_slabs++;
2655 if (!page->active) {
2656 list_add_tail(&page->slab_list, &n->slabs_free);
2657 n->free_slabs++;
2658 } else
2659 fixup_slab_list(cachep, n, page, &list);
2660
2661 STATS_INC_GROWN(cachep);
2662 n->free_objects += cachep->num - page->active;
2663 spin_unlock(&n->list_lock);
2664
2665 fixup_objfreelist_debug(cachep, &list);
2666}
2667
2668#if DEBUG
2669
/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */
2675static void kfree_debugcheck(const void *objp)
2676{
2677 if (!virt_addr_valid(objp)) {
2678 pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2679 (unsigned long)objp);
2680 BUG();
2681 }
2682}
2683
2684static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2685{
2686 unsigned long long redzone1, redzone2;
2687
2688 redzone1 = *dbg_redzone1(cache, obj);
2689 redzone2 = *dbg_redzone2(cache, obj);
2690
	/* Both redzones intact: the redzone is ok. */
2694 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2695 return;
2696
2697 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2698 slab_error(cache, "double free detected");
2699 else
2700 slab_error(cache, "memory outside object was overwritten");
2701
2702 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2703 obj, redzone1, redzone2);
2704}
2705
2706static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2707 unsigned long caller)
2708{
2709 unsigned int objnr;
2710 struct page *page;
2711
2712 BUG_ON(virt_to_cache(objp) != cachep);
2713
2714 objp -= obj_offset(cachep);
2715 kfree_debugcheck(objp);
2716 page = virt_to_head_page(objp);
2717
2718 if (cachep->flags & SLAB_RED_ZONE) {
2719 verify_redzone_free(cachep, objp);
2720 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2721 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2722 }
2723 if (cachep->flags & SLAB_STORE_USER)
2724 *dbg_userword(cachep, objp) = (void *)caller;
2725
2726 objnr = obj_to_index(cachep, page, objp);
2727
2728 BUG_ON(objnr >= cachep->num);
2729 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2730
2731 if (cachep->flags & SLAB_POISON) {
2732 poison_obj(cachep, objp, POISON_FREE);
2733 slab_kernel_map(cachep, objp, 0);
2734 }
2735 return objp;
2736}
2737
2738#else
2739#define kfree_debugcheck(x) do { } while(0)
2740#define cache_free_debugcheck(x,objp,z) (objp)
2741#endif
2742
2743static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2744 void **list)
2745{
2746#if DEBUG
2747 void *next = *list;
2748 void *objp;
2749
2750 while (next) {
2751 objp = next - obj_offset(cachep);
2752 next = *(void **)next;
2753 poison_obj(cachep, objp, POISON_FREE);
2754 }
2755#endif
2756}
2757
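/*
 * Move the slab to the full or partial list depending on whether all of
 * its objects are now in use.
 */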
2758static inline void fixup_slab_list(struct kmem_cache *cachep,
2759 struct kmem_cache_node *n, struct page *page,
2760 void **list)
2761{
	/* Move the slab to the correct list: */
2763 list_del(&page->slab_list);
2764 if (page->active == cachep->num) {
2765 list_add(&page->slab_list, &n->slabs_full);
2766 if (OBJFREELIST_SLAB(cachep)) {
2767#if DEBUG
			/* Poisoning will be done without holding the lock */
2769 if (cachep->flags & SLAB_POISON) {
2770 void **objp = page->freelist;
2771
2772 *objp = *list;
2773 *list = objp;
2774 }
2775#endif
2776 page->freelist = NULL;
2777 }
2778 } else
2779 list_add(&page->slab_list, &n->slabs_partial);
2780}
2781
/* Try to find a non-pfmemalloc slab if needed */
2783static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2784 struct page *page, bool pfmemalloc)
2785{
2786 if (!page)
2787 return NULL;
2788
2789 if (pfmemalloc)
2790 return page;
2791
2792 if (!PageSlabPfmemalloc(page))
2793 return page;
2794
	/* No need to keep the slab if we have enough free objects */
2796 if (n->free_objects > n->free_limit) {
2797 ClearPageSlabPfmemalloc(page);
2798 return page;
2799 }
2800
	/* Move pfmemalloc slab to the end of the list to speed up next search */
2802 list_del(&page->slab_list);
2803 if (!page->active) {
2804 list_add_tail(&page->slab_list, &n->slabs_free);
2805 n->free_slabs++;
2806 } else
2807 list_add_tail(&page->slab_list, &n->slabs_partial);
2808
2809 list_for_each_entry(page, &n->slabs_partial, slab_list) {
2810 if (!PageSlabPfmemalloc(page))
2811 return page;
2812 }
2813
2814 n->free_touched = 1;
2815 list_for_each_entry(page, &n->slabs_free, slab_list) {
2816 if (!PageSlabPfmemalloc(page)) {
2817 n->free_slabs--;
2818 return page;
2819 }
2820 }
2821
2822 return NULL;
2823}
2824
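/* Pick a slab to allocate from: partial slabs first, then free slabs. */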
2825static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2826{
2827 struct page *page;
2828
2829 assert_spin_locked(&n->list_lock);
2830 page = list_first_entry_or_null(&n->slabs_partial, struct page,
2831 slab_list);
2832 if (!page) {
2833 n->free_touched = 1;
2834 page = list_first_entry_or_null(&n->slabs_free, struct page,
2835 slab_list);
2836 if (page)
2837 n->free_slabs--;
2838 }
2839
2840 if (sk_memalloc_socks())
2841 page = get_valid_first_slab(n, page, pfmemalloc);
2842
2843 return page;
2844}
2845
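/*
 * Allocate from a pfmemalloc (memory reserve) slab, but only when the
 * allocation context is entitled to dip into reserves.
 */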
2846static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2847 struct kmem_cache_node *n, gfp_t flags)
2848{
2849 struct page *page;
2850 void *obj;
2851 void *list = NULL;
2852
2853 if (!gfp_pfmemalloc_allowed(flags))
2854 return NULL;
2855
2856 spin_lock(&n->list_lock);
2857 page = get_first_slab(n, true);
2858 if (!page) {
2859 spin_unlock(&n->list_lock);
2860 return NULL;
2861 }
2862
2863 obj = slab_get_obj(cachep, page);
2864 n->free_objects--;
2865
2866 fixup_slab_list(cachep, n, page, &list);
2867
2868 spin_unlock(&n->list_lock);
2869 fixup_objfreelist_debug(cachep, &list);
2870
2871 return obj;
2872}
2873
/*
 * The slab list should be fixed up by fixup_slab_list() for an existing slab,
 * or by cache_grow_end() for a new slab.
 */
2878static __always_inline int alloc_block(struct kmem_cache *cachep,
2879 struct array_cache *ac, struct page *page, int batchcount)
2880{
	/*
	 * There must be at least one object available for
	 * allocation.
	 */
2885 BUG_ON(page->active >= cachep->num);
2886
2887 while (page->active < cachep->num && batchcount--) {
2888 STATS_INC_ALLOCED(cachep);
2889 STATS_INC_ACTIVE(cachep);
2890 STATS_SET_HIGH(cachep);
2891
2892 ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2893 }
2894
2895 return batchcount;
2896}
2897
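/*
 * Refill the per-CPU array cache: first from the node's shared array,
 * then from its slab lists, and finally by growing the cache.
 */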
2898static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2899{
2900 int batchcount;
2901 struct kmem_cache_node *n;
2902 struct array_cache *ac, *shared;
2903 int node;
2904 void *list = NULL;
2905 struct page *page;
2906
2907 check_irq_off();
2908 node = numa_mem_id();
2909
2910 ac = cpu_cache_get(cachep);
2911 batchcount = ac->batchcount;
2912 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/*
		 * If there was little recent activity on this cache, then
		 * perform only a partial refill.  Otherwise we could generate
		 * refill bouncing.
		 */
2918 batchcount = BATCHREFILL_LIMIT;
2919 }
2920 n = get_node(cachep, node);
2921
2922 BUG_ON(ac->avail > 0 || !n);
2923 shared = READ_ONCE(n->shared);
2924 if (!n->free_objects && (!shared || !shared->avail))
2925 goto direct_grow;
2926
2927 spin_lock(&n->list_lock);
2928 shared = READ_ONCE(n->shared);
2929
	/* See if we can refill from the shared array */
2931 if (shared && transfer_objects(ac, shared, batchcount)) {
2932 shared->touched = 1;
2933 goto alloc_done;
2934 }
2935
2936 while (batchcount > 0) {
		/* Get the slab the allocation is to come from. */
2938 page = get_first_slab(n, false);
2939 if (!page)
2940 goto must_grow;
2941
2942 check_spinlock_acquired(cachep);
2943
2944 batchcount = alloc_block(cachep, ac, page, batchcount);
2945 fixup_slab_list(cachep, n, page, &list);
2946 }
2947
2948must_grow:
2949 n->free_objects -= ac->avail;
2950alloc_done:
2951 spin_unlock(&n->list_lock);
2952 fixup_objfreelist_debug(cachep, &list);
2953
2954direct_grow:
2955 if (unlikely(!ac->avail)) {
		/* Check if we can use an object from a pfmemalloc slab */
2957 if (sk_memalloc_socks()) {
2958 void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
2959
2960 if (obj)
2961 return obj;
2962 }
2963
2964 page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
2965
		/*
		 * cache_grow_begin() can reenable interrupts, so the 'ac'
		 * pointer may have changed in the meantime.
		 */
2970 ac = cpu_cache_get(cachep);
2971 if (!ac->avail && page)
2972 alloc_block(cachep, ac, page, batchcount);
2973 cache_grow_end(cachep, page);
2974
2975 if (!ac->avail)
2976 return NULL;
2977 }
2978 ac->touched = 1;
2979
2980 return ac->entry[--ac->avail];
2981}
2982
2983static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2984 gfp_t flags)
2985{
2986 might_sleep_if(gfpflags_allow_blocking(flags));
2987}
2988
2989#if DEBUG
2990static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2991 gfp_t flags, void *objp, unsigned long caller)
2992{
2993 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2994 if (!objp)
2995 return objp;
2996 if (cachep->flags & SLAB_POISON) {
2997 check_poison_obj(cachep, objp);
2998 slab_kernel_map(cachep, objp, 1);
2999 poison_obj(cachep, objp, POISON_INUSE);
3000 }
3001 if (cachep->flags & SLAB_STORE_USER)
3002 *dbg_userword(cachep, objp) = (void *)caller;
3003
3004 if (cachep->flags & SLAB_RED_ZONE) {
3005 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3006 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3007 slab_error(cachep, "double free, or memory outside object was overwritten");
3008 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3009 objp, *dbg_redzone1(cachep, objp),
3010 *dbg_redzone2(cachep, objp));
3011 }
3012 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3013 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3014 }
3015
3016 objp += obj_offset(cachep);
3017 if (cachep->ctor && cachep->flags & SLAB_POISON)
3018 cachep->ctor(objp);
3019 if (ARCH_SLAB_MINALIGN &&
3020 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3021 pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3022 objp, (int)ARCH_SLAB_MINALIGN);
3023 }
3024 return objp;
3025}
3026#else
3027#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3028#endif
3029
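/* Fast path: allocate from the per-CPU array cache, refilling it on a miss. */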
3030static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3031{
3032 void *objp;
3033 struct array_cache *ac;
3034
3035 check_irq_off();
3036
3037 ac = cpu_cache_get(cachep);
3038 if (likely(ac->avail)) {
3039 ac->touched = 1;
3040 objp = ac->entry[--ac->avail];
3041
3042 STATS_INC_ALLOCHIT(cachep);
3043 goto out;
3044 }
3045
3046 STATS_INC_ALLOCMISS(cachep);
3047 objp = cache_alloc_refill(cachep, flags);
	/*
	 * The 'ac' may be updated by cache_alloc_refill(), and
	 * kmemleak_erase() below requires its correct value.
	 */
3052 ac = cpu_cache_get(cachep);
3053
3054out:
	/*
	 * To avoid a false negative, if an object that is in one of the
	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
	 * treat the array pointers as a reference to the object.
	 */
3060 if (objp)
3061 kmemleak_erase(&ac->entry[ac->avail]);
3062 return objp;
3063}
3064
3065#ifdef CONFIG_NUMA
/*
 * Try allocating on another node if PFA_SPREAD_SLAB or a mempolicy is set.
 *
 * If we are in_interrupt, then process context, including cpusets and
 * mempolicy, may not apply and should not be used for allocation policy.
 */
3072static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3073{
3074 int nid_alloc, nid_here;
3075
3076 if (in_interrupt() || (flags & __GFP_THISNODE))
3077 return NULL;
3078 nid_alloc = nid_here = numa_mem_id();
3079 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3080 nid_alloc = cpuset_slab_spread_node();
3081 else if (current->mempolicy)
3082 nid_alloc = mempolicy_slab_node();
3083 if (nid_alloc != nid_here)
3084 return ____cache_alloc_node(cachep, flags, nid_alloc);
3085 return NULL;
3086}
3087
/*
 * Fallback function if there was no memory available and no objects on a
 * certain node and fallback is permitted. First we scan all the
 * available nodes for available objects. If that fails then we
 * perform an allocation without specifying a node. This allows the page
 * allocator to do its reclaim / fallback magic. We then insert the
 * slab into the proper nodelist and then allocate from it.
 */
3096static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3097{
3098 struct zonelist *zonelist;
3099 struct zoneref *z;
3100 struct zone *zone;
3101 enum zone_type highest_zoneidx = gfp_zone(flags);
3102 void *obj = NULL;
3103 struct page *page;
3104 int nid;
3105 unsigned int cpuset_mems_cookie;
3106
3107 if (flags & __GFP_THISNODE)
3108 return NULL;
3109
3110retry_cpuset:
3111 cpuset_mems_cookie = read_mems_allowed_begin();
3112 zonelist = node_zonelist(mempolicy_slab_node(), flags);
3113
3114retry:
	/*
	 * Look through allowed nodes for objects available
	 * from existing per node queues.
	 */
3119 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
3120 nid = zone_to_nid(zone);
3121
3122 if (cpuset_zone_allowed(zone, flags) &&
3123 get_node(cache, nid) &&
3124 get_node(cache, nid)->free_objects) {
3125 obj = ____cache_alloc_node(cache,
3126 gfp_exact_node(flags), nid);
3127 if (obj)
3128 break;
3129 }
3130 }
3131
3132 if (!obj) {
		/*
		 * This allocation will be performed within the constraints
		 * of the current cpuset / memory policy requirements.
		 * We may trigger various forms of reclaim on the allowed
		 * set and go into memory reserves if necessary.
		 */
3139 page = cache_grow_begin(cache, flags, numa_mem_id());
3140 cache_grow_end(cache, page);
3141 if (page) {
3142 nid = page_to_nid(page);
3143 obj = ____cache_alloc_node(cache,
3144 gfp_exact_node(flags), nid);
3145
			/*
			 * Another processor may allocate the objects in
			 * the slab since we are not holding any locks.
			 */
3150 if (!obj)
3151 goto retry;
3152 }
3153 }
3154
3155 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3156 goto retry_cpuset;
3157 return obj;
3158}
3159
/*
 * An interface to enable slab creation on nodeid
 */
3163static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3164 int nodeid)
3165{
3166 struct page *page;
3167 struct kmem_cache_node *n;
3168 void *obj = NULL;
3169 void *list = NULL;
3170
3171 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3172 n = get_node(cachep, nodeid);
3173 BUG_ON(!n);
3174
3175 check_irq_off();
3176 spin_lock(&n->list_lock);
3177 page = get_first_slab(n, false);
3178 if (!page)
3179 goto must_grow;
3180
3181 check_spinlock_acquired_node(cachep, nodeid);
3182
3183 STATS_INC_NODEALLOCS(cachep);
3184 STATS_INC_ACTIVE(cachep);
3185 STATS_SET_HIGH(cachep);
3186
3187 BUG_ON(page->active == cachep->num);
3188
3189 obj = slab_get_obj(cachep, page);
3190 n->free_objects--;
3191
3192 fixup_slab_list(cachep, n, page, &list);
3193
3194 spin_unlock(&n->list_lock);
3195 fixup_objfreelist_debug(cachep, &list);
3196 return obj;
3197
3198must_grow:
3199 spin_unlock(&n->list_lock);
3200 page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3201 if (page) {
		/* This slab isn't counted yet so don't update free_objects */
3203 obj = slab_get_obj(cachep, page);
3204 }
3205 cache_grow_end(cachep, page);
3206
3207 return obj ? obj : fallback_alloc(cachep, flags);
3208}
3209
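/*
 * NUMA-aware allocation entry point: try the requested node, falling back
 * to other nodes when it has no kmem_cache_node or no free objects.
 */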
3210static __always_inline void *
3211slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3212 unsigned long caller)
3213{
3214 unsigned long save_flags;
3215 void *ptr;
3216 int slab_node = numa_mem_id();
3217 struct obj_cgroup *objcg = NULL;
3218
3219 flags &= gfp_allowed_mask;
3220 cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3221 if (unlikely(!cachep))
3222 return NULL;
3223
3224 cache_alloc_debugcheck_before(cachep, flags);
3225 local_irq_save(save_flags);
3226
3227 if (nodeid == NUMA_NO_NODE)
3228 nodeid = slab_node;
3229
3230 if (unlikely(!get_node(cachep, nodeid))) {
		/* Node not bootstrapped yet */
3232 ptr = fallback_alloc(cachep, flags);
3233 goto out;
3234 }
3235
3236 if (nodeid == slab_node) {
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
		 * to other nodes. It may fail while we still have
		 * objects on other nodes available.
		 */
3243 ptr = ____cache_alloc(cachep, flags);
3244 if (ptr)
3245 goto out;
3246 }
3247
3248 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3249 out:
3250 local_irq_restore(save_flags);
3251 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3252
3253 if (unlikely(slab_want_init_on_alloc(flags, cachep)) && ptr)
3254 memset(ptr, 0, cachep->object_size);
3255
3256 slab_post_alloc_hook(cachep, objcg, flags, 1, &ptr);
3257 return ptr;
3258}
3259
3260static __always_inline void *
3261__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3262{
3263 void *objp;
3264
3265 if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3266 objp = alternate_node_alloc(cache, flags);
3267 if (objp)
3268 goto out;
3269 }
3270 objp = ____cache_alloc(cache, flags);
3271
	/*
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes.
	 */
3276 if (!objp)
3277 objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3278
3279 out:
3280 return objp;
3281}
3282#else
3283
3284static __always_inline void *
3285__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3286{
3287 return ____cache_alloc(cachep, flags);
3288}
3289
3290#endif
3291
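/*
 * Common allocation entry point: run the pre/post hooks and debug checks
 * around __do_cache_alloc() with local interrupts disabled.
 */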
3292static __always_inline void *
3293slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3294{
3295 unsigned long save_flags;
3296 void *objp;
3297 struct obj_cgroup *objcg = NULL;
3298
3299 flags &= gfp_allowed_mask;
3300 cachep = slab_pre_alloc_hook(cachep, &objcg, 1, flags);
3301 if (unlikely(!cachep))
3302 return NULL;
3303
3304 cache_alloc_debugcheck_before(cachep, flags);
3305 local_irq_save(save_flags);
3306 objp = __do_cache_alloc(cachep, flags);
3307 local_irq_restore(save_flags);
3308 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3309 prefetchw(objp);
3310
3311 if (unlikely(slab_want_init_on_alloc(flags, cachep)) && objp)
3312 memset(objp, 0, cachep->object_size);
3313
3314 slab_post_alloc_hook(cachep, objcg, flags, 1, &objp);
3315 return objp;
3316}
3317
/*
 * Caller needs to acquire the correct kmem_cache_node's list_lock.
 * @list: list of detached free slabs, to be freed by the caller.
 */
3322static void free_block(struct kmem_cache *cachep, void **objpp,
3323 int nr_objects, int node, struct list_head *list)
3324{
3325 int i;
3326 struct kmem_cache_node *n = get_node(cachep, node);
3327 struct page *page;
3328
3329 n->free_objects += nr_objects;
3330
3331 for (i = 0; i < nr_objects; i++) {
3332 void *objp;
3333 struct page *page;
3334
3335 objp = objpp[i];
3336
3337 page = virt_to_head_page(objp);
3338 list_del(&page->slab_list);
3339 check_spinlock_acquired_node(cachep, node);
3340 slab_put_obj(cachep, page, objp);
3341 STATS_DEC_ACTIVE(cachep);
3342
		/* Fix up the slab chains */
3344 if (page->active == 0) {
3345 list_add(&page->slab_list, &n->slabs_free);
3346 n->free_slabs++;
3347 } else {
			/*
			 * Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
3352 list_add_tail(&page->slab_list, &n->slabs_partial);
3353 }
3354 }
3355
3356 while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3357 n->free_objects -= cachep->num;
3358
3359 page = list_last_entry(&n->slabs_free, struct page, slab_list);
3360 list_move(&page->slab_list, list);
3361 n->free_slabs--;
3362 n->total_slabs--;
3363 }
3364}
3365
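/*
 * Flush a batch of objects from the per-CPU array cache back to the node's
 * shared array, or to the slab lists when the shared array is full.
 */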
3366static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3367{
3368 int batchcount;
3369 struct kmem_cache_node *n;
3370 int node = numa_mem_id();
3371 LIST_HEAD(list);
3372
3373 batchcount = ac->batchcount;
3374
3375 check_irq_off();
3376 n = get_node(cachep, node);
3377 spin_lock(&n->list_lock);
3378 if (n->shared) {
3379 struct array_cache *shared_array = n->shared;
3380 int max = shared_array->limit - shared_array->avail;
3381 if (max) {
3382 if (batchcount > max)
3383 batchcount = max;
3384 memcpy(&(shared_array->entry[shared_array->avail]),
3385 ac->entry, sizeof(void *) * batchcount);
3386 shared_array->avail += batchcount;
3387 goto free_done;
3388 }
3389 }
3390
3391 free_block(cachep, ac->entry, batchcount, node, &list);
3392free_done:
3393#if STATS
3394 {
3395 int i = 0;
3396 struct page *page;
3397
3398 list_for_each_entry(page, &n->slabs_free, slab_list) {
3399 BUG_ON(page->active);
3400
3401 i++;
3402 }
3403 STATS_SET_FREEABLE(cachep, i);
3404 }
3405#endif
3406 spin_unlock(&n->list_lock);
3407 ac->avail -= batchcount;
3408 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3409 slabs_destroy(cachep, &list);
3410}
3411
/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
3416static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
3417 unsigned long caller)
3418{
	/* Put the object into the quarantine, don't touch it for now. */
3420 if (kasan_slab_free(cachep, objp, _RET_IP_))
3421 return;
3422
	/* Use KCSAN to help debug racy use-after-free. */
3424 if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU))
3425 __kcsan_check_access(objp, cachep->object_size,
3426 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
3427
3428 ___cache_free(cachep, objp, caller);
3429}
3430
3431void ___cache_free(struct kmem_cache *cachep, void *objp,
3432 unsigned long caller)
3433{
3434 struct array_cache *ac = cpu_cache_get(cachep);
3435
3436 check_irq_off();
3437 if (unlikely(slab_want_init_on_free(cachep)))
3438 memset(objp, 0, cachep->object_size);
3439 kmemleak_free_recursive(objp, cachep->flags);
3440 objp = cache_free_debugcheck(cachep, objp, caller);
3441 memcg_slab_free_hook(cachep, &objp, 1);
3442
	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This will avoid cache misses that happen while accessing slabp
	 * (which is per-page memory reference) to get nodeid. Instead use a
	 * global variable to skip the call, which is most likely to be
	 * present in the cache.
	 */
3450 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3451 return;
3452
3453 if (ac->avail < ac->limit) {
3454 STATS_INC_FREEHIT(cachep);
3455 } else {
3456 STATS_INC_FREEMISS(cachep);
3457 cache_flusharray(cachep, ac);
3458 }
3459
3460 if (sk_memalloc_socks()) {
3461 struct page *page = virt_to_head_page(objp);
3462
3463 if (unlikely(PageSlabPfmemalloc(page))) {
3464 cache_free_pfmemalloc(cachep, page, objp);
3465 return;
3466 }
3467 }
3468
3469 __free_one(ac, objp);
3470}
3471
/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache. The flags are only relevant
 * if the cache has no available objects.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
3482void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3483{
3484 void *ret = slab_alloc(cachep, flags, _RET_IP_);
3485
3486 trace_kmem_cache_alloc(_RET_IP_, ret,
3487 cachep->object_size, cachep->size, flags);
3488
3489 return ret;
3490}
3491EXPORT_SYMBOL(kmem_cache_alloc);
3492
3493static __always_inline void
3494cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3495 size_t size, void **p, unsigned long caller)
3496{
3497 size_t i;
3498
3499 for (i = 0; i < size; i++)
3500 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3501}
3502
3503int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3504 void **p)
3505{
3506 size_t i;
3507 struct obj_cgroup *objcg = NULL;
3508
3509 s = slab_pre_alloc_hook(s, &objcg, size, flags);
3510 if (!s)
3511 return 0;
3512
3513 cache_alloc_debugcheck_before(s, flags);
3514
3515 local_irq_disable();
3516 for (i = 0; i < size; i++) {
3517 void *objp = __do_cache_alloc(s, flags);
3518
3519 if (unlikely(!objp))
3520 goto error;
3521 p[i] = objp;
3522 }
3523 local_irq_enable();
3524
3525 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3526
	/* Clear memory outside the IRQ-disabled section */
3528 if (unlikely(slab_want_init_on_alloc(flags, s)))
3529 for (i = 0; i < size; i++)
3530 memset(p[i], 0, s->object_size);
3531
3532 slab_post_alloc_hook(s, objcg, flags, size, p);
3533
3534 return size;
3535error:
3536 local_irq_enable();
3537 cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3538 slab_post_alloc_hook(s, objcg, flags, i, p);
3539 __kmem_cache_free_bulk(s, i, p);
3540 return 0;
3541}
3542EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3543
3544#ifdef CONFIG_TRACING
3545void *
3546kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3547{
3548 void *ret;
3549
3550 ret = slab_alloc(cachep, flags, _RET_IP_);
3551
3552 ret = kasan_kmalloc(cachep, ret, size, flags);
3553 trace_kmalloc(_RET_IP_, ret,
3554 size, cachep->size, flags);
3555 return ret;
3556}
3557EXPORT_SYMBOL(kmem_cache_alloc_trace);
3558#endif
3559
3560#ifdef CONFIG_NUMA
/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
3574void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3575{
3576 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3577
3578 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3579 cachep->object_size, cachep->size,
3580 flags, nodeid);
3581
3582 return ret;
3583}
3584EXPORT_SYMBOL(kmem_cache_alloc_node);
3585
3586#ifdef CONFIG_TRACING
3587void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3588 gfp_t flags,
3589 int nodeid,
3590 size_t size)
3591{
3592 void *ret;
3593
3594 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3595
3596 ret = kasan_kmalloc(cachep, ret, size, flags);
3597 trace_kmalloc_node(_RET_IP_, ret,
3598 size, cachep->size,
3599 flags, nodeid);
3600 return ret;
3601}
3602EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3603#endif
3604
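/* Common helper for the node-aware kmalloc variants. */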
3605static __always_inline void *
3606__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3607{
3608 struct kmem_cache *cachep;
3609 void *ret;
3610
3611 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3612 return NULL;
3613 cachep = kmalloc_slab(size, flags);
3614 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3615 return cachep;
3616 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3617 ret = kasan_kmalloc(cachep, ret, size, flags);
3618
3619 return ret;
3620}
3621
3622void *__kmalloc_node(size_t size, gfp_t flags, int node)
3623{
3624 return __do_kmalloc_node(size, flags, node, _RET_IP_);
3625}
3626EXPORT_SYMBOL(__kmalloc_node);
3627
3628void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3629 int node, unsigned long caller)
3630{
3631 return __do_kmalloc_node(size, flags, node, caller);
3632}
3633EXPORT_SYMBOL(__kmalloc_node_track_caller);
3634#endif
3635
/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
3644static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3645 unsigned long caller)
3646{
3647 struct kmem_cache *cachep;
3648 void *ret;
3649
3650 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3651 return NULL;
3652 cachep = kmalloc_slab(size, flags);
3653 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3654 return cachep;
3655 ret = slab_alloc(cachep, flags, caller);
3656
3657 ret = kasan_kmalloc(cachep, ret, size, flags);
3658 trace_kmalloc(caller, ret,
3659 size, cachep->size, flags);
3660
3661 return ret;
3662}
3663
3664void *__kmalloc(size_t size, gfp_t flags)
3665{
3666 return __do_kmalloc(size, flags, _RET_IP_);
3667}
3668EXPORT_SYMBOL(__kmalloc);
3669
3670void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3671{
3672 return __do_kmalloc(size, flags, caller);
3673}
3674EXPORT_SYMBOL(__kmalloc_track_caller);
3675
/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
3684void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3685{
3686 unsigned long flags;
3687 cachep = cache_from_obj(cachep, objp);
3688 if (!cachep)
3689 return;
3690
3691 local_irq_save(flags);
3692 debug_check_no_locks_freed(objp, cachep->object_size);
3693 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3694 debug_check_no_obj_freed(objp, cachep->object_size);
3695 __cache_free(cachep, objp, _RET_IP_);
3696 local_irq_restore(flags);
3697
3698 trace_kmem_cache_free(_RET_IP_, objp);
3699}
3700EXPORT_SYMBOL(kmem_cache_free);
3701
3702void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3703{
3704 struct kmem_cache *s;
3705 size_t i;
3706
3707 local_irq_disable();
3708 for (i = 0; i < size; i++) {
3709 void *objp = p[i];
3710
3711 if (!orig_s)
3712 s = virt_to_cache(objp);
3713 else
3714 s = cache_from_obj(orig_s, objp);
3715 if (!s)
3716 continue;
3717
3718 debug_check_no_locks_freed(objp, s->object_size);
3719 if (!(s->flags & SLAB_DEBUG_OBJECTS))
3720 debug_check_no_obj_freed(objp, s->object_size);
3721
3722 __cache_free(s, objp, _RET_IP_);
3723 }
3724 local_irq_enable();
3725
3726
3727}
3728EXPORT_SYMBOL(kmem_cache_free_bulk);
3729
/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
3739void kfree(const void *objp)
3740{
3741 struct kmem_cache *c;
3742 unsigned long flags;
3743
3744 trace_kfree(_RET_IP_, objp);
3745
3746 if (unlikely(ZERO_OR_NULL_PTR(objp)))
3747 return;
3748 local_irq_save(flags);
3749 kfree_debugcheck(objp);
3750 c = virt_to_cache(objp);
3751 if (!c) {
3752 local_irq_restore(flags);
3753 return;
3754 }
3755 debug_check_no_locks_freed(objp, c->object_size);
3756
3757 debug_check_no_obj_freed(objp, c->object_size);
3758 __cache_free(c, (void *)objp, _RET_IP_);
3759 local_irq_restore(flags);
3760}
3761EXPORT_SYMBOL(kfree);
3762
/*
 * This initializes kmem_cache_node or resizes various caches for all nodes.
 */
3766static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3767{
3768 int ret;
3769 int node;
3770 struct kmem_cache_node *n;
3771
3772 for_each_online_node(node) {
3773 ret = setup_kmem_cache_node(cachep, node, gfp, true);
3774 if (ret)
3775 goto fail;
3776
3777 }
3778
3779 return 0;
3780
3781fail:
3782 if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did. */
3784 node--;
3785 while (node >= 0) {
3786 n = get_node(cachep, node);
3787 if (n) {
3788 kfree(n->shared);
3789 free_alien_cache(n->alien);
3790 kfree(n);
3791 cachep->node[node] = NULL;
3792 }
3793 node--;
3794 }
3795 }
3796 return -ENOMEM;
3797}
3798
/* Always called with the slab_mutex held */
3800static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3801 int batchcount, int shared, gfp_t gfp)
3802{
3803 struct array_cache __percpu *cpu_cache, *prev;
3804 int cpu;
3805
3806 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3807 if (!cpu_cache)
3808 return -ENOMEM;
3809
3810 prev = cachep->cpu_cache;
3811 cachep->cpu_cache = cpu_cache;
3812
	/*
	 * Make sure no CPU is still using the old per-cpu caches before
	 * they are drained and freed below.
	 */
3816 if (prev)
3817 kick_all_cpus_sync();
3818
3819 check_irq_on();
3820 cachep->batchcount = batchcount;
3821 cachep->limit = limit;
3822 cachep->shared = shared;
3823
3824 if (!prev)
3825 goto setup_node;
3826
3827 for_each_online_cpu(cpu) {
3828 LIST_HEAD(list);
3829 int node;
3830 struct kmem_cache_node *n;
3831 struct array_cache *ac = per_cpu_ptr(prev, cpu);
3832
3833 node = cpu_to_mem(cpu);
3834 n = get_node(cachep, node);
3835 spin_lock_irq(&n->list_lock);
3836 free_block(cachep, ac->entry, ac->avail, node, &list);
3837 spin_unlock_irq(&n->list_lock);
3838 slabs_destroy(cachep, &list);
3839 }
3840 free_percpu(prev);
3841
3842setup_node:
3843 return setup_kmem_cache_nodes(cachep, gfp);
3844}
3845
/* Called with the slab_mutex held always */
3847static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3848{
3849 int err;
3850 int limit = 0;
3851 int shared = 0;
3852 int batchcount = 0;
3853
3854 err = cache_random_seq_create(cachep, cachep->num, gfp);
3855 if (err)
3856 goto end;
3857
3858 if (limit && shared && batchcount)
3859 goto skip_setup;
	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers below are guessed; ideally they would be auto-tuned
	 * as described by Bonwick.
	 */
3869 if (cachep->size > 131072)
3870 limit = 1;
3871 else if (cachep->size > PAGE_SIZE)
3872 limit = 8;
3873 else if (cachep->size > 1024)
3874 limit = 24;
3875 else if (cachep->size > 256)
3876 limit = 54;
3877 else
3878 limit = 120;
3879
	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing
	 * between cpus is necessary. This is provided by a shared array.
	 * The array replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
3889 shared = 0;
3890 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3891 shared = 8;
3892
3893#if DEBUG
	/*
	 * With debugging enabled, a large batchcount leads to excessively
	 * long periods with local interrupts disabled. Limit the batchcount.
	 */
3898 if (limit > 32)
3899 limit = 32;
3900#endif
3901 batchcount = (limit + 1) / 2;
3902skip_setup:
3903 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3904end:
3905 if (err)
3906 pr_err("enable_cpucache failed for %s, error %d\n",
3907 cachep->name, -err);
3908 return err;
3909}
3910
/*
 * Drain an array if it contains any elements, taking the node lock only if
 * necessary. Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
3916static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
3917 struct array_cache *ac, int node)
3918{
3919 LIST_HEAD(list);
3920
	/* ac from n->shared can be freed if we don't hold the slab_mutex. */
3922 check_mutex_acquired();
3923
3924 if (!ac || !ac->avail)
3925 return;
3926
3927 if (ac->touched) {
3928 ac->touched = 0;
3929 return;
3930 }
3931
3932 spin_lock_irq(&n->list_lock);
3933 drain_array_locked(cachep, ac, node, false, &list);
3934 spin_unlock_irq(&n->list_lock);
3935
3936 slabs_destroy(cachep, &list);
3937}
3938
/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from the workqueue every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */
3951static void cache_reap(struct work_struct *w)
3952{
3953 struct kmem_cache *searchp;
3954 struct kmem_cache_node *n;
3955 int node = numa_mem_id();
3956 struct delayed_work *work = to_delayed_work(w);
3957
3958 if (!mutex_trylock(&slab_mutex))
		/* Give up. Set up the next iteration. */
3960 goto out;
3961
3962 list_for_each_entry(searchp, &slab_caches, list) {
3963 check_irq_on();
3964
		/*
		 * We only take the node lock if absolutely necessary and we
		 * have established with reasonable certainty that
		 * we can do some work if the lock was obtained.
		 */
3970 n = get_node(searchp, node);
3971
3972 reap_alien(searchp, n);
3973
3974 drain_array(searchp, n, cpu_cache_get(searchp), node);
3975
		/*
		 * These are racy checks but it does not matter
		 * if we skip one check or scan twice.
		 */
3980 if (time_after(n->next_reap, jiffies))
3981 goto next;
3982
3983 n->next_reap = jiffies + REAPTIMEOUT_NODE;
3984
3985 drain_array(searchp, n, n->shared, node);
3986
3987 if (n->free_touched)
3988 n->free_touched = 0;
3989 else {
3990 int freed;
3991
3992 freed = drain_freelist(searchp, n, (n->free_limit +
3993 5 * searchp->num - 1) / (5 * searchp->num));
3994 STATS_ADD_REAPED(searchp, freed);
3995 }
3996next:
3997 cond_resched();
3998 }
3999 check_irq_on();
4000 mutex_unlock(&slab_mutex);
4001 next_reap_node();
4002out:
	/* Set up the next iteration */
4004 schedule_delayed_work_on(smp_processor_id(), work,
4005 round_jiffies_relative(REAPTIMEOUT_AC));
4006}
4007
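/* Gather per-cache usage statistics for /proc/slabinfo by walking each node. */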
4008void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4009{
4010 unsigned long active_objs, num_objs, active_slabs;
4011 unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
4012 unsigned long free_slabs = 0;
4013 int node;
4014 struct kmem_cache_node *n;
4015
4016 for_each_kmem_cache_node(cachep, node, n) {
4017 check_irq_on();
4018 spin_lock_irq(&n->list_lock);
4019
4020 total_slabs += n->total_slabs;
4021 free_slabs += n->free_slabs;
4022 free_objs += n->free_objects;
4023
4024 if (n->shared)
4025 shared_avail += n->shared->avail;
4026
4027 spin_unlock_irq(&n->list_lock);
4028 }
4029 num_objs = total_slabs * cachep->num;
4030 active_slabs = total_slabs - free_slabs;
4031 active_objs = num_objs - free_objs;
4032
4033 sinfo->active_objs = active_objs;
4034 sinfo->num_objs = num_objs;
4035 sinfo->active_slabs = active_slabs;
4036 sinfo->num_slabs = total_slabs;
4037 sinfo->shared_avail = shared_avail;
4038 sinfo->limit = cachep->limit;
4039 sinfo->batchcount = cachep->batchcount;
4040 sinfo->shared = cachep->shared;
4041 sinfo->objects_per_slab = cachep->num;
4042 sinfo->cache_order = cachep->gfporder;
4043}
4044
4045void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4046{
4047#if STATS
4048 {
4049 unsigned long high = cachep->high_mark;
4050 unsigned long allocs = cachep->num_allocations;
4051 unsigned long grown = cachep->grown;
4052 unsigned long reaped = cachep->reaped;
4053 unsigned long errors = cachep->errors;
4054 unsigned long max_freeable = cachep->max_freeable;
4055 unsigned long node_allocs = cachep->node_allocs;
4056 unsigned long node_frees = cachep->node_frees;
4057 unsigned long overflows = cachep->node_overflow;
4058
4059 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
4060 allocs, high, grown,
4061 reaped, errors, max_freeable, node_allocs,
4062 node_frees, overflows);
4063 }
4064
4065 {
4066 unsigned long allochit = atomic_read(&cachep->allochit);
4067 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4068 unsigned long freehit = atomic_read(&cachep->freehit);
4069 unsigned long freemiss = atomic_read(&cachep->freemiss);
4070
4071 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4072 allochit, allocmiss, freehit, freemiss);
4073 }
4074#endif
4075}
4076
4077#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 *
 * Return: %0 on success, negative error code otherwise.
 */
4087ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4088 size_t count, loff_t *ppos)
4089{
4090 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4091 int limit, batchcount, shared, res;
4092 struct kmem_cache *cachep;
4093
4094 if (count > MAX_SLABINFO_WRITE)
4095 return -EINVAL;
4096 if (copy_from_user(&kbuf, buffer, count))
4097 return -EFAULT;
4098 kbuf[MAX_SLABINFO_WRITE] = '\0';
4099
4100 tmp = strchr(kbuf, ' ');
4101 if (!tmp)
4102 return -EINVAL;
4103 *tmp = '\0';
4104 tmp++;
4105 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4106 return -EINVAL;
4107
	/* Find the cache in the chain of caches. */
4109 mutex_lock(&slab_mutex);
4110 res = -EINVAL;
4111 list_for_each_entry(cachep, &slab_caches, list) {
4112 if (!strcmp(cachep->name, kbuf)) {
4113 if (limit < 1 || batchcount < 1 ||
4114 batchcount > limit || shared < 0) {
4115 res = 0;
4116 } else {
4117 res = do_tune_cpucache(cachep, limit,
4118 batchcount, shared,
4119 GFP_KERNEL);
4120 }
4121 break;
4122 }
4123 }
4124 mutex_unlock(&slab_mutex);
4125 if (res >= 0)
4126 res = count;
4127 return res;
4128}
4129
4130#ifdef CONFIG_HARDENED_USERCOPY
/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.
 *
 * Returns NULL if check passes, otherwise const char * to name of cache
 * to indicate an error.
 */
4139void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4140 bool to_user)
4141{
4142 struct kmem_cache *cachep;
4143 unsigned int objnr;
4144 unsigned long offset;
4145
4146 ptr = kasan_reset_tag(ptr);
4147
	/* Find and validate the object. */
4149 cachep = page->slab_cache;
4150 objnr = obj_to_index(cachep, page, (void *)ptr);
4151 BUG_ON(objnr >= cachep->num);
4152
	/* Find the offset within the object. */
4154 offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4155
	/* Allow an address range falling entirely within the usercopy region. */
4157 if (offset >= cachep->useroffset &&
4158 offset - cachep->useroffset <= cachep->usersize &&
4159 n <= cachep->useroffset - offset + cachep->usersize)
4160 return;
4161
	/*
	 * If the copy is still within the allocated object, produce
	 * a warning instead of rejecting the copy. This is intended
	 * to be a temporary method to find any missing usercopy
	 * whitelists.
	 */
4168 if (usercopy_fallback &&
4169 offset <= cachep->object_size &&
4170 n <= cachep->object_size - offset) {
4171 usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
4172 return;
4173 }
4174
4175 usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
4176}
4177#endif
4178
/**
 * __ksize -- Uninstrumented ksize.
 * @objp: pointer to the object
 *
 * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
 * safety checks as ksize() with KASAN instrumentation enabled.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
4188size_t __ksize(const void *objp)
4189{
4190 struct kmem_cache *c;
4191 size_t size;
4192
4193 BUG_ON(!objp);
4194 if (unlikely(objp == ZERO_SIZE_PTR))
4195 return 0;
4196
4197 c = virt_to_cache(objp);
4198 size = c ? c->object_size : 0;
4199
4200 return size;
4201}
4202EXPORT_SYMBOL(__ksize);
4203