#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/memory.h>
#include <linux/prefetch.h>
#include <linux/sched/task_stack.h>

#include <net/sock.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
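
/*
 * Per-CPU cache of objects for one kmem_cache.  'avail' is the number of
 * cached object pointers currently held in entry[], 'limit' caps that
 * number, 'batchcount' is how many objects are moved at once when the
 * cache is refilled or flushed, and 'touched' records recent use so the
 * reaper leaves a busy cache alone.
 */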
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	((slab_flags_t __force)0x40000000U)
#define CFLGS_OFF_SLAB		((slab_flags_t __force)0x80000000U)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)		((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
260
261
262
263
264
265
266
267#define REAPTIMEOUT_AC (2*HZ)
268#define REAPTIMEOUT_NODE (4*HZ)
269
270#if STATS
271#define STATS_INC_ACTIVE(x) ((x)->num_active++)
272#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
273#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
274#define STATS_INC_GROWN(x) ((x)->grown++)
275#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
276#define STATS_SET_HIGH(x) \
277 do { \
278 if ((x)->num_active > (x)->high_mark) \
279 (x)->high_mark = (x)->num_active; \
280 } while (0)
281#define STATS_INC_ERR(x) ((x)->errors++)
282#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
283#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
284#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
285#define STATS_SET_FREEABLE(x, i) \
286 do { \
287 if ((x)->max_freeable < i) \
288 (x)->max_freeable = i; \
289 } while (0)
290#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
291#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
292#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
293#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
294#else
295#define STATS_INC_ACTIVE(x) do { } while (0)
296#define STATS_DEC_ACTIVE(x) do { } while (0)
297#define STATS_INC_ALLOCED(x) do { } while (0)
298#define STATS_INC_GROWN(x) do { } while (0)
299#define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0)
300#define STATS_SET_HIGH(x) do { } while (0)
301#define STATS_INC_ERR(x) do { } while (0)
302#define STATS_INC_NODEALLOCS(x) do { } while (0)
303#define STATS_INC_NODEFREES(x) do { } while (0)
304#define STATS_INC_ACOVERFLOW(x) do { } while (0)
305#define STATS_SET_FREEABLE(x, i) do { } while (0)
306#define STATS_INC_ALLOCHIT(x) do { } while (0)
307#define STATS_INC_ALLOCMISS(x) do { } while (0)
308#define STATS_INC_FREEHIT(x) do { } while (0)
309#define STATS_INC_FREEMISS(x) do { } while (0)
310#endif
311
312#if DEBUG
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327static int obj_offset(struct kmem_cache *cachep)
328{
329 return cachep->obj_offset;
330}
331
332static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
333{
334 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
335 return (unsigned long long*) (objp + obj_offset(cachep) -
336 sizeof(unsigned long long));
337}
338
339static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
340{
341 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
342 if (cachep->flags & SLAB_STORE_USER)
343 return (unsigned long long *)(objp + cachep->size -
344 sizeof(unsigned long long) -
345 REDZONE_ALIGN);
346 return (unsigned long long *) (objp + cachep->size -
347 sizeof(unsigned long long));
348}
349
350static void **dbg_userword(struct kmem_cache *cachep, void *objp)
351{
352 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
353 return (void **)(objp + cachep->size - BYTES_PER_WORD);
354}
355
356#else
357
358#define obj_offset(x) 0
359#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
360#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
361#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
362
363#endif
364
365#ifdef CONFIG_DEBUG_SLAB_LEAK
366
367static inline bool is_store_user_clean(struct kmem_cache *cachep)
368{
369 return atomic_read(&cachep->store_user_clean) == 1;
370}
371
372static inline void set_store_user_clean(struct kmem_cache *cachep)
373{
374 atomic_set(&cachep->store_user_clean, 1);
375}
376
377static inline void set_store_user_dirty(struct kmem_cache *cachep)
378{
379 if (is_store_user_clean(cachep))
380 atomic_set(&cachep->store_user_clean, 0);
381}
382
383#else
384static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
385
386#endif
387
388
389
390
391
392#define SLAB_MAX_ORDER_HI 1
393#define SLAB_MAX_ORDER_LO 0
394static int slab_max_order = SLAB_MAX_ORDER_LO;
395static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}
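
/*
 * Map an object pointer back to its index in the slab without a runtime
 * division: cache->reciprocal_buffer_size is a precomputed reciprocal of
 * cache->size, so e.g. an object at offset 1024 with a 256-byte stride
 * resolves to index 4 using only a multiply and a shift.
 */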
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
421
422#define BOOT_CPUCACHE_ENTRIES 1
423
424static struct kmem_cache kmem_cache_boot = {
425 .batchcount = 1,
426 .limit = BOOT_CPUCACHE_ENTRIES,
427 .shared = 1,
428 .size = sizeof(struct kmem_cache),
429 .name = "kmem_cache",
430};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}
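
/*
 * Work out how many objects of @buffer_size fit into a slab of
 * 2^@gfporder pages and how many bytes are left over for colouring.
 * Illustrative example: with 4 KiB pages, order 0 and 512-byte objects
 * using an on-slab freelist, each object also consumes one freelist_idx_t
 * (1 or 2 bytes); with a 1-byte index, num = 4096 / 513 = 7 and
 * *left_over = 4096 - 7 * 513 = 505.
 */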
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
		slab_flags_t flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
476
477#if DEBUG
478#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
479
480static void __slab_error(const char *function, struct kmem_cache *cachep,
481 char *msg)
482{
483 pr_err("slab error in %s(): cache `%s': %s\n",
484 function, cachep->name, msg);
485 dump_stack();
486 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
487}
488#endif
489
490
491
492
493
494
495
496
497
498static int use_alien_caches __read_mostly = 1;
499static int __init noaliencache_setup(char *s)
500{
501 use_alien_caches = 0;
502 return 1;
503}
504__setup("noaliencache", noaliencache_setup);
505
506static int __init slab_max_order_setup(char *str)
507{
508 get_option(&str, &slab_max_order);
509 slab_max_order = slab_max_order < 0 ? 0 :
510 min(slab_max_order, MAX_ORDER - 1);
511 slab_max_order_set = true;
512
513 return 1;
514}
515__setup("slab_max_order=", slab_max_order_setup);
516
517#ifdef CONFIG_NUMA
518
519
520
521
522
523
524static DEFINE_PER_CPU(unsigned long, slab_reap_node);
525
526static void init_reap_node(int cpu)
527{
528 per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
529 node_online_map);
530}
531
532static void next_reap_node(void)
533{
534 int node = __this_cpu_read(slab_reap_node);
535
536 node = next_node_in(node, node_online_map);
537 __this_cpu_write(slab_reap_node, node);
538}
539
540#else
541#define init_reap_node(cpu) do { } while (0)
542#define next_reap_node(void) do { } while (0)
543#endif
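
/*
 * Start the per-CPU cache reaper for @cpu: a deferrable delayed work item
 * that periodically runs cache_reap() to trim idle per-CPU caches and
 * return unused slabs to the page allocator.
 */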
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}
592
593static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
594 struct page *page, void *objp)
595{
596 struct kmem_cache_node *n;
597 int page_node;
598 LIST_HEAD(list);
599
600 page_node = page_to_nid(page);
601 n = get_node(cachep, page_node);
602
603 spin_lock(&n->list_lock);
604 free_block(cachep, &objp, 1, page_node, &list);
605 spin_unlock(&n->list_lock);
606
607 slabs_destroy(cachep, &list);
608}
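
/*
 * Transfer up to @max objects from one array_cache to another; any locking
 * of the source and destination caches is the caller's responsibility.
 */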
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
632
633#ifndef CONFIG_NUMA
634
635#define drain_alien_cache(cachep, alien) do { } while (0)
636#define reap_alien(cachep, n) do { } while (0)
637
638static inline struct alien_cache **alloc_alien_cache(int node,
639 int limit, gfp_t gfp)
640{
641 return NULL;
642}
643
644static inline void free_alien_cache(struct alien_cache **ac_ptr)
645{
646}
647
648static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
649{
650 return 0;
651}
652
653static inline void *alternate_node_alloc(struct kmem_cache *cachep,
654 gfp_t flags)
655{
656 return NULL;
657}
658
659static inline void *____cache_alloc_node(struct kmem_cache *cachep,
660 gfp_t flags, int nodeid)
661{
662 return NULL;
663}
664
665static inline gfp_t gfp_exact_node(gfp_t flags)
666{
667 return flags & ~__GFP_NOFAIL;
668}
669
670#else
671
672static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
673static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
674
static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	if (alc) {
		init_arraycache(&alc->ac, entries, batch);
		spin_lock_init(&alc->lock);
	}
	return alc;
}
686
687static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
688{
689 struct alien_cache **alc_ptr;
690 size_t memsize = sizeof(void *) * nr_node_ids;
691 int i;
692
693 if (limit > 1)
694 limit = 12;
695 alc_ptr = kzalloc_node(memsize, gfp, node);
696 if (!alc_ptr)
697 return NULL;
698
699 for_each_node(i) {
700 if (i == node || !node_online(i))
701 continue;
702 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
703 if (!alc_ptr[i]) {
704 for (i--; i >= 0; i--)
705 kfree(alc_ptr[i]);
706 kfree(alc_ptr);
707 return NULL;
708 }
709 }
710 return alc_ptr;
711}
712
713static void free_alien_cache(struct alien_cache **alc_ptr)
714{
715 int i;
716
717 if (!alc_ptr)
718 return;
719 for_each_node(i)
720 kfree(alc_ptr[i]);
721 kfree(alc_ptr);
722}
723
724static void __drain_alien_cache(struct kmem_cache *cachep,
725 struct array_cache *ac, int node,
726 struct list_head *list)
727{
728 struct kmem_cache_node *n = get_node(cachep, node);
729
730 if (ac->avail) {
731 spin_lock(&n->list_lock);
732
733
734
735
736
737 if (n->shared)
738 transfer_objects(n->shared, ac, ac->limit);
739
740 free_block(cachep, ac->entry, ac->avail, node, list);
741 ac->avail = 0;
742 spin_unlock(&n->list_lock);
743 }
744}
745
746
747
748
749static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
750{
751 int node = __this_cpu_read(slab_reap_node);
752
753 if (n->alien) {
754 struct alien_cache *alc = n->alien[node];
755 struct array_cache *ac;
756
757 if (alc) {
758 ac = &alc->ac;
759 if (ac->avail && spin_trylock_irq(&alc->lock)) {
760 LIST_HEAD(list);
761
762 __drain_alien_cache(cachep, ac, node, &list);
763 spin_unlock_irq(&alc->lock);
764 slabs_destroy(cachep, &list);
765 }
766 }
767 }
768}
769
770static void drain_alien_cache(struct kmem_cache *cachep,
771 struct alien_cache **alien)
772{
773 int i = 0;
774 struct alien_cache *alc;
775 struct array_cache *ac;
776 unsigned long flags;
777
778 for_each_online_node(i) {
779 alc = alien[i];
780 if (alc) {
781 LIST_HEAD(list);
782
783 ac = &alc->ac;
784 spin_lock_irqsave(&alc->lock, flags);
785 __drain_alien_cache(cachep, ac, i, &list);
786 spin_unlock_irqrestore(&alc->lock, flags);
787 slabs_destroy(cachep, &list);
788 }
789 }
790}
791
792static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
793 int node, int page_node)
794{
795 struct kmem_cache_node *n;
796 struct alien_cache *alien = NULL;
797 struct array_cache *ac;
798 LIST_HEAD(list);
799
800 n = get_node(cachep, node);
801 STATS_INC_NODEFREES(cachep);
802 if (n->alien && n->alien[page_node]) {
803 alien = n->alien[page_node];
804 ac = &alien->ac;
805 spin_lock(&alien->lock);
806 if (unlikely(ac->avail == ac->limit)) {
807 STATS_INC_ACOVERFLOW(cachep);
808 __drain_alien_cache(cachep, ac, page_node, &list);
809 }
810 ac->entry[ac->avail++] = objp;
811 spin_unlock(&alien->lock);
812 slabs_destroy(cachep, &list);
813 } else {
814 n = get_node(cachep, page_node);
815 spin_lock(&n->list_lock);
816 free_block(cachep, &objp, 1, page_node, &list);
817 spin_unlock(&n->list_lock);
818 slabs_destroy(cachep, &list);
819 }
820 return 1;
821}
822
823static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
824{
825 int page_node = page_to_nid(virt_to_page(objp));
826 int node = numa_mem_id();
827
828
829
830
831 if (likely(node == page_node))
832 return 0;
833
834 return __cache_free_alien(cachep, objp, node, page_node);
835}
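
/*
 * Construct a gfp mask for an allocation that must come from a specific
 * node: add __GFP_THISNODE and __GFP_NOWARN, and strip __GFP_RECLAIM and
 * __GFP_NOFAIL so a failed node-local attempt can fall back elsewhere
 * rather than stalling in reclaim.
 */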
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
}
#endif
846
847static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
848{
849 struct kmem_cache_node *n;
850
851
852
853
854
855
856 n = get_node(cachep, node);
857 if (n) {
858 spin_lock_irq(&n->list_lock);
859 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
860 cachep->num;
861 spin_unlock_irq(&n->list_lock);
862
863 return 0;
864 }
865
866 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
867 if (!n)
868 return -ENOMEM;
869
870 kmem_cache_node_init(n);
871 n->next_reap = jiffies + REAPTIMEOUT_NODE +
872 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
873
874 n->free_limit =
875 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
876
877
878
879
880
881
882 cachep->node[node] = n;
883
884 return 0;
885}
886
887#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
888
889
890
891
892
893
894
895
896
897static int init_cache_node_node(int node)
898{
899 int ret;
900 struct kmem_cache *cachep;
901
902 list_for_each_entry(cachep, &slab_caches, list) {
903 ret = init_cache_node(cachep, node, GFP_KERNEL);
904 if (ret)
905 return ret;
906 }
907
908 return 0;
909}
910#endif
911
912static int setup_kmem_cache_node(struct kmem_cache *cachep,
913 int node, gfp_t gfp, bool force_change)
914{
915 int ret = -ENOMEM;
916 struct kmem_cache_node *n;
917 struct array_cache *old_shared = NULL;
918 struct array_cache *new_shared = NULL;
919 struct alien_cache **new_alien = NULL;
920 LIST_HEAD(list);
921
922 if (use_alien_caches) {
923 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
924 if (!new_alien)
925 goto fail;
926 }
927
928 if (cachep->shared) {
929 new_shared = alloc_arraycache(node,
930 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
931 if (!new_shared)
932 goto fail;
933 }
934
935 ret = init_cache_node(cachep, node, gfp);
936 if (ret)
937 goto fail;
938
939 n = get_node(cachep, node);
940 spin_lock_irq(&n->list_lock);
941 if (n->shared && force_change) {
942 free_block(cachep, n->shared->entry,
943 n->shared->avail, node, &list);
944 n->shared->avail = 0;
945 }
946
947 if (!n->shared || force_change) {
948 old_shared = n->shared;
949 n->shared = new_shared;
950 new_shared = NULL;
951 }
952
953 if (!n->alien) {
954 n->alien = new_alien;
955 new_alien = NULL;
956 }
957
958 spin_unlock_irq(&n->list_lock);
959 slabs_destroy(cachep, &list);
960
961
962
963
964
965
966
967 if (old_shared && force_change)
968 synchronize_sched();
969
970fail:
971 kfree(old_shared);
972 kfree(new_shared);
973 free_alien_cache(new_alien);
974
975 return ret;
976}
977
978#ifdef CONFIG_SMP
979
980static void cpuup_canceled(long cpu)
981{
982 struct kmem_cache *cachep;
983 struct kmem_cache_node *n = NULL;
984 int node = cpu_to_mem(cpu);
985 const struct cpumask *mask = cpumask_of_node(node);
986
987 list_for_each_entry(cachep, &slab_caches, list) {
988 struct array_cache *nc;
989 struct array_cache *shared;
990 struct alien_cache **alien;
991 LIST_HEAD(list);
992
993 n = get_node(cachep, node);
994 if (!n)
995 continue;
996
997 spin_lock_irq(&n->list_lock);
998
999
1000 n->free_limit -= cachep->batchcount;
1001
1002
1003 nc = per_cpu_ptr(cachep->cpu_cache, cpu);
1004 if (nc) {
1005 free_block(cachep, nc->entry, nc->avail, node, &list);
1006 nc->avail = 0;
1007 }
1008
1009 if (!cpumask_empty(mask)) {
1010 spin_unlock_irq(&n->list_lock);
1011 goto free_slab;
1012 }
1013
1014 shared = n->shared;
1015 if (shared) {
1016 free_block(cachep, shared->entry,
1017 shared->avail, node, &list);
1018 n->shared = NULL;
1019 }
1020
1021 alien = n->alien;
1022 n->alien = NULL;
1023
1024 spin_unlock_irq(&n->list_lock);
1025
1026 kfree(shared);
1027 if (alien) {
1028 drain_alien_cache(cachep, alien);
1029 free_alien_cache(alien);
1030 }
1031
1032free_slab:
1033 slabs_destroy(cachep, &list);
1034 }
1035
1036
1037
1038
1039
1040 list_for_each_entry(cachep, &slab_caches, list) {
1041 n = get_node(cachep, node);
1042 if (!n)
1043 continue;
1044 drain_freelist(cachep, n, INT_MAX);
1045 }
1046}
1047
1048static int cpuup_prepare(long cpu)
1049{
1050 struct kmem_cache *cachep;
1051 int node = cpu_to_mem(cpu);
1052 int err;
1053
1054
1055
1056
1057
1058
1059
1060 err = init_cache_node_node(node);
1061 if (err < 0)
1062 goto bad;
1063
1064
1065
1066
1067
1068 list_for_each_entry(cachep, &slab_caches, list) {
1069 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1070 if (err)
1071 goto bad;
1072 }
1073
1074 return 0;
1075bad:
1076 cpuup_canceled(cpu);
1077 return -ENOMEM;
1078}
1079
1080int slab_prepare_cpu(unsigned int cpu)
1081{
1082 int err;
1083
1084 mutex_lock(&slab_mutex);
1085 err = cpuup_prepare(cpu);
1086 mutex_unlock(&slab_mutex);
1087 return err;
1088}
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100int slab_dead_cpu(unsigned int cpu)
1101{
1102 mutex_lock(&slab_mutex);
1103 cpuup_canceled(cpu);
1104 mutex_unlock(&slab_mutex);
1105 return 0;
1106}
1107#endif
1108
1109static int slab_online_cpu(unsigned int cpu)
1110{
1111 start_cpu_timer(cpu);
1112 return 0;
1113}
1114
1115static int slab_offline_cpu(unsigned int cpu)
1116{
1117
1118
1119
1120
1121
1122
1123 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1124
1125 per_cpu(slab_reap_work, cpu).work.func = NULL;
1126 return 0;
1127}
1128
1129#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1130
1131
1132
1133
1134
1135
1136
1137static int __meminit drain_cache_node_node(int node)
1138{
1139 struct kmem_cache *cachep;
1140 int ret = 0;
1141
1142 list_for_each_entry(cachep, &slab_caches, list) {
1143 struct kmem_cache_node *n;
1144
1145 n = get_node(cachep, node);
1146 if (!n)
1147 continue;
1148
1149 drain_freelist(cachep, n, INT_MAX);
1150
1151 if (!list_empty(&n->slabs_full) ||
1152 !list_empty(&n->slabs_partial)) {
1153 ret = -EBUSY;
1154 break;
1155 }
1156 }
1157 return ret;
1158}
1159
1160static int __meminit slab_memory_callback(struct notifier_block *self,
1161 unsigned long action, void *arg)
1162{
1163 struct memory_notify *mnb = arg;
1164 int ret = 0;
1165 int nid;
1166
1167 nid = mnb->status_change_nid;
1168 if (nid < 0)
1169 goto out;
1170
1171 switch (action) {
1172 case MEM_GOING_ONLINE:
1173 mutex_lock(&slab_mutex);
1174 ret = init_cache_node_node(nid);
1175 mutex_unlock(&slab_mutex);
1176 break;
1177 case MEM_GOING_OFFLINE:
1178 mutex_lock(&slab_mutex);
1179 ret = drain_cache_node_node(nid);
1180 mutex_unlock(&slab_mutex);
1181 break;
1182 case MEM_ONLINE:
1183 case MEM_OFFLINE:
1184 case MEM_CANCEL_ONLINE:
1185 case MEM_CANCEL_OFFLINE:
1186 break;
1187 }
1188out:
1189 return notifier_from_errno(ret);
1190}
1191#endif
1192
1193
1194
1195
1196static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1197 int nodeid)
1198{
1199 struct kmem_cache_node *ptr;
1200
1201 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1202 BUG_ON(!ptr);
1203
1204 memcpy(ptr, list, sizeof(struct kmem_cache_node));
1205
1206
1207
1208 spin_lock_init(&ptr->list_lock);
1209
1210 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1211 cachep->node[nodeid] = ptr;
1212}
1213
1214
1215
1216
1217
1218static void __init set_up_node(struct kmem_cache *cachep, int index)
1219{
1220 int node;
1221
1222 for_each_online_node(node) {
1223 cachep->node[node] = &init_kmem_cache_node[index + node];
1224 cachep->node[node]->next_reap = jiffies +
1225 REAPTIMEOUT_NODE +
1226 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1227 }
1228}
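
/*
 * Bootstrap initialisation of the slab allocator: wire up the statically
 * allocated kmem_cache_node structures, create the kmem_cache cache itself
 * and the kmalloc cache used for kmem_cache_node, then swap the static
 * bootstrap nodes for properly allocated ones and create the remaining
 * kmalloc caches.
 */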
1234void __init kmem_cache_init(void)
1235{
1236 int i;
1237
1238 kmem_cache = &kmem_cache_boot;
1239
1240 if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1241 use_alien_caches = 0;
1242
1243 for (i = 0; i < NUM_INIT_LISTS; i++)
1244 kmem_cache_node_init(&init_kmem_cache_node[i]);
1245
1246
1247
1248
1249
1250
1251 if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1252 slab_max_order = SLAB_MAX_ORDER_HI;
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279 create_boot_cache(kmem_cache, "kmem_cache",
1280 offsetof(struct kmem_cache, node) +
1281 nr_node_ids * sizeof(struct kmem_cache_node *),
1282 SLAB_HWCACHE_ALIGN, 0, 0);
1283 list_add(&kmem_cache->list, &slab_caches);
1284 memcg_link_cache(kmem_cache);
1285 slab_state = PARTIAL;
1286
1287
1288
1289
1290
1291 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache(
1292 kmalloc_info[INDEX_NODE].name,
1293 kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS,
1294 0, kmalloc_size(INDEX_NODE));
1295 slab_state = PARTIAL_NODE;
1296 setup_kmalloc_cache_index_table();
1297
1298 slab_early_init = 0;
1299
1300
1301 {
1302 int nid;
1303
1304 for_each_online_node(nid) {
1305 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1306
1307 init_list(kmalloc_caches[INDEX_NODE],
1308 &init_kmem_cache_node[SIZE_NODE + nid], nid);
1309 }
1310 }
1311
1312 create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1313}
1314
1315void __init kmem_cache_init_late(void)
1316{
1317 struct kmem_cache *cachep;
1318
1319
1320 mutex_lock(&slab_mutex);
1321 list_for_each_entry(cachep, &slab_caches, list)
1322 if (enable_cpucache(cachep, GFP_NOWAIT))
1323 BUG();
1324 mutex_unlock(&slab_mutex);
1325
1326
1327 slab_state = FULL;
1328
1329#ifdef CONFIG_NUMA
1330
1331
1332
1333
1334 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1335#endif
1336
1337
1338
1339
1340
1341}
1342
1343static int __init cpucache_init(void)
1344{
1345 int ret;
1346
1347
1348
1349
1350 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
1351 slab_online_cpu, slab_offline_cpu);
1352 WARN_ON(ret < 0);
1353
1354 return 0;
1355}
1356__initcall(cpucache_init);
1357
1358static noinline void
1359slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1360{
1361#if DEBUG
1362 struct kmem_cache_node *n;
1363 unsigned long flags;
1364 int node;
1365 static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1366 DEFAULT_RATELIMIT_BURST);
1367
1368 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1369 return;
1370
1371 pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1372 nodeid, gfpflags, &gfpflags);
1373 pr_warn(" cache: %s, object size: %d, order: %d\n",
1374 cachep->name, cachep->size, cachep->gfporder);
1375
1376 for_each_kmem_cache_node(cachep, node, n) {
1377 unsigned long total_slabs, free_slabs, free_objs;
1378
1379 spin_lock_irqsave(&n->list_lock, flags);
1380 total_slabs = n->total_slabs;
1381 free_slabs = n->free_slabs;
1382 free_objs = n->free_objects;
1383 spin_unlock_irqrestore(&n->list_lock, flags);
1384
1385 pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
1386 node, total_slabs - free_slabs, total_slabs,
1387 (total_slabs * cachep->num) - free_objs,
1388 total_slabs * cachep->num);
1389 }
1390#endif
1391}
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1402 int nodeid)
1403{
1404 struct page *page;
1405 int nr_pages;
1406
1407 flags |= cachep->allocflags;
1408
1409 page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
1410 if (!page) {
1411 slab_out_of_memory(cachep, flags, nodeid);
1412 return NULL;
1413 }
1414
1415 if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
1416 __free_pages(page, cachep->gfporder);
1417 return NULL;
1418 }
1419
1420 nr_pages = (1 << cachep->gfporder);
1421 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1422 mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
1423 else
1424 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
1425
1426 __SetPageSlab(page);
1427
1428 if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1429 SetPageSlabPfmemalloc(page);
1430
1431 return page;
1432}
1433
1434
1435
1436
1437static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1438{
1439 int order = cachep->gfporder;
1440 unsigned long nr_freed = (1 << order);
1441
1442 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1443 mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
1444 else
1445 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
1446
1447 BUG_ON(!PageSlab(page));
1448 __ClearPageSlabPfmemalloc(page);
1449 __ClearPageSlab(page);
1450 page_mapcount_reset(page);
1451 page->mapping = NULL;
1452
1453 if (current->reclaim_state)
1454 current->reclaim_state->reclaimed_slab += nr_freed;
1455 memcg_uncharge_slab(page, order, cachep);
1456 __free_pages(page, order);
1457}
1458
1459static void kmem_rcu_free(struct rcu_head *head)
1460{
1461 struct kmem_cache *cachep;
1462 struct page *page;
1463
1464 page = container_of(head, struct page, rcu_head);
1465 cachep = page->slab_cache;
1466
1467 kmem_freepages(cachep, page);
1468}
1469
1470#if DEBUG
1471static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1472{
1473 if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
1474 (cachep->size % PAGE_SIZE) == 0)
1475 return true;
1476
1477 return false;
1478}
1479
1480#ifdef CONFIG_DEBUG_PAGEALLOC
1481static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1482 unsigned long caller)
1483{
1484 int size = cachep->object_size;
1485
1486 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1487
1488 if (size < 5 * sizeof(unsigned long))
1489 return;
1490
1491 *addr++ = 0x12345678;
1492 *addr++ = caller;
1493 *addr++ = smp_processor_id();
1494 size -= 3 * sizeof(unsigned long);
1495 {
1496 unsigned long *sptr = &caller;
1497 unsigned long svalue;
1498
1499 while (!kstack_end(sptr)) {
1500 svalue = *sptr++;
1501 if (kernel_text_address(svalue)) {
1502 *addr++ = svalue;
1503 size -= sizeof(unsigned long);
1504 if (size <= sizeof(unsigned long))
1505 break;
1506 }
1507 }
1508
1509 }
1510 *addr++ = 0x87654321;
1511}
1512
1513static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1514 int map, unsigned long caller)
1515{
1516 if (!is_debug_pagealloc_cache(cachep))
1517 return;
1518
1519 if (caller)
1520 store_stackinfo(cachep, objp, caller);
1521
1522 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1523}
1524
1525#else
1526static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1527 int map, unsigned long caller) {}
1528
1529#endif
1530
1531static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1532{
1533 int size = cachep->object_size;
1534 addr = &((char *)addr)[obj_offset(cachep)];
1535
1536 memset(addr, val, size);
1537 *(unsigned char *)(addr + size - 1) = POISON_END;
1538}
1539
1540static void dump_line(char *data, int offset, int limit)
1541{
1542 int i;
1543 unsigned char error = 0;
1544 int bad_count = 0;
1545
1546 pr_err("%03x: ", offset);
1547 for (i = 0; i < limit; i++) {
1548 if (data[offset + i] != POISON_FREE) {
1549 error = data[offset + i];
1550 bad_count++;
1551 }
1552 }
1553 print_hex_dump(KERN_CONT, "", 0, 16, 1,
1554 &data[offset], limit, 1);
1555
1556 if (bad_count == 1) {
1557 error ^= POISON_FREE;
1558 if (!(error & (error - 1))) {
1559 pr_err("Single bit error detected. Probably bad RAM.\n");
1560#ifdef CONFIG_X86
1561 pr_err("Run memtest86+ or a similar memory test tool.\n");
1562#else
1563 pr_err("Run a memory test tool.\n");
1564#endif
1565 }
1566 }
1567}
1568#endif
1569
1570#if DEBUG
1571
1572static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1573{
1574 int i, size;
1575 char *realobj;
1576
1577 if (cachep->flags & SLAB_RED_ZONE) {
1578 pr_err("Redzone: 0x%llx/0x%llx\n",
1579 *dbg_redzone1(cachep, objp),
1580 *dbg_redzone2(cachep, objp));
1581 }
1582
1583 if (cachep->flags & SLAB_STORE_USER)
1584 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1585 realobj = (char *)objp + obj_offset(cachep);
1586 size = cachep->object_size;
1587 for (i = 0; i < size && lines; i += 16, lines--) {
1588 int limit;
1589 limit = 16;
1590 if (i + limit > size)
1591 limit = size - i;
1592 dump_line(realobj, i, limit);
1593 }
1594}
1595
1596static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1597{
1598 char *realobj;
1599 int size, i;
1600 int lines = 0;
1601
1602 if (is_debug_pagealloc_cache(cachep))
1603 return;
1604
1605 realobj = (char *)objp + obj_offset(cachep);
1606 size = cachep->object_size;
1607
1608 for (i = 0; i < size; i++) {
1609 char exp = POISON_FREE;
1610 if (i == size - 1)
1611 exp = POISON_END;
1612 if (realobj[i] != exp) {
1613 int limit;
1614
1615
1616 if (lines == 0) {
1617 pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1618 print_tainted(), cachep->name,
1619 realobj, size);
1620 print_objinfo(cachep, objp, 0);
1621 }
1622
1623 i = (i / 16) * 16;
1624 limit = 16;
1625 if (i + limit > size)
1626 limit = size - i;
1627 dump_line(realobj, i, limit);
1628 i += 16;
1629 lines++;
1630
1631 if (lines > 5)
1632 break;
1633 }
1634 }
1635 if (lines != 0) {
1636
1637
1638
1639 struct page *page = virt_to_head_page(objp);
1640 unsigned int objnr;
1641
1642 objnr = obj_to_index(cachep, page, objp);
1643 if (objnr) {
1644 objp = index_to_obj(cachep, page, objnr - 1);
1645 realobj = (char *)objp + obj_offset(cachep);
1646 pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
1647 print_objinfo(cachep, objp, 2);
1648 }
1649 if (objnr + 1 < cachep->num) {
1650 objp = index_to_obj(cachep, page, objnr + 1);
1651 realobj = (char *)objp + obj_offset(cachep);
1652 pr_err("Next obj: start=%px, len=%d\n", realobj, size);
1653 print_objinfo(cachep, objp, 2);
1654 }
1655 }
1656}
1657#endif
1658
1659#if DEBUG
1660static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1661 struct page *page)
1662{
1663 int i;
1664
1665 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1666 poison_obj(cachep, page->freelist - obj_offset(cachep),
1667 POISON_FREE);
1668 }
1669
1670 for (i = 0; i < cachep->num; i++) {
1671 void *objp = index_to_obj(cachep, page, i);
1672
1673 if (cachep->flags & SLAB_POISON) {
1674 check_poison_obj(cachep, objp);
1675 slab_kernel_map(cachep, objp, 1, 0);
1676 }
1677 if (cachep->flags & SLAB_RED_ZONE) {
1678 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1679 slab_error(cachep, "start of a freed object was overwritten");
1680 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1681 slab_error(cachep, "end of a freed object was overwritten");
1682 }
1683 }
1684}
1685#else
1686static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1687 struct page *page)
1688{
1689}
1690#endif
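
/*
 * Destroy all the objects in a slab page and release the memory back to
 * the page allocator.  The page must already be unlinked from the cache's
 * lists; for SLAB_TYPESAFE_BY_RCU caches the actual freeing is deferred
 * to an RCU callback (kmem_rcu_free).
 */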
static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
		call_rcu(&page->rcu_head, kmem_rcu_free);
	else
		kmem_freepages(cachep, page);

	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->freelist_cache, freelist);
}
1719
1720static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1721{
1722 struct page *page, *n;
1723
1724 list_for_each_entry_safe(page, n, list, lru) {
1725 list_del(&page->lru);
1726 slab_destroy(cachep, page);
1727 }
1728}
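
/*
 * calculate_slab_order - choose the page order for this cache's slabs.
 * Orders are tried from 0 upwards; an order is accepted once at least one
 * object fits, and the search stops early for reclaimable caches, once
 * slab_max_order is reached, or once internal fragmentation is no more
 * than 1/8 of the slab size.
 */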
1742static size_t calculate_slab_order(struct kmem_cache *cachep,
1743 size_t size, slab_flags_t flags)
1744{
1745 size_t left_over = 0;
1746 int gfporder;
1747
1748 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1749 unsigned int num;
1750 size_t remainder;
1751
1752 num = cache_estimate(gfporder, size, flags, &remainder);
1753 if (!num)
1754 continue;
1755
1756
1757 if (num > SLAB_OBJ_MAX_NUM)
1758 break;
1759
1760 if (flags & CFLGS_OFF_SLAB) {
1761 struct kmem_cache *freelist_cache;
1762 size_t freelist_size;
1763
1764 freelist_size = num * sizeof(freelist_idx_t);
1765 freelist_cache = kmalloc_slab(freelist_size, 0u);
1766 if (!freelist_cache)
1767 continue;
1768
1769
1770
1771
1772
1773 if (OFF_SLAB(freelist_cache))
1774 continue;
1775
1776
1777 if (freelist_cache->size > cachep->size / 2)
1778 continue;
1779 }
1780
1781
1782 cachep->num = num;
1783 cachep->gfporder = gfporder;
1784 left_over = remainder;
1785
1786
1787
1788
1789
1790
1791 if (flags & SLAB_RECLAIM_ACCOUNT)
1792 break;
1793
1794
1795
1796
1797
1798 if (gfporder >= slab_max_order)
1799 break;
1800
1801
1802
1803
1804 if (left_over * 8 <= (PAGE_SIZE << gfporder))
1805 break;
1806 }
1807 return left_over;
1808}
1809
1810static struct array_cache __percpu *alloc_kmem_cache_cpus(
1811 struct kmem_cache *cachep, int entries, int batchcount)
1812{
1813 int cpu;
1814 size_t size;
1815 struct array_cache __percpu *cpu_cache;
1816
1817 size = sizeof(void *) * entries + sizeof(struct array_cache);
1818 cpu_cache = __alloc_percpu(size, sizeof(void *));
1819
1820 if (!cpu_cache)
1821 return NULL;
1822
1823 for_each_possible_cpu(cpu) {
1824 init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1825 entries, batchcount);
1826 }
1827
1828 return cpu_cache;
1829}
1830
1831static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1832{
1833 if (slab_state >= FULL)
1834 return enable_cpucache(cachep, gfp);
1835
1836 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1837 if (!cachep->cpu_cache)
1838 return 1;
1839
1840 if (slab_state == DOWN) {
1841
1842 set_up_node(kmem_cache, CACHE_CACHE);
1843 } else if (slab_state == PARTIAL) {
1844
1845 set_up_node(cachep, SIZE_NODE);
1846 } else {
1847 int node;
1848
1849 for_each_online_node(node) {
1850 cachep->node[node] = kmalloc_node(
1851 sizeof(struct kmem_cache_node), gfp, node);
1852 BUG_ON(!cachep->node[node]);
1853 kmem_cache_node_init(cachep->node[node]);
1854 }
1855 }
1856
1857 cachep->node[numa_mem_id()]->next_reap =
1858 jiffies + REAPTIMEOUT_NODE +
1859 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1860
1861 cpu_cache_get(cachep)->avail = 0;
1862 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1863 cpu_cache_get(cachep)->batchcount = 1;
1864 cpu_cache_get(cachep)->touched = 0;
1865 cachep->batchcount = 1;
1866 cachep->limit = BOOT_CPUCACHE_ENTRIES;
1867 return 0;
1868}
1869
1870slab_flags_t kmem_cache_flags(unsigned int object_size,
1871 slab_flags_t flags, const char *name,
1872 void (*ctor)(void *))
1873{
1874 return flags;
1875}
1876
1877struct kmem_cache *
1878__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
1879 slab_flags_t flags, void (*ctor)(void *))
1880{
1881 struct kmem_cache *cachep;
1882
1883 cachep = find_mergeable(size, align, flags, name, ctor);
1884 if (cachep) {
1885 cachep->refcount++;
1886
1887
1888
1889
1890
1891 cachep->object_size = max_t(int, cachep->object_size, size);
1892 }
1893 return cachep;
1894}
1895
1896static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1897 size_t size, slab_flags_t flags)
1898{
1899 size_t left;
1900
1901 cachep->num = 0;
1902
1903 if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU)
1904 return false;
1905
1906 left = calculate_slab_order(cachep, size,
1907 flags | CFLGS_OBJFREELIST_SLAB);
1908 if (!cachep->num)
1909 return false;
1910
1911 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1912 return false;
1913
1914 cachep->colour = left / cachep->colour_off;
1915
1916 return true;
1917}
1918
1919static bool set_off_slab_cache(struct kmem_cache *cachep,
1920 size_t size, slab_flags_t flags)
1921{
1922 size_t left;
1923
1924 cachep->num = 0;
1925
1926
1927
1928
1929
1930 if (flags & SLAB_NOLEAKTRACE)
1931 return false;
1932
1933
1934
1935
1936
1937 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1938 if (!cachep->num)
1939 return false;
1940
1941
1942
1943
1944
1945 if (left >= cachep->num * sizeof(freelist_idx_t))
1946 return false;
1947
1948 cachep->colour = left / cachep->colour_off;
1949
1950 return true;
1951}
1952
1953static bool set_on_slab_cache(struct kmem_cache *cachep,
1954 size_t size, slab_flags_t flags)
1955{
1956 size_t left;
1957
1958 cachep->num = 0;
1959
1960 left = calculate_slab_order(cachep, size, flags);
1961 if (!cachep->num)
1962 return false;
1963
1964 cachep->colour = left / cachep->colour_off;
1965
1966 return true;
1967}
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
1991{
1992 size_t ralign = BYTES_PER_WORD;
1993 gfp_t gfp;
1994 int err;
1995 unsigned int size = cachep->size;
1996
1997#if DEBUG
1998#if FORCED_DEBUG
1999
2000
2001
2002
2003
2004
2005 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2006 2 * sizeof(unsigned long long)))
2007 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2008 if (!(flags & SLAB_TYPESAFE_BY_RCU))
2009 flags |= SLAB_POISON;
2010#endif
2011#endif
2012
2013
2014
2015
2016
2017
2018 size = ALIGN(size, BYTES_PER_WORD);
2019
2020 if (flags & SLAB_RED_ZONE) {
2021 ralign = REDZONE_ALIGN;
2022
2023
2024 size = ALIGN(size, REDZONE_ALIGN);
2025 }
2026
2027
2028 if (ralign < cachep->align) {
2029 ralign = cachep->align;
2030 }
2031
2032 if (ralign > __alignof__(unsigned long long))
2033 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2034
2035
2036
2037 cachep->align = ralign;
2038 cachep->colour_off = cache_line_size();
2039
2040 if (cachep->colour_off < cachep->align)
2041 cachep->colour_off = cachep->align;
2042
2043 if (slab_is_available())
2044 gfp = GFP_KERNEL;
2045 else
2046 gfp = GFP_NOWAIT;
2047
2048#if DEBUG
2049
2050
2051
2052
2053
2054 if (flags & SLAB_RED_ZONE) {
2055
2056 cachep->obj_offset += sizeof(unsigned long long);
2057 size += 2 * sizeof(unsigned long long);
2058 }
2059 if (flags & SLAB_STORE_USER) {
2060
2061
2062
2063
2064 if (flags & SLAB_RED_ZONE)
2065 size += REDZONE_ALIGN;
2066 else
2067 size += BYTES_PER_WORD;
2068 }
2069#endif
2070
2071 kasan_cache_create(cachep, &size, &flags);
2072
2073 size = ALIGN(size, cachep->align);
2074
2075
2076
2077
2078 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2079 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2080
2081#if DEBUG
2082
2083
2084
2085
2086
2087
2088
2089 if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
2090 size >= 256 && cachep->object_size > cache_line_size()) {
2091 if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2092 size_t tmp_size = ALIGN(size, PAGE_SIZE);
2093
2094 if (set_off_slab_cache(cachep, tmp_size, flags)) {
2095 flags |= CFLGS_OFF_SLAB;
2096 cachep->obj_offset += tmp_size - size;
2097 size = tmp_size;
2098 goto done;
2099 }
2100 }
2101 }
2102#endif
2103
2104 if (set_objfreelist_slab_cache(cachep, size, flags)) {
2105 flags |= CFLGS_OBJFREELIST_SLAB;
2106 goto done;
2107 }
2108
2109 if (set_off_slab_cache(cachep, size, flags)) {
2110 flags |= CFLGS_OFF_SLAB;
2111 goto done;
2112 }
2113
2114 if (set_on_slab_cache(cachep, size, flags))
2115 goto done;
2116
2117 return -E2BIG;
2118
2119done:
2120 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2121 cachep->flags = flags;
2122 cachep->allocflags = __GFP_COMP;
2123 if (flags & SLAB_CACHE_DMA)
2124 cachep->allocflags |= GFP_DMA;
2125 if (flags & SLAB_RECLAIM_ACCOUNT)
2126 cachep->allocflags |= __GFP_RECLAIMABLE;
2127 cachep->size = size;
2128 cachep->reciprocal_buffer_size = reciprocal_value(size);
2129
2130#if DEBUG
2131
2132
2133
2134
2135
2136 if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2137 (cachep->flags & SLAB_POISON) &&
2138 is_debug_pagealloc_cache(cachep))
2139 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2140#endif
2141
2142 if (OFF_SLAB(cachep)) {
2143 cachep->freelist_cache =
2144 kmalloc_slab(cachep->freelist_size, 0u);
2145 }
2146
2147 err = setup_cpu_cache(cachep, gfp);
2148 if (err) {
2149 __kmem_cache_release(cachep);
2150 return err;
2151 }
2152
2153 return 0;
2154}
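
/*
 * Example (illustrative, not part of this file) of how a client uses the
 * cache that __kmem_cache_create() sets up, via the public slab API:
 *
 *	struct foo { int a; struct list_head list; };
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */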
2155
2156#if DEBUG
2157static void check_irq_off(void)
2158{
2159 BUG_ON(!irqs_disabled());
2160}
2161
2162static void check_irq_on(void)
2163{
2164 BUG_ON(irqs_disabled());
2165}
2166
2167static void check_mutex_acquired(void)
2168{
2169 BUG_ON(!mutex_is_locked(&slab_mutex));
2170}
2171
2172static void check_spinlock_acquired(struct kmem_cache *cachep)
2173{
2174#ifdef CONFIG_SMP
2175 check_irq_off();
2176 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2177#endif
2178}
2179
2180static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2181{
2182#ifdef CONFIG_SMP
2183 check_irq_off();
2184 assert_spin_locked(&get_node(cachep, node)->list_lock);
2185#endif
2186}
2187
2188#else
2189#define check_irq_off() do { } while(0)
2190#define check_irq_on() do { } while(0)
2191#define check_mutex_acquired() do { } while(0)
2192#define check_spinlock_acquired(x) do { } while(0)
2193#define check_spinlock_acquired_node(x, y) do { } while(0)
2194#endif
2195
2196static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2197 int node, bool free_all, struct list_head *list)
2198{
2199 int tofree;
2200
2201 if (!ac || !ac->avail)
2202 return;
2203
2204 tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2205 if (tofree > ac->avail)
2206 tofree = (ac->avail + 1) / 2;
2207
2208 free_block(cachep, ac->entry, tofree, node, list);
2209 ac->avail -= tofree;
2210 memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2211}
2212
2213static void do_drain(void *arg)
2214{
2215 struct kmem_cache *cachep = arg;
2216 struct array_cache *ac;
2217 int node = numa_mem_id();
2218 struct kmem_cache_node *n;
2219 LIST_HEAD(list);
2220
2221 check_irq_off();
2222 ac = cpu_cache_get(cachep);
2223 n = get_node(cachep, node);
2224 spin_lock(&n->list_lock);
2225 free_block(cachep, ac->entry, ac->avail, node, &list);
2226 spin_unlock(&n->list_lock);
2227 slabs_destroy(cachep, &list);
2228 ac->avail = 0;
2229}
2230
2231static void drain_cpu_caches(struct kmem_cache *cachep)
2232{
2233 struct kmem_cache_node *n;
2234 int node;
2235 LIST_HEAD(list);
2236
2237 on_each_cpu(do_drain, cachep, 1);
2238 check_irq_on();
2239 for_each_kmem_cache_node(cachep, node, n)
2240 if (n->alien)
2241 drain_alien_cache(cachep, n->alien);
2242
2243 for_each_kmem_cache_node(cachep, node, n) {
2244 spin_lock_irq(&n->list_lock);
2245 drain_array_locked(cachep, n->shared, node, true, &list);
2246 spin_unlock_irq(&n->list_lock);
2247
2248 slabs_destroy(cachep, &list);
2249 }
2250}
2251
2252
2253
2254
2255
2256
2257
2258static int drain_freelist(struct kmem_cache *cache,
2259 struct kmem_cache_node *n, int tofree)
2260{
2261 struct list_head *p;
2262 int nr_freed;
2263 struct page *page;
2264
2265 nr_freed = 0;
2266 while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2267
2268 spin_lock_irq(&n->list_lock);
2269 p = n->slabs_free.prev;
2270 if (p == &n->slabs_free) {
2271 spin_unlock_irq(&n->list_lock);
2272 goto out;
2273 }
2274
2275 page = list_entry(p, struct page, lru);
2276 list_del(&page->lru);
2277 n->free_slabs--;
2278 n->total_slabs--;
2279
2280
2281
2282
2283 n->free_objects -= cache->num;
2284 spin_unlock_irq(&n->list_lock);
2285 slab_destroy(cache, page);
2286 nr_freed++;
2287 }
2288out:
2289 return nr_freed;
2290}
2291
2292bool __kmem_cache_empty(struct kmem_cache *s)
2293{
2294 int node;
2295 struct kmem_cache_node *n;
2296
2297 for_each_kmem_cache_node(s, node, n)
2298 if (!list_empty(&n->slabs_full) ||
2299 !list_empty(&n->slabs_partial))
2300 return false;
2301 return true;
2302}
2303
2304int __kmem_cache_shrink(struct kmem_cache *cachep)
2305{
2306 int ret = 0;
2307 int node;
2308 struct kmem_cache_node *n;
2309
2310 drain_cpu_caches(cachep);
2311
2312 check_irq_on();
2313 for_each_kmem_cache_node(cachep, node, n) {
2314 drain_freelist(cachep, n, INT_MAX);
2315
2316 ret += !list_empty(&n->slabs_full) ||
2317 !list_empty(&n->slabs_partial);
2318 }
2319 return (ret ? 1 : 0);
2320}
2321
2322#ifdef CONFIG_MEMCG
2323void __kmemcg_cache_deactivate(struct kmem_cache *cachep)
2324{
2325 __kmem_cache_shrink(cachep);
2326}
2327#endif
2328
2329int __kmem_cache_shutdown(struct kmem_cache *cachep)
2330{
2331 return __kmem_cache_shrink(cachep);
2332}
2333
2334void __kmem_cache_release(struct kmem_cache *cachep)
2335{
2336 int i;
2337 struct kmem_cache_node *n;
2338
2339 cache_random_seq_destroy(cachep);
2340
2341 free_percpu(cachep->cpu_cache);
2342
2343
2344 for_each_kmem_cache_node(cachep, i, n) {
2345 kfree(n->shared);
2346 free_alien_cache(n->alien);
2347 kfree(n);
2348 cachep->node[i] = NULL;
2349 }
2350}
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366static void *alloc_slabmgmt(struct kmem_cache *cachep,
2367 struct page *page, int colour_off,
2368 gfp_t local_flags, int nodeid)
2369{
2370 void *freelist;
2371 void *addr = page_address(page);
2372
2373 page->s_mem = addr + colour_off;
2374 page->active = 0;
2375
2376 if (OBJFREELIST_SLAB(cachep))
2377 freelist = NULL;
2378 else if (OFF_SLAB(cachep)) {
2379
2380 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2381 local_flags, nodeid);
2382 if (!freelist)
2383 return NULL;
2384 } else {
2385
2386 freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2387 cachep->freelist_size;
2388 }
2389
2390 return freelist;
2391}
2392
2393static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2394{
2395 return ((freelist_idx_t *)page->freelist)[idx];
2396}
2397
2398static inline void set_free_obj(struct page *page,
2399 unsigned int idx, freelist_idx_t val)
2400{
2401 ((freelist_idx_t *)(page->freelist))[idx] = val;
2402}
2403
2404static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2405{
2406#if DEBUG
2407 int i;
2408
2409 for (i = 0; i < cachep->num; i++) {
2410 void *objp = index_to_obj(cachep, page, i);
2411
2412 if (cachep->flags & SLAB_STORE_USER)
2413 *dbg_userword(cachep, objp) = NULL;
2414
2415 if (cachep->flags & SLAB_RED_ZONE) {
2416 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2417 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2418 }
2419
2420
2421
2422
2423
2424 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2425 kasan_unpoison_object_data(cachep,
2426 objp + obj_offset(cachep));
2427 cachep->ctor(objp + obj_offset(cachep));
2428 kasan_poison_object_data(
2429 cachep, objp + obj_offset(cachep));
2430 }
2431
2432 if (cachep->flags & SLAB_RED_ZONE) {
2433 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2434 slab_error(cachep, "constructor overwrote the end of an object");
2435 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2436 slab_error(cachep, "constructor overwrote the start of an object");
2437 }
2438
2439 if (cachep->flags & SLAB_POISON) {
2440 poison_obj(cachep, objp, POISON_FREE);
2441 slab_kernel_map(cachep, objp, 0, 0);
2442 }
2443 }
2444#endif
2445}
2446
2447#ifdef CONFIG_SLAB_FREELIST_RANDOM
2448
2449union freelist_init_state {
2450 struct {
2451 unsigned int pos;
2452 unsigned int *list;
2453 unsigned int count;
2454 };
2455 struct rnd_state rnd_state;
2456};
2457
2458
2459
2460
2461
2462static bool freelist_state_initialize(union freelist_init_state *state,
2463 struct kmem_cache *cachep,
2464 unsigned int count)
2465{
2466 bool ret;
2467 unsigned int rand;
2468
2469
2470 rand = get_random_int();
2471
2472
2473 if (!cachep->random_seq) {
2474 prandom_seed_state(&state->rnd_state, rand);
2475 ret = false;
2476 } else {
2477 state->list = cachep->random_seq;
2478 state->count = count;
2479 state->pos = rand % count;
2480 ret = true;
2481 }
2482 return ret;
2483}
2484
2485
2486static freelist_idx_t next_random_slot(union freelist_init_state *state)
2487{
2488 if (state->pos >= state->count)
2489 state->pos = 0;
2490 return state->list[state->pos++];
2491}
2492
2493
2494static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2495{
2496 swap(((freelist_idx_t *)page->freelist)[a],
2497 ((freelist_idx_t *)page->freelist)[b]);
2498}
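
/*
 * Shuffle the initial freelist so object addresses are handed out in an
 * unpredictable order.  The cache's precomputed random sequence is used
 * when available; otherwise a Fisher-Yates shuffle driven by a freshly
 * seeded prandom state is applied.  Returns true if the list was shuffled.
 */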
2504static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2505{
2506 unsigned int objfreelist = 0, i, rand, count = cachep->num;
2507 union freelist_init_state state;
2508 bool precomputed;
2509
2510 if (count < 2)
2511 return false;
2512
2513 precomputed = freelist_state_initialize(&state, cachep, count);
2514
2515
2516 if (OBJFREELIST_SLAB(cachep)) {
2517 if (!precomputed)
2518 objfreelist = count - 1;
2519 else
2520 objfreelist = next_random_slot(&state);
2521 page->freelist = index_to_obj(cachep, page, objfreelist) +
2522 obj_offset(cachep);
2523 count--;
2524 }
2525
2526
2527
2528
2529
2530 if (!precomputed) {
2531 for (i = 0; i < count; i++)
2532 set_free_obj(page, i, i);
2533
2534
2535 for (i = count - 1; i > 0; i--) {
2536 rand = prandom_u32_state(&state.rnd_state);
2537 rand %= (i + 1);
2538 swap_free_obj(page, i, rand);
2539 }
2540 } else {
2541 for (i = 0; i < count; i++)
2542 set_free_obj(page, i, next_random_slot(&state));
2543 }
2544
2545 if (OBJFREELIST_SLAB(cachep))
2546 set_free_obj(page, cachep->num - 1, objfreelist);
2547
2548 return true;
2549}
2550#else
2551static inline bool shuffle_freelist(struct kmem_cache *cachep,
2552 struct page *page)
2553{
2554 return false;
2555}
2556#endif
2557
2558static void cache_init_objs(struct kmem_cache *cachep,
2559 struct page *page)
2560{
2561 int i;
2562 void *objp;
2563 bool shuffled;
2564
2565 cache_init_objs_debug(cachep, page);
2566
2567
2568 shuffled = shuffle_freelist(cachep, page);
2569
2570 if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2571 page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2572 obj_offset(cachep);
2573 }
2574
2575 for (i = 0; i < cachep->num; i++) {
2576 objp = index_to_obj(cachep, page, i);
2577 kasan_init_slab_obj(cachep, objp);
2578
2579
2580 if (DEBUG == 0 && cachep->ctor) {
2581 kasan_unpoison_object_data(cachep, objp);
2582 cachep->ctor(objp);
2583 kasan_poison_object_data(cachep, objp);
2584 }
2585
2586 if (!shuffled)
2587 set_free_obj(page, i, i);
2588 }
2589}
2590
2591static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2592{
2593 void *objp;
2594
2595 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2596 page->active++;
2597
2598#if DEBUG
2599 if (cachep->flags & SLAB_STORE_USER)
2600 set_store_user_dirty(cachep);
2601#endif
2602
2603 return objp;
2604}
2605
2606static void slab_put_obj(struct kmem_cache *cachep,
2607 struct page *page, void *objp)
2608{
2609 unsigned int objnr = obj_to_index(cachep, page, objp);
2610#if DEBUG
2611 unsigned int i;
2612
2613
2614 for (i = page->active; i < cachep->num; i++) {
2615 if (get_free_obj(page, i) == objnr) {
2616 pr_err("slab: double free detected in cache '%s', objp %px\n",
2617 cachep->name, objp);
2618 BUG();
2619 }
2620 }
2621#endif
2622 page->active--;
2623 if (!page->freelist)
2624 page->freelist = objp + obj_offset(cachep);
2625
2626 set_free_obj(page, page->active, objnr);
2627}
2628
2629
2630
2631
2632
2633
2634static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2635 void *freelist)
2636{
2637 page->slab_cache = cache;
2638 page->freelist = freelist;
2639}
2640
2641
2642
2643
2644
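/*
 * Grow (by 1) the number of slabs within a cache.  This is called by
 * kmem_cache_alloc() when there are no active objs left in a cache.
 */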
2645static struct page *cache_grow_begin(struct kmem_cache *cachep,
2646 gfp_t flags, int nodeid)
2647{
2648 void *freelist;
2649 size_t offset;
2650 gfp_t local_flags;
2651 int page_node;
2652 struct kmem_cache_node *n;
2653 struct page *page;
2654
2655
2656
2657
2658
2659 if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2660 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
2661 flags &= ~GFP_SLAB_BUG_MASK;
2662 pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
2663 invalid_mask, &invalid_mask, flags, &flags);
2664 dump_stack();
2665 }
2666 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
2667 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2668
2669 check_irq_off();
2670 if (gfpflags_allow_blocking(local_flags))
2671 local_irq_enable();
2672
2673
2674
2675
2676
2677 page = kmem_getpages(cachep, local_flags, nodeid);
2678 if (!page)
2679 goto failed;
2680
2681 page_node = page_to_nid(page);
2682 n = get_node(cachep, page_node);
2683
2684
2685 n->colour_next++;
2686 if (n->colour_next >= cachep->colour)
2687 n->colour_next = 0;
2688
2689 offset = n->colour_next;
2690 if (offset >= cachep->colour)
2691 offset = 0;
2692
2693 offset *= cachep->colour_off;
2694
2695
2696 freelist = alloc_slabmgmt(cachep, page, offset,
2697 local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2698 if (OFF_SLAB(cachep) && !freelist)
2699 goto opps1;
2700
2701 slab_map_pages(cachep, page, freelist);
2702
2703 kasan_poison_slab(page);
2704 cache_init_objs(cachep, page);
2705
2706 if (gfpflags_allow_blocking(local_flags))
2707 local_irq_disable();
2708
2709 return page;
2710
2711opps1:
2712 kmem_freepages(cachep, page);
2713failed:
2714 if (gfpflags_allow_blocking(local_flags))
2715 local_irq_disable();
2716 return NULL;
2717}
2718
2719static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2720{
2721 struct kmem_cache_node *n;
2722 void *list = NULL;
2723
2724 check_irq_off();
2725
2726 if (!page)
2727 return;
2728
2729 INIT_LIST_HEAD(&page->lru);
2730 n = get_node(cachep, page_to_nid(page));
2731
2732 spin_lock(&n->list_lock);
2733 n->total_slabs++;
2734 if (!page->active) {
2735 list_add_tail(&page->lru, &(n->slabs_free));
2736 n->free_slabs++;
2737 } else
2738 fixup_slab_list(cachep, n, page, &list);
2739
2740 STATS_INC_GROWN(cachep);
2741 n->free_objects += cachep->num - page->active;
2742 spin_unlock(&n->list_lock);
2743
2744 fixup_objfreelist_debug(cachep, &list);
2745}
2746
2747#if DEBUG
2748
2749
2750
2751
2752
2753
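/*
 * Perform extra freeing checks:
 * - detect bad pointers.
 * - POISON/RED_ZONE checking
 */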
2754static void kfree_debugcheck(const void *objp)
2755{
2756 if (!virt_addr_valid(objp)) {
2757 pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2758 (unsigned long)objp);
2759 BUG();
2760 }
2761}
2762
2763static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2764{
2765 unsigned long long redzone1, redzone2;
2766
2767 redzone1 = *dbg_redzone1(cache, obj);
2768 redzone2 = *dbg_redzone2(cache, obj);
2769
2770
2771
2772
2773 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2774 return;
2775
2776 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2777 slab_error(cache, "double free detected");
2778 else
2779 slab_error(cache, "memory outside object was overwritten");
2780
2781 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2782 obj, redzone1, redzone2);
2783}
2784
2785static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2786 unsigned long caller)
2787{
2788 unsigned int objnr;
2789 struct page *page;
2790
2791 BUG_ON(virt_to_cache(objp) != cachep);
2792
2793 objp -= obj_offset(cachep);
2794 kfree_debugcheck(objp);
2795 page = virt_to_head_page(objp);
2796
2797 if (cachep->flags & SLAB_RED_ZONE) {
2798 verify_redzone_free(cachep, objp);
2799 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2800 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2801 }
2802 if (cachep->flags & SLAB_STORE_USER) {
2803 set_store_user_dirty(cachep);
2804 *dbg_userword(cachep, objp) = (void *)caller;
2805 }
2806
2807 objnr = obj_to_index(cachep, page, objp);
2808
2809 BUG_ON(objnr >= cachep->num);
2810 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2811
2812 if (cachep->flags & SLAB_POISON) {
2813 poison_obj(cachep, objp, POISON_FREE);
2814 slab_kernel_map(cachep, objp, 0, caller);
2815 }
2816 return objp;
2817}
2818
2819#else
2820#define kfree_debugcheck(x) do { } while(0)
2821#define cache_free_debugcheck(x,objp,z) (objp)
2822#endif
2823
2824static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2825 void **list)
2826{
2827#if DEBUG
2828 void *next = *list;
2829 void *objp;
2830
2831 while (next) {
2832 objp = next - obj_offset(cachep);
2833 next = *(void **)next;
2834 poison_obj(cachep, objp, POISON_FREE);
2835 }
2836#endif
2837}
2838
2839static inline void fixup_slab_list(struct kmem_cache *cachep,
2840 struct kmem_cache_node *n, struct page *page,
2841 void **list)
2842{
2843
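	/* move the slab to the correct list (full or partial) */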
2844 list_del(&page->lru);
2845 if (page->active == cachep->num) {
2846 list_add(&page->lru, &n->slabs_full);
2847 if (OBJFREELIST_SLAB(cachep)) {
2848#if DEBUG
2849
2850 if (cachep->flags & SLAB_POISON) {
2851 void **objp = page->freelist;
2852
2853 *objp = *list;
2854 *list = objp;
2855 }
2856#endif
2857 page->freelist = NULL;
2858 }
2859 } else
2860 list_add(&page->lru, &n->slabs_partial);
2861}
2862
2863
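/*
 * Callers that may not use pfmemalloc reserves get a non-pfmemalloc slab
 * where possible; pfmemalloc slabs are moved to the tail of their list so
 * that later searches skip them quickly.
 */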
2864static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2865 struct page *page, bool pfmemalloc)
2866{
2867 if (!page)
2868 return NULL;
2869
2870 if (pfmemalloc)
2871 return page;
2872
2873 if (!PageSlabPfmemalloc(page))
2874 return page;
2875
2876
2877 if (n->free_objects > n->free_limit) {
2878 ClearPageSlabPfmemalloc(page);
2879 return page;
2880 }
2881
2882
2883 list_del(&page->lru);
2884 if (!page->active) {
2885 list_add_tail(&page->lru, &n->slabs_free);
2886 n->free_slabs++;
2887 } else
2888 list_add_tail(&page->lru, &n->slabs_partial);
2889
2890 list_for_each_entry(page, &n->slabs_partial, lru) {
2891 if (!PageSlabPfmemalloc(page))
2892 return page;
2893 }
2894
2895 n->free_touched = 1;
2896 list_for_each_entry(page, &n->slabs_free, lru) {
2897 if (!PageSlabPfmemalloc(page)) {
2898 n->free_slabs--;
2899 return page;
2900 }
2901 }
2902
2903 return NULL;
2904}
2905
2906static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2907{
2908 struct page *page;
2909
2910 assert_spin_locked(&n->list_lock);
2911 page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
2912 if (!page) {
2913 n->free_touched = 1;
2914 page = list_first_entry_or_null(&n->slabs_free, struct page,
2915 lru);
2916 if (page)
2917 n->free_slabs--;
2918 }
2919
2920 if (sk_memalloc_socks())
2921 page = get_valid_first_slab(n, page, pfmemalloc);
2922
2923 return page;
2924}
2925
2926static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2927 struct kmem_cache_node *n, gfp_t flags)
2928{
2929 struct page *page;
2930 void *obj;
2931 void *list = NULL;
2932
2933 if (!gfp_pfmemalloc_allowed(flags))
2934 return NULL;
2935
2936 spin_lock(&n->list_lock);
2937 page = get_first_slab(n, true);
2938 if (!page) {
2939 spin_unlock(&n->list_lock);
2940 return NULL;
2941 }
2942
2943 obj = slab_get_obj(cachep, page);
2944 n->free_objects--;
2945
2946 fixup_slab_list(cachep, n, page, &list);
2947
2948 spin_unlock(&n->list_lock);
2949 fixup_objfreelist_debug(cachep, &list);
2950
2951 return obj;
2952}
2953
2954
2955
2956
2957
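/*
 * Pull objects off @page into the cpu array cache, at most @batchcount of
 * them.  The slab list is fixed up afterwards by fixup_slab_list() for an
 * existing slab, or by cache_grow_end() for a freshly grown one.
 */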
2958static __always_inline int alloc_block(struct kmem_cache *cachep,
2959 struct array_cache *ac, struct page *page, int batchcount)
2960{
2961
2962
2963
2964
2965 BUG_ON(page->active >= cachep->num);
2966
2967 while (page->active < cachep->num && batchcount--) {
2968 STATS_INC_ALLOCED(cachep);
2969 STATS_INC_ACTIVE(cachep);
2970 STATS_SET_HIGH(cachep);
2971
2972 ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2973 }
2974
2975 return batchcount;
2976}
2977
2978static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2979{
2980 int batchcount;
2981 struct kmem_cache_node *n;
2982 struct array_cache *ac, *shared;
2983 int node;
2984 void *list = NULL;
2985 struct page *page;
2986
2987 check_irq_off();
2988 node = numa_mem_id();
2989
2990 ac = cpu_cache_get(cachep);
2991 batchcount = ac->batchcount;
2992 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2993
2994
2995
2996
2997
2998 batchcount = BATCHREFILL_LIMIT;
2999 }
3000 n = get_node(cachep, node);
3001
3002 BUG_ON(ac->avail > 0 || !n);
3003 shared = READ_ONCE(n->shared);
3004 if (!n->free_objects && (!shared || !shared->avail))
3005 goto direct_grow;
3006
3007 spin_lock(&n->list_lock);
3008 shared = READ_ONCE(n->shared);
3009
3010
3011 if (shared && transfer_objects(ac, shared, batchcount)) {
3012 shared->touched = 1;
3013 goto alloc_done;
3014 }
3015
3016 while (batchcount > 0) {
3017
3018 page = get_first_slab(n, false);
3019 if (!page)
3020 goto must_grow;
3021
3022 check_spinlock_acquired(cachep);
3023
3024 batchcount = alloc_block(cachep, ac, page, batchcount);
3025 fixup_slab_list(cachep, n, page, &list);
3026 }
3027
3028must_grow:
3029 n->free_objects -= ac->avail;
3030alloc_done:
3031 spin_unlock(&n->list_lock);
3032 fixup_objfreelist_debug(cachep, &list);
3033
3034direct_grow:
3035 if (unlikely(!ac->avail)) {
3036
3037 if (sk_memalloc_socks()) {
3038 void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
3039
3040 if (obj)
3041 return obj;
3042 }
3043
3044 page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
3045
3046
3047
3048
3049
3050 ac = cpu_cache_get(cachep);
3051 if (!ac->avail && page)
3052 alloc_block(cachep, ac, page, batchcount);
3053 cache_grow_end(cachep, page);
3054
3055 if (!ac->avail)
3056 return NULL;
3057 }
3058 ac->touched = 1;
3059
3060 return ac->entry[--ac->avail];
3061}
3062
3063static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3064 gfp_t flags)
3065{
3066 might_sleep_if(gfpflags_allow_blocking(flags));
3067}
3068
3069#if DEBUG
3070static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3071 gfp_t flags, void *objp, unsigned long caller)
3072{
3073 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
3074 if (!objp)
3075 return objp;
3076 if (cachep->flags & SLAB_POISON) {
3077 check_poison_obj(cachep, objp);
3078 slab_kernel_map(cachep, objp, 1, 0);
3079 poison_obj(cachep, objp, POISON_INUSE);
3080 }
3081 if (cachep->flags & SLAB_STORE_USER)
3082 *dbg_userword(cachep, objp) = (void *)caller;
3083
3084 if (cachep->flags & SLAB_RED_ZONE) {
3085 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3086 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3087 slab_error(cachep, "double free, or memory outside object was overwritten");
3088 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3089 objp, *dbg_redzone1(cachep, objp),
3090 *dbg_redzone2(cachep, objp));
3091 }
3092 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3093 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3094 }
3095
3096 objp += obj_offset(cachep);
3097 if (cachep->ctor && cachep->flags & SLAB_POISON)
3098 cachep->ctor(objp);
3099 if (ARCH_SLAB_MINALIGN &&
3100 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3101 pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3102 objp, (int)ARCH_SLAB_MINALIGN);
3103 }
3104 return objp;
3105}
3106#else
3107#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3108#endif
3109
3110static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3111{
3112 void *objp;
3113 struct array_cache *ac;
3114
3115 check_irq_off();
3116
3117 ac = cpu_cache_get(cachep);
3118 if (likely(ac->avail)) {
3119 ac->touched = 1;
3120 objp = ac->entry[--ac->avail];
3121
3122 STATS_INC_ALLOCHIT(cachep);
3123 goto out;
3124 }
3125
3126 STATS_INC_ALLOCMISS(cachep);
3127 objp = cache_alloc_refill(cachep, flags);
3128
3129
3130
3131
3132 ac = cpu_cache_get(cachep);
3133
3134out:
3135
3136
3137
3138
3139
3140 if (objp)
3141 kmemleak_erase(&ac->entry[ac->avail]);
3142 return objp;
3143}
3144
3145#ifdef CONFIG_NUMA
3146
3147
3148
3149
3150
3151
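/*
 * Try allocating on another node if SLAB_MEM_SPREAD within a cpuset or the
 * task's mempolicy directs allocations away from the local node.
 */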
3152static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3153{
3154 int nid_alloc, nid_here;
3155
3156 if (in_interrupt() || (flags & __GFP_THISNODE))
3157 return NULL;
3158 nid_alloc = nid_here = numa_mem_id();
3159 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3160 nid_alloc = cpuset_slab_spread_node();
3161 else if (current->mempolicy)
3162 nid_alloc = mempolicy_slab_node();
3163 if (nid_alloc != nid_here)
3164 return ____cache_alloc_node(cachep, flags, nid_alloc);
3165 return NULL;
3166}
3167
3168
3169
3170
3171
3172
3173
3174
3175
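/*
 * Fallback function if there was no memory available and no objects on a
 * certain node and fallback is permitted.  First we scan all the available
 * node lists for available objects.  If that fails then we perform an
 * allocation without specifying a node.  This allows the page allocator to
 * do its reclaim/fallback magic.  We then insert the slab into the proper
 * node list and repeat the object search.
 */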
3176static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3177{
3178 struct zonelist *zonelist;
3179 struct zoneref *z;
3180 struct zone *zone;
3181 enum zone_type high_zoneidx = gfp_zone(flags);
3182 void *obj = NULL;
3183 struct page *page;
3184 int nid;
3185 unsigned int cpuset_mems_cookie;
3186
3187 if (flags & __GFP_THISNODE)
3188 return NULL;
3189
3190retry_cpuset:
3191 cpuset_mems_cookie = read_mems_allowed_begin();
3192 zonelist = node_zonelist(mempolicy_slab_node(), flags);
3193
3194retry:
3195
3196
3197
3198
3199 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3200 nid = zone_to_nid(zone);
3201
3202 if (cpuset_zone_allowed(zone, flags) &&
3203 get_node(cache, nid) &&
3204 get_node(cache, nid)->free_objects) {
3205 obj = ____cache_alloc_node(cache,
3206 gfp_exact_node(flags), nid);
3207 if (obj)
3208 break;
3209 }
3210 }
3211
3212 if (!obj) {
3213
3214
3215
3216
3217
3218
3219 page = cache_grow_begin(cache, flags, numa_mem_id());
3220 cache_grow_end(cache, page);
3221 if (page) {
3222 nid = page_to_nid(page);
3223 obj = ____cache_alloc_node(cache,
3224 gfp_exact_node(flags), nid);
3225
3226
3227
3228
3229
3230 if (!obj)
3231 goto retry;
3232 }
3233 }
3234
3235 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3236 goto retry_cpuset;
3237 return obj;
3238}
3239
3240
3241
3242
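/*
 * Allocate an object from the slab lists of a specific node, growing the
 * cache on that node if necessary.
 */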
3243static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3244 int nodeid)
3245{
3246 struct page *page;
3247 struct kmem_cache_node *n;
3248 void *obj = NULL;
3249 void *list = NULL;
3250
3251 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3252 n = get_node(cachep, nodeid);
3253 BUG_ON(!n);
3254
3255 check_irq_off();
3256 spin_lock(&n->list_lock);
3257 page = get_first_slab(n, false);
3258 if (!page)
3259 goto must_grow;
3260
3261 check_spinlock_acquired_node(cachep, nodeid);
3262
3263 STATS_INC_NODEALLOCS(cachep);
3264 STATS_INC_ACTIVE(cachep);
3265 STATS_SET_HIGH(cachep);
3266
3267 BUG_ON(page->active == cachep->num);
3268
3269 obj = slab_get_obj(cachep, page);
3270 n->free_objects--;
3271
3272 fixup_slab_list(cachep, n, page, &list);
3273
3274 spin_unlock(&n->list_lock);
3275 fixup_objfreelist_debug(cachep, &list);
3276 return obj;
3277
3278must_grow:
3279 spin_unlock(&n->list_lock);
3280 page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3281 if (page) {
3282
3283 obj = slab_get_obj(cachep, page);
3284 }
3285 cache_grow_end(cachep, page);
3286
3287 return obj ? obj : fallback_alloc(cachep, flags);
3288}
3289
3290static __always_inline void *
3291slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3292 unsigned long caller)
3293{
3294 unsigned long save_flags;
3295 void *ptr;
3296 int slab_node = numa_mem_id();
3297
3298 flags &= gfp_allowed_mask;
3299 cachep = slab_pre_alloc_hook(cachep, flags);
3300 if (unlikely(!cachep))
3301 return NULL;
3302
3303 cache_alloc_debugcheck_before(cachep, flags);
3304 local_irq_save(save_flags);
3305
3306 if (nodeid == NUMA_NO_NODE)
3307 nodeid = slab_node;
3308
3309 if (unlikely(!get_node(cachep, nodeid))) {
3310
3311 ptr = fallback_alloc(cachep, flags);
3312 goto out;
3313 }
3314
3315 if (nodeid == slab_node) {
3316
3317
3318
3319
3320
3321
3322 ptr = ____cache_alloc(cachep, flags);
3323 if (ptr)
3324 goto out;
3325 }
3326
3327 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3328 out:
3329 local_irq_restore(save_flags);
3330 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3331
3332 if (unlikely(flags & __GFP_ZERO) && ptr)
3333 memset(ptr, 0, cachep->object_size);
3334
3335 slab_post_alloc_hook(cachep, flags, 1, &ptr);
3336 return ptr;
3337}
3338
3339static __always_inline void *
3340__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3341{
3342 void *objp;
3343
3344 if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3345 objp = alternate_node_alloc(cache, flags);
3346 if (objp)
3347 goto out;
3348 }
3349 objp = ____cache_alloc(cache, flags);
3350
3351
3352
3353
3354
3355 if (!objp)
3356 objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3357
3358 out:
3359 return objp;
3360}
3361#else
3362
3363static __always_inline void *
3364__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3365{
3366 return ____cache_alloc(cachep, flags);
3367}
3368
3369#endif
3370
3371static __always_inline void *
3372slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3373{
3374 unsigned long save_flags;
3375 void *objp;
3376
3377 flags &= gfp_allowed_mask;
3378 cachep = slab_pre_alloc_hook(cachep, flags);
3379 if (unlikely(!cachep))
3380 return NULL;
3381
3382 cache_alloc_debugcheck_before(cachep, flags);
3383 local_irq_save(save_flags);
3384 objp = __do_cache_alloc(cachep, flags);
3385 local_irq_restore(save_flags);
3386 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3387 prefetchw(objp);
3388
3389 if (unlikely(flags & __GFP_ZERO) && objp)
3390 memset(objp, 0, cachep->object_size);
3391
3392 slab_post_alloc_hook(cachep, flags, 1, &objp);
3393 return objp;
3394}
3395
3396
3397
3398
3399
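/*
 * Caller needs to acquire the correct kmem_cache_node's list_lock.
 * Completely free slabs beyond the node's free_limit are moved onto @list
 * for the caller to destroy once the lock has been dropped.
 */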
3400static void free_block(struct kmem_cache *cachep, void **objpp,
3401 int nr_objects, int node, struct list_head *list)
3402{
3403 int i;
3404 struct kmem_cache_node *n = get_node(cachep, node);
3405 struct page *page;
3406
3407 n->free_objects += nr_objects;
3408
3409 for (i = 0; i < nr_objects; i++) {
3410 void *objp;
3411 struct page *page;
3412
3413 objp = objpp[i];
3414
3415 page = virt_to_head_page(objp);
3416 list_del(&page->lru);
3417 check_spinlock_acquired_node(cachep, node);
3418 slab_put_obj(cachep, page, objp);
3419 STATS_DEC_ACTIVE(cachep);
3420
3421
3422 if (page->active == 0) {
3423 list_add(&page->lru, &n->slabs_free);
3424 n->free_slabs++;
3425 } else {
3426
3427
3428
3429
3430 list_add_tail(&page->lru, &n->slabs_partial);
3431 }
3432 }
3433
3434 while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3435 n->free_objects -= cachep->num;
3436
3437 page = list_last_entry(&n->slabs_free, struct page, lru);
3438 list_move(&page->lru, list);
3439 n->free_slabs--;
3440 n->total_slabs--;
3441 }
3442}
3443
3444static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3445{
3446 int batchcount;
3447 struct kmem_cache_node *n;
3448 int node = numa_mem_id();
3449 LIST_HEAD(list);
3450
3451 batchcount = ac->batchcount;
3452
3453 check_irq_off();
3454 n = get_node(cachep, node);
3455 spin_lock(&n->list_lock);
3456 if (n->shared) {
3457 struct array_cache *shared_array = n->shared;
3458 int max = shared_array->limit - shared_array->avail;
3459 if (max) {
3460 if (batchcount > max)
3461 batchcount = max;
3462 memcpy(&(shared_array->entry[shared_array->avail]),
3463 ac->entry, sizeof(void *) * batchcount);
3464 shared_array->avail += batchcount;
3465 goto free_done;
3466 }
3467 }
3468
3469 free_block(cachep, ac->entry, batchcount, node, &list);
3470free_done:
3471#if STATS
3472 {
3473 int i = 0;
3474 struct page *page;
3475
3476 list_for_each_entry(page, &n->slabs_free, lru) {
3477 BUG_ON(page->active);
3478
3479 i++;
3480 }
3481 STATS_SET_FREEABLE(cachep, i);
3482 }
3483#endif
3484 spin_unlock(&n->list_lock);
3485 slabs_destroy(cachep, &list);
3486 ac->avail -= batchcount;
3487 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3488}
3489
3490
3491
3492
3493
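/*
 * Release an obj back to its cache. If the obj has a constructed state,
 * it must be in this state _before_ it is released.  Called with
 * interrupts disabled.
 */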
3494static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
3495 unsigned long caller)
3496{
3497
3498 if (kasan_slab_free(cachep, objp, _RET_IP_))
3499 return;
3500
3501 ___cache_free(cachep, objp, caller);
3502}
3503
3504void ___cache_free(struct kmem_cache *cachep, void *objp,
3505 unsigned long caller)
3506{
3507 struct array_cache *ac = cpu_cache_get(cachep);
3508
3509 check_irq_off();
3510 kmemleak_free_recursive(objp, cachep->flags);
3511 objp = cache_free_debugcheck(cachep, objp, caller);
3512
3513
3514
3515
3516
3517
3518
3519
3520 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3521 return;
3522
3523 if (ac->avail < ac->limit) {
3524 STATS_INC_FREEHIT(cachep);
3525 } else {
3526 STATS_INC_FREEMISS(cachep);
3527 cache_flusharray(cachep, ac);
3528 }
3529
3530 if (sk_memalloc_socks()) {
3531 struct page *page = virt_to_head_page(objp);
3532
3533 if (unlikely(PageSlabPfmemalloc(page))) {
3534 cache_free_pfmemalloc(cachep, page, objp);
3535 return;
3536 }
3537 }
3538
3539 ac->entry[ac->avail++] = objp;
3540}
3541
3542
3543
3544
3545
3546
3547
3548
3549
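/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.  The flags are only relevant
 * if the cache has no available objects.
 */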
3550void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3551{
3552 void *ret = slab_alloc(cachep, flags, _RET_IP_);
3553
3554 kasan_slab_alloc(cachep, ret, flags);
3555 trace_kmem_cache_alloc(_RET_IP_, ret,
3556 cachep->object_size, cachep->size, flags);
3557
3558 return ret;
3559}
3560EXPORT_SYMBOL(kmem_cache_alloc);
3561
3562static __always_inline void
3563cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3564 size_t size, void **p, unsigned long caller)
3565{
3566 size_t i;
3567
3568 for (i = 0; i < size; i++)
3569 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3570}
3571
3572int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3573 void **p)
3574{
3575 size_t i;
3576
3577 s = slab_pre_alloc_hook(s, flags);
3578 if (!s)
3579 return 0;
3580
3581 cache_alloc_debugcheck_before(s, flags);
3582
3583 local_irq_disable();
3584 for (i = 0; i < size; i++) {
3585 void *objp = __do_cache_alloc(s, flags);
3586
3587 if (unlikely(!objp))
3588 goto error;
3589 p[i] = objp;
3590 }
3591 local_irq_enable();
3592
3593 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3594
3595
3596 if (unlikely(flags & __GFP_ZERO))
3597 for (i = 0; i < size; i++)
3598 memset(p[i], 0, s->object_size);
3599
3600 slab_post_alloc_hook(s, flags, size, p);
3601
3602 return size;
3603error:
3604 local_irq_enable();
3605 cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3606 slab_post_alloc_hook(s, flags, i, p);
3607 __kmem_cache_free_bulk(s, i, p);
3608 return 0;
3609}
3610EXPORT_SYMBOL(kmem_cache_alloc_bulk);
3611
3612#ifdef CONFIG_TRACING
3613void *
3614kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3615{
3616 void *ret;
3617
3618 ret = slab_alloc(cachep, flags, _RET_IP_);
3619
3620 kasan_kmalloc(cachep, ret, size, flags);
3621 trace_kmalloc(_RET_IP_, ret,
3622 size, cachep->size, flags);
3623 return ret;
3624}
3625EXPORT_SYMBOL(kmem_cache_alloc_trace);
3626#endif
3627
3628#ifdef CONFIG_NUMA
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
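/**
 * kmem_cache_alloc_node - Allocate an object on the specified node
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 * @nodeid: node number of the target node.
 *
 * Identical to kmem_cache_alloc but it will allocate memory on the given
 * node, which can improve the performance for cpu bound structures.
 *
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */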
3640void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3641{
3642 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3643
3644 kasan_slab_alloc(cachep, ret, flags);
3645 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3646 cachep->object_size, cachep->size,
3647 flags, nodeid);
3648
3649 return ret;
3650}
3651EXPORT_SYMBOL(kmem_cache_alloc_node);
3652
3653#ifdef CONFIG_TRACING
3654void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3655 gfp_t flags,
3656 int nodeid,
3657 size_t size)
3658{
3659 void *ret;
3660
3661 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3662
3663 kasan_kmalloc(cachep, ret, size, flags);
3664 trace_kmalloc_node(_RET_IP_, ret,
3665 size, cachep->size,
3666 flags, nodeid);
3667 return ret;
3668}
3669EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3670#endif
3671
3672static __always_inline void *
3673__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3674{
3675 struct kmem_cache *cachep;
3676 void *ret;
3677
3678 cachep = kmalloc_slab(size, flags);
3679 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3680 return cachep;
3681 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3682 kasan_kmalloc(cachep, ret, size, flags);
3683
3684 return ret;
3685}
3686
3687void *__kmalloc_node(size_t size, gfp_t flags, int node)
3688{
3689 return __do_kmalloc_node(size, flags, node, _RET_IP_);
3690}
3691EXPORT_SYMBOL(__kmalloc_node);
3692
3693void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3694 int node, unsigned long caller)
3695{
3696 return __do_kmalloc_node(size, flags, node, caller);
3697}
3698EXPORT_SYMBOL(__kmalloc_node_track_caller);
3699#endif
3700
3701
3702
3703
3704
3705
3706
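/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 */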
3707static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3708 unsigned long caller)
3709{
3710 struct kmem_cache *cachep;
3711 void *ret;
3712
3713 cachep = kmalloc_slab(size, flags);
3714 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3715 return cachep;
3716 ret = slab_alloc(cachep, flags, caller);
3717
3718 kasan_kmalloc(cachep, ret, size, flags);
3719 trace_kmalloc(caller, ret,
3720 size, cachep->size, flags);
3721
3722 return ret;
3723}
3724
3725void *__kmalloc(size_t size, gfp_t flags)
3726{
3727 return __do_kmalloc(size, flags, _RET_IP_);
3728}
3729EXPORT_SYMBOL(__kmalloc);
3730
3731void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3732{
3733 return __do_kmalloc(size, flags, caller);
3734}
3735EXPORT_SYMBOL(__kmalloc_track_caller);
3736
3737
3738
3739
3740
3741
3742
3743
3744
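/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */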
3745void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3746{
3747 unsigned long flags;
3748 cachep = cache_from_obj(cachep, objp);
3749 if (!cachep)
3750 return;
3751
3752 local_irq_save(flags);
3753 debug_check_no_locks_freed(objp, cachep->object_size);
3754 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3755 debug_check_no_obj_freed(objp, cachep->object_size);
3756 __cache_free(cachep, objp, _RET_IP_);
3757 local_irq_restore(flags);
3758
3759 trace_kmem_cache_free(_RET_IP_, objp);
3760}
3761EXPORT_SYMBOL(kmem_cache_free);
3762
3763void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3764{
3765 struct kmem_cache *s;
3766 size_t i;
3767
3768 local_irq_disable();
3769 for (i = 0; i < size; i++) {
3770 void *objp = p[i];
3771
3772 if (!orig_s)
3773 s = virt_to_cache(objp);
3774 else
3775 s = cache_from_obj(orig_s, objp);
3776
3777 debug_check_no_locks_freed(objp, s->object_size);
3778 if (!(s->flags & SLAB_DEBUG_OBJECTS))
3779 debug_check_no_obj_freed(objp, s->object_size);
3780
3781 __cache_free(s, objp, _RET_IP_);
3782 }
3783 local_irq_enable();
3784
3785
3786}
3787EXPORT_SYMBOL(kmem_cache_free_bulk);
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
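/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */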
3798void kfree(const void *objp)
3799{
3800 struct kmem_cache *c;
3801 unsigned long flags;
3802
3803 trace_kfree(_RET_IP_, objp);
3804
3805 if (unlikely(ZERO_OR_NULL_PTR(objp)))
3806 return;
3807 local_irq_save(flags);
3808 kfree_debugcheck(objp);
3809 c = virt_to_cache(objp);
3810 debug_check_no_locks_freed(objp, c->object_size);
3811
3812 debug_check_no_obj_freed(objp, c->object_size);
3813 __cache_free(c, (void *)objp, _RET_IP_);
3814 local_irq_restore(flags);
3815}
3816EXPORT_SYMBOL(kfree);
3817
3818
3819
3820
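/* This initializes kmem_cache_node or resizes various caches for all nodes. */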
3821static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3822{
3823 int ret;
3824 int node;
3825 struct kmem_cache_node *n;
3826
3827 for_each_online_node(node) {
3828 ret = setup_kmem_cache_node(cachep, node, gfp, true);
3829 if (ret)
3830 goto fail;
3831
3832 }
3833
3834 return 0;
3835
3836fail:
3837 if (!cachep->list.next) {
3838
3839 node--;
3840 while (node >= 0) {
3841 n = get_node(cachep, node);
3842 if (n) {
3843 kfree(n->shared);
3844 free_alien_cache(n->alien);
3845 kfree(n);
3846 cachep->node[node] = NULL;
3847 }
3848 node--;
3849 }
3850 }
3851 return -ENOMEM;
3852}
3853
3854
3855static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3856 int batchcount, int shared, gfp_t gfp)
3857{
3858 struct array_cache __percpu *cpu_cache, *prev;
3859 int cpu;
3860
3861 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3862 if (!cpu_cache)
3863 return -ENOMEM;
3864
3865 prev = cachep->cpu_cache;
3866 cachep->cpu_cache = cpu_cache;
3867
3868
3869
3870
3871 if (prev)
3872 kick_all_cpus_sync();
3873
3874 check_irq_on();
3875 cachep->batchcount = batchcount;
3876 cachep->limit = limit;
3877 cachep->shared = shared;
3878
3879 if (!prev)
3880 goto setup_node;
3881
3882 for_each_online_cpu(cpu) {
3883 LIST_HEAD(list);
3884 int node;
3885 struct kmem_cache_node *n;
3886 struct array_cache *ac = per_cpu_ptr(prev, cpu);
3887
3888 node = cpu_to_mem(cpu);
3889 n = get_node(cachep, node);
3890 spin_lock_irq(&n->list_lock);
3891 free_block(cachep, ac->entry, ac->avail, node, &list);
3892 spin_unlock_irq(&n->list_lock);
3893 slabs_destroy(cachep, &list);
3894 }
3895 free_percpu(prev);
3896
3897setup_node:
3898 return setup_kmem_cache_nodes(cachep, gfp);
3899}
3900
3901static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3902 int batchcount, int shared, gfp_t gfp)
3903{
3904 int ret;
3905 struct kmem_cache *c;
3906
3907 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3908
3909 if (slab_state < FULL)
3910 return ret;
3911
3912 if ((ret < 0) || !is_root_cache(cachep))
3913 return ret;
3914
3915 lockdep_assert_held(&slab_mutex);
3916 for_each_memcg_cache(c, cachep) {
3917
3918 __do_tune_cpucache(c, limit, batchcount, shared, gfp);
3919 }
3920
3921 return ret;
3922}
3923
3924
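/* Called with slab_mutex held always */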
3925static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3926{
3927 int err;
3928 int limit = 0;
3929 int shared = 0;
3930 int batchcount = 0;
3931
3932 err = cache_random_seq_create(cachep, cachep->num, gfp);
3933 if (err)
3934 goto end;
3935
3936 if (!is_root_cache(cachep)) {
3937 struct kmem_cache *root = memcg_root_cache(cachep);
3938 limit = root->limit;
3939 shared = root->shared;
3940 batchcount = root->batchcount;
3941 }
3942
3943 if (limit && shared && batchcount)
3944 goto skip_setup;
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954 if (cachep->size > 131072)
3955 limit = 1;
3956 else if (cachep->size > PAGE_SIZE)
3957 limit = 8;
3958 else if (cachep->size > 1024)
3959 limit = 24;
3960 else if (cachep->size > 256)
3961 limit = 54;
3962 else
3963 limit = 120;
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974 shared = 0;
3975 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3976 shared = 8;
3977
3978#if DEBUG
3979
3980
3981
3982
3983 if (limit > 32)
3984 limit = 32;
3985#endif
3986 batchcount = (limit + 1) / 2;
3987skip_setup:
3988 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3989end:
3990 if (err)
3991 pr_err("enable_cpucache failed for %s, error %d\n",
3992 cachep->name, -err);
3993 return err;
3994}
3995
3996
3997
3998
3999
4000
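/*
 * Drain an array if it contains any elements taking the node lock only if
 * necessary.  Note that the node listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */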
4001static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
4002 struct array_cache *ac, int node)
4003{
4004 LIST_HEAD(list);
4005
4006
4007 check_mutex_acquired();
4008
4009 if (!ac || !ac->avail)
4010 return;
4011
4012 if (ac->touched) {
4013 ac->touched = 0;
4014 return;
4015 }
4016
4017 spin_lock_irq(&n->list_lock);
4018 drain_array_locked(cachep, ac, node, false, &list);
4019 spin_unlock_irq(&n->list_lock);
4020
4021 slabs_destroy(cachep, &list);
4022}
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
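/**
 * cache_reap - Reclaim memory from caches.
 * @w: work descriptor
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll try
 * again on the next iteration.
 */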
4036static void cache_reap(struct work_struct *w)
4037{
4038 struct kmem_cache *searchp;
4039 struct kmem_cache_node *n;
4040 int node = numa_mem_id();
4041 struct delayed_work *work = to_delayed_work(w);
4042
4043 if (!mutex_trylock(&slab_mutex))
4044
4045 goto out;
4046
4047 list_for_each_entry(searchp, &slab_caches, list) {
4048 check_irq_on();
4049
4050
4051
4052
4053
4054
4055 n = get_node(searchp, node);
4056
4057 reap_alien(searchp, n);
4058
4059 drain_array(searchp, n, cpu_cache_get(searchp), node);
4060
4061
4062
4063
4064
4065 if (time_after(n->next_reap, jiffies))
4066 goto next;
4067
4068 n->next_reap = jiffies + REAPTIMEOUT_NODE;
4069
4070 drain_array(searchp, n, n->shared, node);
4071
4072 if (n->free_touched)
4073 n->free_touched = 0;
4074 else {
4075 int freed;
4076
4077 freed = drain_freelist(searchp, n, (n->free_limit +
4078 5 * searchp->num - 1) / (5 * searchp->num));
4079 STATS_ADD_REAPED(searchp, freed);
4080 }
4081next:
4082 cond_resched();
4083 }
4084 check_irq_on();
4085 mutex_unlock(&slab_mutex);
4086 next_reap_node();
4087out:
4088
4089 schedule_delayed_work_on(smp_processor_id(), work,
4090 round_jiffies_relative(REAPTIMEOUT_AC));
4091}
4092
4093void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4094{
4095 unsigned long active_objs, num_objs, active_slabs;
4096 unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
4097 unsigned long free_slabs = 0;
4098 int node;
4099 struct kmem_cache_node *n;
4100
4101 for_each_kmem_cache_node(cachep, node, n) {
4102 check_irq_on();
4103 spin_lock_irq(&n->list_lock);
4104
4105 total_slabs += n->total_slabs;
4106 free_slabs += n->free_slabs;
4107 free_objs += n->free_objects;
4108
4109 if (n->shared)
4110 shared_avail += n->shared->avail;
4111
4112 spin_unlock_irq(&n->list_lock);
4113 }
4114 num_objs = total_slabs * cachep->num;
4115 active_slabs = total_slabs - free_slabs;
4116 active_objs = num_objs - free_objs;
4117
4118 sinfo->active_objs = active_objs;
4119 sinfo->num_objs = num_objs;
4120 sinfo->active_slabs = active_slabs;
4121 sinfo->num_slabs = total_slabs;
4122 sinfo->shared_avail = shared_avail;
4123 sinfo->limit = cachep->limit;
4124 sinfo->batchcount = cachep->batchcount;
4125 sinfo->shared = cachep->shared;
4126 sinfo->objects_per_slab = cachep->num;
4127 sinfo->cache_order = cachep->gfporder;
4128}
4129
4130void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4131{
4132#if STATS
4133 {
4134 unsigned long high = cachep->high_mark;
4135 unsigned long allocs = cachep->num_allocations;
4136 unsigned long grown = cachep->grown;
4137 unsigned long reaped = cachep->reaped;
4138 unsigned long errors = cachep->errors;
4139 unsigned long max_freeable = cachep->max_freeable;
4140 unsigned long node_allocs = cachep->node_allocs;
4141 unsigned long node_frees = cachep->node_frees;
4142 unsigned long overflows = cachep->node_overflow;
4143
4144 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
4145 allocs, high, grown,
4146 reaped, errors, max_freeable, node_allocs,
4147 node_frees, overflows);
4148 }
4149
4150 {
4151 unsigned long allochit = atomic_read(&cachep->allochit);
4152 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4153 unsigned long freehit = atomic_read(&cachep->freehit);
4154 unsigned long freemiss = atomic_read(&cachep->freemiss);
4155
4156 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4157 allochit, allocmiss, freehit, freemiss);
4158 }
4159#endif
4160}
4161
4162#define MAX_SLABINFO_WRITE 128
4163
4164
4165
4166
4167
4168
4169
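/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */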
4170ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4171 size_t count, loff_t *ppos)
4172{
4173 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4174 int limit, batchcount, shared, res;
4175 struct kmem_cache *cachep;
4176
4177 if (count > MAX_SLABINFO_WRITE)
4178 return -EINVAL;
4179 if (copy_from_user(&kbuf, buffer, count))
4180 return -EFAULT;
4181 kbuf[MAX_SLABINFO_WRITE] = '\0';
4182
4183 tmp = strchr(kbuf, ' ');
4184 if (!tmp)
4185 return -EINVAL;
4186 *tmp = '\0';
4187 tmp++;
4188 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4189 return -EINVAL;
4190
4191
4192 mutex_lock(&slab_mutex);
4193 res = -EINVAL;
4194 list_for_each_entry(cachep, &slab_caches, list) {
4195 if (!strcmp(cachep->name, kbuf)) {
4196 if (limit < 1 || batchcount < 1 ||
4197 batchcount > limit || shared < 0) {
4198 res = 0;
4199 } else {
4200 res = do_tune_cpucache(cachep, limit,
4201 batchcount, shared,
4202 GFP_KERNEL);
4203 }
4204 break;
4205 }
4206 }
4207 mutex_unlock(&slab_mutex);
4208 if (res >= 0)
4209 res = count;
4210 return res;
4211}
4212
4213#ifdef CONFIG_DEBUG_SLAB_LEAK
4214
4215static inline int add_caller(unsigned long *n, unsigned long v)
4216{
4217 unsigned long *p;
4218 int l;
4219 if (!v)
4220 return 1;
4221 l = n[1];
4222 p = n + 2;
4223 while (l) {
4224 int i = l/2;
4225 unsigned long *q = p + 2 * i;
4226 if (*q == v) {
4227 q[1]++;
4228 return 1;
4229 }
4230 if (*q > v) {
4231 l = i;
4232 } else {
4233 p = q + 2;
4234 l -= i + 1;
4235 }
4236 }
4237 if (++n[1] == n[0])
4238 return 0;
4239 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4240 p[0] = v;
4241 p[1] = 1;
4242 return 1;
4243}
4244
4245static void handle_slab(unsigned long *n, struct kmem_cache *c,
4246 struct page *page)
4247{
4248 void *p;
4249 int i, j;
4250 unsigned long v;
4251
4252 if (n[0] == n[1])
4253 return;
4254 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4255 bool active = true;
4256
4257 for (j = page->active; j < c->num; j++) {
4258 if (get_free_obj(page, j) == i) {
4259 active = false;
4260 break;
4261 }
4262 }
4263
4264 if (!active)
4265 continue;
4266
4267
4268
4269
4270
4271
4272
4273 if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
4274 continue;
4275
4276 if (!add_caller(n, v))
4277 return;
4278 }
4279}
4280
4281static void show_symbol(struct seq_file *m, unsigned long address)
4282{
4283#ifdef CONFIG_KALLSYMS
4284 unsigned long offset, size;
4285 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4286
4287 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4288 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4289 if (modname[0])
4290 seq_printf(m, " [%s]", modname);
4291 return;
4292 }
4293#endif
4294 seq_printf(m, "%px", (void *)address);
4295}
4296
4297static int leaks_show(struct seq_file *m, void *p)
4298{
4299 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4300 struct page *page;
4301 struct kmem_cache_node *n;
4302 const char *name;
4303 unsigned long *x = m->private;
4304 int node;
4305 int i;
4306
4307 if (!(cachep->flags & SLAB_STORE_USER))
4308 return 0;
4309 if (!(cachep->flags & SLAB_RED_ZONE))
4310 return 0;
4311
4312
4313
4314
4315
4316
4317
4318 do {
4319 set_store_user_clean(cachep);
4320 drain_cpu_caches(cachep);
4321
4322 x[1] = 0;
4323
4324 for_each_kmem_cache_node(cachep, node, n) {
4325
4326 check_irq_on();
4327 spin_lock_irq(&n->list_lock);
4328
4329 list_for_each_entry(page, &n->slabs_full, lru)
4330 handle_slab(x, cachep, page);
4331 list_for_each_entry(page, &n->slabs_partial, lru)
4332 handle_slab(x, cachep, page);
4333 spin_unlock_irq(&n->list_lock);
4334 }
4335 } while (!is_store_user_clean(cachep));
4336
4337 name = cachep->name;
4338 if (x[0] == x[1]) {
4339
4340 mutex_unlock(&slab_mutex);
4341 m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
4342 GFP_KERNEL);
4343 if (!m->private) {
4344
4345 m->private = x;
4346 mutex_lock(&slab_mutex);
4347 return -ENOMEM;
4348 }
4349 *(unsigned long *)m->private = x[0] * 2;
4350 kfree(x);
4351 mutex_lock(&slab_mutex);
4352
4353 m->count = m->size;
4354 return 0;
4355 }
4356 for (i = 0; i < x[1]; i++) {
4357 seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4358 show_symbol(m, x[2*i+2]);
4359 seq_putc(m, '\n');
4360 }
4361
4362 return 0;
4363}
4364
4365static const struct seq_operations slabstats_op = {
4366 .start = slab_start,
4367 .next = slab_next,
4368 .stop = slab_stop,
4369 .show = leaks_show,
4370};
4371
4372static int slabstats_open(struct inode *inode, struct file *file)
4373{
4374 unsigned long *n;
4375
4376 n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
4377 if (!n)
4378 return -ENOMEM;
4379
4380 *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4381
4382 return 0;
4383}
4384
4385static const struct file_operations proc_slabstats_operations = {
4386 .open = slabstats_open,
4387 .read = seq_read,
4388 .llseek = seq_lseek,
4389 .release = seq_release_private,
4390};
4391#endif
4392
4393static int __init slab_proc_init(void)
4394{
4395#ifdef CONFIG_DEBUG_SLAB_LEAK
4396 proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4397#endif
4398 return 0;
4399}
4400module_init(slab_proc_init);
4401
4402#ifdef CONFIG_HARDENED_USERCOPY
4403
4404
4405
4406
4407
4408
4409
4410
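/*
 * Rejects incorrectly sized objects and objects that are to be copied
 * to/from userspace but do not fall entirely within the containing slab
 * cache's usercopy region.  Aborts (or only warns, when the usercopy
 * fallback to the whole object is allowed) if the check fails.
 */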
4411void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
4412 bool to_user)
4413{
4414 struct kmem_cache *cachep;
4415 unsigned int objnr;
4416 unsigned long offset;
4417
4418
4419 cachep = page->slab_cache;
4420 objnr = obj_to_index(cachep, page, (void *)ptr);
4421 BUG_ON(objnr >= cachep->num);
4422
4423
4424 offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4425
4426
4427 if (offset >= cachep->useroffset &&
4428 offset - cachep->useroffset <= cachep->usersize &&
4429 n <= cachep->useroffset - offset + cachep->usersize)
4430 return;
4431
4432
4433
4434
4435
4436
4437
4438 if (usercopy_fallback &&
4439 offset <= cachep->object_size &&
4440 n <= cachep->object_size - offset) {
4441 usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
4442 return;
4443 }
4444
4445 usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
4446}
4447#endif
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
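/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested.  ksize() can be used to determine the actual amount of
 * memory allocated.  The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc().  The object
 * must not be freed during the duration of the call.
 */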
4461size_t ksize(const void *objp)
4462{
4463 size_t size;
4464
4465 BUG_ON(!objp);
4466 if (unlikely(objp == ZERO_SIZE_PTR))
4467 return 0;
4468
4469 size = virt_to_cache(objp)->object_size;
4470
4471
4472
4473 kasan_unpoison_shadow(objp, size);
4474
4475 return size;
4476}
4477EXPORT_SYMBOL(ksize);
4478