#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/kmemcheck.h>
#include <linux/memory.h>
#include <linux/prefetch.h>

#include <net/sock.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
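
/*
 * struct array_cache - cache of free object pointers.
 *
 * 'entry[]' holds up to 'limit' pointers to free objects, of which
 * 'avail' are currently usable. 'batchcount' is how many objects are
 * moved to or from the slab lists in one go, and 'touched' tells the
 * reaper that this cache was used recently. The same structure backs
 * the per-CPU caches, the per-node shared cache and (wrapped in
 * struct alien_cache) the remote-node caches.
 */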
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];
};

struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);
static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->total_slabs = 0;
	parent->free_slabs = 0;
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)		((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
259
260
261
262
263
264
265
266#define REAPTIMEOUT_AC (2*HZ)
267#define REAPTIMEOUT_NODE (4*HZ)
268
269#if STATS
270#define STATS_INC_ACTIVE(x) ((x)->num_active++)
271#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
272#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
273#define STATS_INC_GROWN(x) ((x)->grown++)
274#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
275#define STATS_SET_HIGH(x) \
276 do { \
277 if ((x)->num_active > (x)->high_mark) \
278 (x)->high_mark = (x)->num_active; \
279 } while (0)
280#define STATS_INC_ERR(x) ((x)->errors++)
281#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
282#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
283#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
284#define STATS_SET_FREEABLE(x, i) \
285 do { \
286 if ((x)->max_freeable < i) \
287 (x)->max_freeable = i; \
288 } while (0)
289#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
290#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
291#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
292#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
293#else
294#define STATS_INC_ACTIVE(x) do { } while (0)
295#define STATS_DEC_ACTIVE(x) do { } while (0)
296#define STATS_INC_ALLOCED(x) do { } while (0)
297#define STATS_INC_GROWN(x) do { } while (0)
298#define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0)
299#define STATS_SET_HIGH(x) do { } while (0)
300#define STATS_INC_ERR(x) do { } while (0)
301#define STATS_INC_NODEALLOCS(x) do { } while (0)
302#define STATS_INC_NODEFREES(x) do { } while (0)
303#define STATS_INC_ACOVERFLOW(x) do { } while (0)
304#define STATS_SET_FREEABLE(x, i) do { } while (0)
305#define STATS_INC_ALLOCHIT(x) do { } while (0)
306#define STATS_INC_ALLOCMISS(x) do { } while (0)
307#define STATS_INC_FREEHIT(x) do { } while (0)
308#define STATS_INC_FREEMISS(x) do { } while (0)
309#endif
310
311#if DEBUG
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326static int obj_offset(struct kmem_cache *cachep)
327{
328 return cachep->obj_offset;
329}
330
331static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
332{
333 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
334 return (unsigned long long*) (objp + obj_offset(cachep) -
335 sizeof(unsigned long long));
336}
337
338static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
339{
340 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
341 if (cachep->flags & SLAB_STORE_USER)
342 return (unsigned long long *)(objp + cachep->size -
343 sizeof(unsigned long long) -
344 REDZONE_ALIGN);
345 return (unsigned long long *) (objp + cachep->size -
346 sizeof(unsigned long long));
347}
348
349static void **dbg_userword(struct kmem_cache *cachep, void *objp)
350{
351 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
352 return (void **)(objp + cachep->size - BYTES_PER_WORD);
353}
354
355#else
356
357#define obj_offset(x) 0
358#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
359#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
360#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
361
362#endif
363
364#ifdef CONFIG_DEBUG_SLAB_LEAK
365
366static inline bool is_store_user_clean(struct kmem_cache *cachep)
367{
368 return atomic_read(&cachep->store_user_clean) == 1;
369}
370
371static inline void set_store_user_clean(struct kmem_cache *cachep)
372{
373 atomic_set(&cachep->store_user_clean, 1);
374}
375
376static inline void set_store_user_dirty(struct kmem_cache *cachep)
377{
378 if (is_store_user_clean(cachep))
379 atomic_set(&cachep->store_user_clean, 0);
380}
381
382#else
383static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
384
385#endif
386
387
388
389
390
391#define SLAB_MAX_ORDER_HI 1
392#define SLAB_MAX_ORDER_LO 0
393static int slab_max_order = SLAB_MAX_ORDER_LO;
394static bool slab_max_order_set __initdata;
395
396static inline struct kmem_cache *virt_to_cache(const void *obj)
397{
398 struct page *page = virt_to_head_page(obj);
399 return page->slab_cache;
400}
401
402static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
403 unsigned int idx)
404{
405 return page->s_mem + cache->size * idx;
406}
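
/*
 * The object index is computed with a pre-calculated reciprocal
 * (reciprocal_divide) instead of dividing by buffer_size, which keeps
 * an expensive integer division out of the allocation/free hot paths.
 */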
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
420
421#define BOOT_CPUCACHE_ENTRIES 1
422
423static struct kmem_cache kmem_cache_boot = {
424 .batchcount = 1,
425 .limit = BOOT_CPUCACHE_ENTRIES,
426 .shared = 1,
427 .size = sizeof(struct kmem_cache),
428 .name = "kmem_cache",
429};
430
431static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
432
433static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
434{
435 return this_cpu_ptr(cachep->cpu_cache);
436}
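
/*
 * Work out how many objects of 'buffer_size' bytes fit into a slab of
 * 2^gfporder pages, and how many bytes are left over (usable for slab
 * colouring). With an on-slab freelist every object additionally costs
 * one freelist_idx_t of management data.
 */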
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
				   unsigned long flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}
475
476#if DEBUG
477#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
478
479static void __slab_error(const char *function, struct kmem_cache *cachep,
480 char *msg)
481{
482 pr_err("slab error in %s(): cache `%s': %s\n",
483 function, cachep->name, msg);
484 dump_stack();
485 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
486}
487#endif
488
489
490
491
492
493
494
495
496
497static int use_alien_caches __read_mostly = 1;
498static int __init noaliencache_setup(char *s)
499{
500 use_alien_caches = 0;
501 return 1;
502}
503__setup("noaliencache", noaliencache_setup);
504
505static int __init slab_max_order_setup(char *str)
506{
507 get_option(&str, &slab_max_order);
508 slab_max_order = slab_max_order < 0 ? 0 :
509 min(slab_max_order, MAX_ORDER - 1);
510 slab_max_order_set = true;
511
512 return 1;
513}
514__setup("slab_max_order=", slab_max_order_setup);
515
516#ifdef CONFIG_NUMA
517
518
519
520
521
522
523static DEFINE_PER_CPU(unsigned long, slab_reap_node);
524
525static void init_reap_node(int cpu)
526{
527 per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu),
528 node_online_map);
529}
530
531static void next_reap_node(void)
532{
533 int node = __this_cpu_read(slab_reap_node);
534
535 node = next_node_in(node, node_online_map);
536 __this_cpu_write(slab_reap_node, node);
537}
538
539#else
540#define init_reap_node(cpu) do { } while (0)
541#define next_reap_node(void) do { } while (0)
542#endif
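
/*
 * Kick off the background reaper for a CPU: a deferrable delayed work
 * that runs cache_reap() periodically. __round_jiffies_relative() is
 * used so the per-CPU timers do not all fire at the same moment.
 */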
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	if (reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}
562
static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The entry[] array holds pointers to free objects, which would
	 * look like valid references to a kmemleak scan, so tell kmemleak
	 * not to scan this structure.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}
580
581static struct array_cache *alloc_arraycache(int node, int entries,
582 int batchcount, gfp_t gfp)
583{
584 size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
585 struct array_cache *ac = NULL;
586
587 ac = kmalloc_node(memsize, gfp, node);
588 init_arraycache(ac, entries, batchcount);
589 return ac;
590}
591
592static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
593 struct page *page, void *objp)
594{
595 struct kmem_cache_node *n;
596 int page_node;
597 LIST_HEAD(list);
598
599 page_node = page_to_nid(page);
600 n = get_node(cachep, page_node);
601
602 spin_lock(&n->list_lock);
603 free_block(cachep, &objp, 1, page_node, &list);
604 spin_unlock(&n->list_lock);
605
606 slabs_destroy(cachep, &list);
607}
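
/*
 * Move up to 'max' free object pointers from one array_cache to
 * another (e.g. from a CPU cache into the node's shared cache).
 * Returns the number of objects actually transferred.
 */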
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
631
632#ifndef CONFIG_NUMA
633
634#define drain_alien_cache(cachep, alien) do { } while (0)
635#define reap_alien(cachep, n) do { } while (0)
636
637static inline struct alien_cache **alloc_alien_cache(int node,
638 int limit, gfp_t gfp)
639{
640 return NULL;
641}
642
643static inline void free_alien_cache(struct alien_cache **ac_ptr)
644{
645}
646
647static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
648{
649 return 0;
650}
651
652static inline void *alternate_node_alloc(struct kmem_cache *cachep,
653 gfp_t flags)
654{
655 return NULL;
656}
657
658static inline void *____cache_alloc_node(struct kmem_cache *cachep,
659 gfp_t flags, int nodeid)
660{
661 return NULL;
662}
663
664static inline gfp_t gfp_exact_node(gfp_t flags)
665{
666 return flags & ~__GFP_NOFAIL;
667}
668
669#else
670
671static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
672static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
673
674static struct alien_cache *__alloc_alien_cache(int node, int entries,
675 int batch, gfp_t gfp)
676{
677 size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
678 struct alien_cache *alc = NULL;
679
680 alc = kmalloc_node(memsize, gfp, node);
681 init_arraycache(&alc->ac, entries, batch);
682 spin_lock_init(&alc->lock);
683 return alc;
684}
685
686static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
687{
688 struct alien_cache **alc_ptr;
689 size_t memsize = sizeof(void *) * nr_node_ids;
690 int i;
691
692 if (limit > 1)
693 limit = 12;
694 alc_ptr = kzalloc_node(memsize, gfp, node);
695 if (!alc_ptr)
696 return NULL;
697
698 for_each_node(i) {
699 if (i == node || !node_online(i))
700 continue;
701 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
702 if (!alc_ptr[i]) {
703 for (i--; i >= 0; i--)
704 kfree(alc_ptr[i]);
705 kfree(alc_ptr);
706 return NULL;
707 }
708 }
709 return alc_ptr;
710}
711
712static void free_alien_cache(struct alien_cache **alc_ptr)
713{
714 int i;
715
716 if (!alc_ptr)
717 return;
718 for_each_node(i)
719 kfree(alc_ptr[i]);
720 kfree(alc_ptr);
721}
722
723static void __drain_alien_cache(struct kmem_cache *cachep,
724 struct array_cache *ac, int node,
725 struct list_head *list)
726{
727 struct kmem_cache_node *n = get_node(cachep, node);
728
729 if (ac->avail) {
730 spin_lock(&n->list_lock);
731
732
733
734
735
736 if (n->shared)
737 transfer_objects(n->shared, ac, ac->limit);
738
739 free_block(cachep, ac->entry, ac->avail, node, list);
740 ac->avail = 0;
741 spin_unlock(&n->list_lock);
742 }
743}
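
/*
 * Used by the reaper (cache_reap()) to drain the alien cache of a
 * single remote node, chosen round-robin via slab_reap_node. The lock
 * is only trylock'ed so the reaper never spins on a busy alien cache.
 */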
748static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
749{
750 int node = __this_cpu_read(slab_reap_node);
751
752 if (n->alien) {
753 struct alien_cache *alc = n->alien[node];
754 struct array_cache *ac;
755
756 if (alc) {
757 ac = &alc->ac;
758 if (ac->avail && spin_trylock_irq(&alc->lock)) {
759 LIST_HEAD(list);
760
761 __drain_alien_cache(cachep, ac, node, &list);
762 spin_unlock_irq(&alc->lock);
763 slabs_destroy(cachep, &list);
764 }
765 }
766 }
767}
768
769static void drain_alien_cache(struct kmem_cache *cachep,
770 struct alien_cache **alien)
771{
772 int i = 0;
773 struct alien_cache *alc;
774 struct array_cache *ac;
775 unsigned long flags;
776
777 for_each_online_node(i) {
778 alc = alien[i];
779 if (alc) {
780 LIST_HEAD(list);
781
782 ac = &alc->ac;
783 spin_lock_irqsave(&alc->lock, flags);
784 __drain_alien_cache(cachep, ac, i, &list);
785 spin_unlock_irqrestore(&alc->lock, flags);
786 slabs_destroy(cachep, &list);
787 }
788 }
789}
790
791static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
792 int node, int page_node)
793{
794 struct kmem_cache_node *n;
795 struct alien_cache *alien = NULL;
796 struct array_cache *ac;
797 LIST_HEAD(list);
798
799 n = get_node(cachep, node);
800 STATS_INC_NODEFREES(cachep);
801 if (n->alien && n->alien[page_node]) {
802 alien = n->alien[page_node];
803 ac = &alien->ac;
804 spin_lock(&alien->lock);
805 if (unlikely(ac->avail == ac->limit)) {
806 STATS_INC_ACOVERFLOW(cachep);
807 __drain_alien_cache(cachep, ac, page_node, &list);
808 }
809 ac->entry[ac->avail++] = objp;
810 spin_unlock(&alien->lock);
811 slabs_destroy(cachep, &list);
812 } else {
813 n = get_node(cachep, page_node);
814 spin_lock(&n->list_lock);
815 free_block(cachep, &objp, 1, page_node, &list);
816 spin_unlock(&n->list_lock);
817 slabs_destroy(cachep, &list);
818 }
819 return 1;
820}
821
822static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
823{
824 int page_node = page_to_nid(virt_to_page(objp));
825 int node = numa_mem_id();
826
827
828
829
830 if (likely(node == page_node))
831 return 0;
832
833 return __cache_free_alien(cachep, objp, node, page_node);
834}
835
836
837
838
839
840static inline gfp_t gfp_exact_node(gfp_t flags)
841{
842 return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
843}
844#endif
845
846static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
847{
848 struct kmem_cache_node *n;
849
850
851
852
853
854
855 n = get_node(cachep, node);
856 if (n) {
857 spin_lock_irq(&n->list_lock);
858 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
859 cachep->num;
860 spin_unlock_irq(&n->list_lock);
861
862 return 0;
863 }
864
865 n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
866 if (!n)
867 return -ENOMEM;
868
869 kmem_cache_node_init(n);
870 n->next_reap = jiffies + REAPTIMEOUT_NODE +
871 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
872
873 n->free_limit =
874 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num;
875
876
877
878
879
880
881 cachep->node[node] = n;
882
883 return 0;
884}
885
886#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
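
/*
 * Ensure every cache on slab_caches has a kmem_cache_node for 'node'.
 * Used when a CPU on a so-far unused node comes up and for memory
 * hotplug (MEM_GOING_ONLINE).
 */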
896static int init_cache_node_node(int node)
897{
898 int ret;
899 struct kmem_cache *cachep;
900
901 list_for_each_entry(cachep, &slab_caches, list) {
902 ret = init_cache_node(cachep, node, GFP_KERNEL);
903 if (ret)
904 return ret;
905 }
906
907 return 0;
908}
909#endif
910
911static int setup_kmem_cache_node(struct kmem_cache *cachep,
912 int node, gfp_t gfp, bool force_change)
913{
914 int ret = -ENOMEM;
915 struct kmem_cache_node *n;
916 struct array_cache *old_shared = NULL;
917 struct array_cache *new_shared = NULL;
918 struct alien_cache **new_alien = NULL;
919 LIST_HEAD(list);
920
921 if (use_alien_caches) {
922 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
923 if (!new_alien)
924 goto fail;
925 }
926
927 if (cachep->shared) {
928 new_shared = alloc_arraycache(node,
929 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
930 if (!new_shared)
931 goto fail;
932 }
933
934 ret = init_cache_node(cachep, node, gfp);
935 if (ret)
936 goto fail;
937
938 n = get_node(cachep, node);
939 spin_lock_irq(&n->list_lock);
940 if (n->shared && force_change) {
941 free_block(cachep, n->shared->entry,
942 n->shared->avail, node, &list);
943 n->shared->avail = 0;
944 }
945
946 if (!n->shared || force_change) {
947 old_shared = n->shared;
948 n->shared = new_shared;
949 new_shared = NULL;
950 }
951
952 if (!n->alien) {
953 n->alien = new_alien;
954 new_alien = NULL;
955 }
956
957 spin_unlock_irq(&n->list_lock);
958 slabs_destroy(cachep, &list);
959
960
961
962
963
964
965
966 if (old_shared && force_change)
967 synchronize_sched();
968
969fail:
970 kfree(old_shared);
971 kfree(new_shared);
972 free_alien_cache(new_alien);
973
974 return ret;
975}
976
977#ifdef CONFIG_SMP
978
979static void cpuup_canceled(long cpu)
980{
981 struct kmem_cache *cachep;
982 struct kmem_cache_node *n = NULL;
983 int node = cpu_to_mem(cpu);
984 const struct cpumask *mask = cpumask_of_node(node);
985
986 list_for_each_entry(cachep, &slab_caches, list) {
987 struct array_cache *nc;
988 struct array_cache *shared;
989 struct alien_cache **alien;
990 LIST_HEAD(list);
991
992 n = get_node(cachep, node);
993 if (!n)
994 continue;
995
996 spin_lock_irq(&n->list_lock);
997
998
999 n->free_limit -= cachep->batchcount;
1000
1001
1002 nc = per_cpu_ptr(cachep->cpu_cache, cpu);
1003 if (nc) {
1004 free_block(cachep, nc->entry, nc->avail, node, &list);
1005 nc->avail = 0;
1006 }
1007
1008 if (!cpumask_empty(mask)) {
1009 spin_unlock_irq(&n->list_lock);
1010 goto free_slab;
1011 }
1012
1013 shared = n->shared;
1014 if (shared) {
1015 free_block(cachep, shared->entry,
1016 shared->avail, node, &list);
1017 n->shared = NULL;
1018 }
1019
1020 alien = n->alien;
1021 n->alien = NULL;
1022
1023 spin_unlock_irq(&n->list_lock);
1024
1025 kfree(shared);
1026 if (alien) {
1027 drain_alien_cache(cachep, alien);
1028 free_alien_cache(alien);
1029 }
1030
1031free_slab:
1032 slabs_destroy(cachep, &list);
1033 }
1034
1035
1036
1037
1038
1039 list_for_each_entry(cachep, &slab_caches, list) {
1040 n = get_node(cachep, node);
1041 if (!n)
1042 continue;
1043 drain_freelist(cachep, n, INT_MAX);
1044 }
1045}
1046
1047static int cpuup_prepare(long cpu)
1048{
1049 struct kmem_cache *cachep;
1050 int node = cpu_to_mem(cpu);
1051 int err;
1052
1053
1054
1055
1056
1057
1058
1059 err = init_cache_node_node(node);
1060 if (err < 0)
1061 goto bad;
1062
1063
1064
1065
1066
1067 list_for_each_entry(cachep, &slab_caches, list) {
1068 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false);
1069 if (err)
1070 goto bad;
1071 }
1072
1073 return 0;
1074bad:
1075 cpuup_canceled(cpu);
1076 return -ENOMEM;
1077}
1078
1079int slab_prepare_cpu(unsigned int cpu)
1080{
1081 int err;
1082
1083 mutex_lock(&slab_mutex);
1084 err = cpuup_prepare(cpu);
1085 mutex_unlock(&slab_mutex);
1086 return err;
1087}
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099int slab_dead_cpu(unsigned int cpu)
1100{
1101 mutex_lock(&slab_mutex);
1102 cpuup_canceled(cpu);
1103 mutex_unlock(&slab_mutex);
1104 return 0;
1105}
1106#endif
1107
1108static int slab_online_cpu(unsigned int cpu)
1109{
1110 start_cpu_timer(cpu);
1111 return 0;
1112}
1113
1114static int slab_offline_cpu(unsigned int cpu)
1115{
1116
1117
1118
1119
1120
1121
1122 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
1123
1124 per_cpu(slab_reap_work, cpu).work.func = NULL;
1125 return 0;
1126}
1127
1128#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1129
1130
1131
1132
1133
1134
1135
1136static int __meminit drain_cache_node_node(int node)
1137{
1138 struct kmem_cache *cachep;
1139 int ret = 0;
1140
1141 list_for_each_entry(cachep, &slab_caches, list) {
1142 struct kmem_cache_node *n;
1143
1144 n = get_node(cachep, node);
1145 if (!n)
1146 continue;
1147
1148 drain_freelist(cachep, n, INT_MAX);
1149
1150 if (!list_empty(&n->slabs_full) ||
1151 !list_empty(&n->slabs_partial)) {
1152 ret = -EBUSY;
1153 break;
1154 }
1155 }
1156 return ret;
1157}
1158
1159static int __meminit slab_memory_callback(struct notifier_block *self,
1160 unsigned long action, void *arg)
1161{
1162 struct memory_notify *mnb = arg;
1163 int ret = 0;
1164 int nid;
1165
1166 nid = mnb->status_change_nid;
1167 if (nid < 0)
1168 goto out;
1169
1170 switch (action) {
1171 case MEM_GOING_ONLINE:
1172 mutex_lock(&slab_mutex);
1173 ret = init_cache_node_node(nid);
1174 mutex_unlock(&slab_mutex);
1175 break;
1176 case MEM_GOING_OFFLINE:
1177 mutex_lock(&slab_mutex);
1178 ret = drain_cache_node_node(nid);
1179 mutex_unlock(&slab_mutex);
1180 break;
1181 case MEM_ONLINE:
1182 case MEM_OFFLINE:
1183 case MEM_CANCEL_ONLINE:
1184 case MEM_CANCEL_OFFLINE:
1185 break;
1186 }
1187out:
1188 return notifier_from_errno(ret);
1189}
1190#endif
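
/*
 * Bootstrap helper: replace a statically allocated init_kmem_cache_node
 * with a kmalloc'ed copy. The slab lists are re-spliced with
 * MAKE_ALL_LISTS because the list heads of the static copy still point
 * at the old location.
 */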
1195static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
1196 int nodeid)
1197{
1198 struct kmem_cache_node *ptr;
1199
1200 ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
1201 BUG_ON(!ptr);
1202
1203 memcpy(ptr, list, sizeof(struct kmem_cache_node));
1204
1205
1206
1207 spin_lock_init(&ptr->list_lock);
1208
1209 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1210 cachep->node[nodeid] = ptr;
1211}
1212
1213
1214
1215
1216
1217static void __init set_up_node(struct kmem_cache *cachep, int index)
1218{
1219 int node;
1220
1221 for_each_online_node(node) {
1222 cachep->node[node] = &init_kmem_cache_node[index + node];
1223 cachep->node[node]->next_reap = jiffies +
1224 REAPTIMEOUT_NODE +
1225 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1226 }
1227}
1228
1229
1230
1231
1232
1233void __init kmem_cache_init(void)
1234{
1235 int i;
1236
1237 BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
1238 sizeof(struct rcu_head));
1239 kmem_cache = &kmem_cache_boot;
1240
1241 if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
1242 use_alien_caches = 0;
1243
1244 for (i = 0; i < NUM_INIT_LISTS; i++)
1245 kmem_cache_node_init(&init_kmem_cache_node[i]);
1246
1247
1248
1249
1250
1251
1252 if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
1253 slab_max_order = SLAB_MAX_ORDER_HI;
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280 create_boot_cache(kmem_cache, "kmem_cache",
1281 offsetof(struct kmem_cache, node) +
1282 nr_node_ids * sizeof(struct kmem_cache_node *),
1283 SLAB_HWCACHE_ALIGN);
1284 list_add(&kmem_cache->list, &slab_caches);
1285 slab_state = PARTIAL;
1286
1287
1288
1289
1290
1291 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
1292 kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
1293 slab_state = PARTIAL_NODE;
1294 setup_kmalloc_cache_index_table();
1295
1296 slab_early_init = 0;
1297
1298
1299 {
1300 int nid;
1301
1302 for_each_online_node(nid) {
1303 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
1304
1305 init_list(kmalloc_caches[INDEX_NODE],
1306 &init_kmem_cache_node[SIZE_NODE + nid], nid);
1307 }
1308 }
1309
1310 create_kmalloc_caches(ARCH_KMALLOC_FLAGS);
1311}
1312
1313void __init kmem_cache_init_late(void)
1314{
1315 struct kmem_cache *cachep;
1316
1317 slab_state = UP;
1318
1319
1320 mutex_lock(&slab_mutex);
1321 list_for_each_entry(cachep, &slab_caches, list)
1322 if (enable_cpucache(cachep, GFP_NOWAIT))
1323 BUG();
1324 mutex_unlock(&slab_mutex);
1325
1326
1327 slab_state = FULL;
1328
1329#ifdef CONFIG_NUMA
1330
1331
1332
1333
1334 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1335#endif
1336
1337
1338
1339
1340
1341}
1342
1343static int __init cpucache_init(void)
1344{
1345 int ret;
1346
1347
1348
1349
1350 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
1351 slab_online_cpu, slab_offline_cpu);
1352 WARN_ON(ret < 0);
1353
1354
1355 slab_state = FULL;
1356 return 0;
1357}
1358__initcall(cpucache_init);
1359
1360static noinline void
1361slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1362{
1363#if DEBUG
1364 struct kmem_cache_node *n;
1365 unsigned long flags;
1366 int node;
1367 static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
1368 DEFAULT_RATELIMIT_BURST);
1369
1370 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
1371 return;
1372
1373 pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
1374 nodeid, gfpflags, &gfpflags);
1375 pr_warn(" cache: %s, object size: %d, order: %d\n",
1376 cachep->name, cachep->size, cachep->gfporder);
1377
1378 for_each_kmem_cache_node(cachep, node, n) {
1379 unsigned long total_slabs, free_slabs, free_objs;
1380
1381 spin_lock_irqsave(&n->list_lock, flags);
1382 total_slabs = n->total_slabs;
1383 free_slabs = n->free_slabs;
1384 free_objs = n->free_objects;
1385 spin_unlock_irqrestore(&n->list_lock, flags);
1386
1387 pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
1388 node, total_slabs - free_slabs, total_slabs,
1389 (total_slabs * cachep->num) - free_objs,
1390 total_slabs * cachep->num);
1391 }
1392#endif
1393}
1394
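
/*
 * Interface to the page allocator. Allocate 2^gfporder pages for a new
 * slab, charge them to the memcg and to the NR_SLAB_* vmstat counters,
 * and mark them as slab pages (plus pfmemalloc/kmemcheck bookkeeping).
 */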
1403static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1404 int nodeid)
1405{
1406 struct page *page;
1407 int nr_pages;
1408
1409 flags |= cachep->allocflags;
1410 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1411 flags |= __GFP_RECLAIMABLE;
1412
1413 page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
1414 if (!page) {
1415 slab_out_of_memory(cachep, flags, nodeid);
1416 return NULL;
1417 }
1418
1419 if (memcg_charge_slab(page, flags, cachep->gfporder, cachep)) {
1420 __free_pages(page, cachep->gfporder);
1421 return NULL;
1422 }
1423
1424 nr_pages = (1 << cachep->gfporder);
1425 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1426 add_zone_page_state(page_zone(page),
1427 NR_SLAB_RECLAIMABLE, nr_pages);
1428 else
1429 add_zone_page_state(page_zone(page),
1430 NR_SLAB_UNRECLAIMABLE, nr_pages);
1431
1432 __SetPageSlab(page);
1433
1434 if (sk_memalloc_socks() && page_is_pfmemalloc(page))
1435 SetPageSlabPfmemalloc(page);
1436
1437 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1438 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1439
1440 if (cachep->ctor)
1441 kmemcheck_mark_uninitialized_pages(page, nr_pages);
1442 else
1443 kmemcheck_mark_unallocated_pages(page, nr_pages);
1444 }
1445
1446 return page;
1447}
1448
1449
1450
1451
1452static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
1453{
1454 int order = cachep->gfporder;
1455 unsigned long nr_freed = (1 << order);
1456
1457 kmemcheck_free_shadow(page, order);
1458
1459 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1460 sub_zone_page_state(page_zone(page),
1461 NR_SLAB_RECLAIMABLE, nr_freed);
1462 else
1463 sub_zone_page_state(page_zone(page),
1464 NR_SLAB_UNRECLAIMABLE, nr_freed);
1465
1466 BUG_ON(!PageSlab(page));
1467 __ClearPageSlabPfmemalloc(page);
1468 __ClearPageSlab(page);
1469 page_mapcount_reset(page);
1470 page->mapping = NULL;
1471
1472 if (current->reclaim_state)
1473 current->reclaim_state->reclaimed_slab += nr_freed;
1474 memcg_uncharge_slab(page, order, cachep);
1475 __free_pages(page, order);
1476}
1477
1478static void kmem_rcu_free(struct rcu_head *head)
1479{
1480 struct kmem_cache *cachep;
1481 struct page *page;
1482
1483 page = container_of(head, struct page, rcu_head);
1484 cachep = page->slab_cache;
1485
1486 kmem_freepages(cachep, page);
1487}
1488
1489#if DEBUG
1490static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
1491{
1492 if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
1493 (cachep->size % PAGE_SIZE) == 0)
1494 return true;
1495
1496 return false;
1497}
1498
1499#ifdef CONFIG_DEBUG_PAGEALLOC
1500static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1501 unsigned long caller)
1502{
1503 int size = cachep->object_size;
1504
1505 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1506
1507 if (size < 5 * sizeof(unsigned long))
1508 return;
1509
1510 *addr++ = 0x12345678;
1511 *addr++ = caller;
1512 *addr++ = smp_processor_id();
1513 size -= 3 * sizeof(unsigned long);
1514 {
1515 unsigned long *sptr = &caller;
1516 unsigned long svalue;
1517
1518 while (!kstack_end(sptr)) {
1519 svalue = *sptr++;
1520 if (kernel_text_address(svalue)) {
1521 *addr++ = svalue;
1522 size -= sizeof(unsigned long);
1523 if (size <= sizeof(unsigned long))
1524 break;
1525 }
1526 }
1527
1528 }
1529 *addr++ = 0x87654321;
1530}
1531
1532static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1533 int map, unsigned long caller)
1534{
1535 if (!is_debug_pagealloc_cache(cachep))
1536 return;
1537
1538 if (caller)
1539 store_stackinfo(cachep, objp, caller);
1540
1541 kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
1542}
1543
1544#else
1545static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
1546 int map, unsigned long caller) {}
1547
1548#endif
1549
1550static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1551{
1552 int size = cachep->object_size;
1553 addr = &((char *)addr)[obj_offset(cachep)];
1554
1555 memset(addr, val, size);
1556 *(unsigned char *)(addr + size - 1) = POISON_END;
1557}
1558
1559static void dump_line(char *data, int offset, int limit)
1560{
1561 int i;
1562 unsigned char error = 0;
1563 int bad_count = 0;
1564
1565 pr_err("%03x: ", offset);
1566 for (i = 0; i < limit; i++) {
1567 if (data[offset + i] != POISON_FREE) {
1568 error = data[offset + i];
1569 bad_count++;
1570 }
1571 }
1572 print_hex_dump(KERN_CONT, "", 0, 16, 1,
1573 &data[offset], limit, 1);
1574
1575 if (bad_count == 1) {
1576 error ^= POISON_FREE;
1577 if (!(error & (error - 1))) {
1578 pr_err("Single bit error detected. Probably bad RAM.\n");
1579#ifdef CONFIG_X86
1580 pr_err("Run memtest86+ or a similar memory test tool.\n");
1581#else
1582 pr_err("Run a memory test tool.\n");
1583#endif
1584 }
1585 }
1586}
1587#endif
1588
1589#if DEBUG
1590
1591static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1592{
1593 int i, size;
1594 char *realobj;
1595
1596 if (cachep->flags & SLAB_RED_ZONE) {
1597 pr_err("Redzone: 0x%llx/0x%llx\n",
1598 *dbg_redzone1(cachep, objp),
1599 *dbg_redzone2(cachep, objp));
1600 }
1601
1602 if (cachep->flags & SLAB_STORE_USER) {
1603 pr_err("Last user: [<%p>](%pSR)\n",
1604 *dbg_userword(cachep, objp),
1605 *dbg_userword(cachep, objp));
1606 }
1607 realobj = (char *)objp + obj_offset(cachep);
1608 size = cachep->object_size;
1609 for (i = 0; i < size && lines; i += 16, lines--) {
1610 int limit;
1611 limit = 16;
1612 if (i + limit > size)
1613 limit = size - i;
1614 dump_line(realobj, i, limit);
1615 }
1616}
1617
1618static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1619{
1620 char *realobj;
1621 int size, i;
1622 int lines = 0;
1623
1624 if (is_debug_pagealloc_cache(cachep))
1625 return;
1626
1627 realobj = (char *)objp + obj_offset(cachep);
1628 size = cachep->object_size;
1629
1630 for (i = 0; i < size; i++) {
1631 char exp = POISON_FREE;
1632 if (i == size - 1)
1633 exp = POISON_END;
1634 if (realobj[i] != exp) {
1635 int limit;
1636
1637
1638 if (lines == 0) {
1639 pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
1640 print_tainted(), cachep->name,
1641 realobj, size);
1642 print_objinfo(cachep, objp, 0);
1643 }
1644
1645 i = (i / 16) * 16;
1646 limit = 16;
1647 if (i + limit > size)
1648 limit = size - i;
1649 dump_line(realobj, i, limit);
1650 i += 16;
1651 lines++;
1652
1653 if (lines > 5)
1654 break;
1655 }
1656 }
1657 if (lines != 0) {
1658
1659
1660
1661 struct page *page = virt_to_head_page(objp);
1662 unsigned int objnr;
1663
1664 objnr = obj_to_index(cachep, page, objp);
1665 if (objnr) {
1666 objp = index_to_obj(cachep, page, objnr - 1);
1667 realobj = (char *)objp + obj_offset(cachep);
1668 pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
1669 print_objinfo(cachep, objp, 2);
1670 }
1671 if (objnr + 1 < cachep->num) {
1672 objp = index_to_obj(cachep, page, objnr + 1);
1673 realobj = (char *)objp + obj_offset(cachep);
1674 pr_err("Next obj: start=%p, len=%d\n", realobj, size);
1675 print_objinfo(cachep, objp, 2);
1676 }
1677 }
1678}
1679#endif
1680
1681#if DEBUG
1682static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1683 struct page *page)
1684{
1685 int i;
1686
1687 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) {
1688 poison_obj(cachep, page->freelist - obj_offset(cachep),
1689 POISON_FREE);
1690 }
1691
1692 for (i = 0; i < cachep->num; i++) {
1693 void *objp = index_to_obj(cachep, page, i);
1694
1695 if (cachep->flags & SLAB_POISON) {
1696 check_poison_obj(cachep, objp);
1697 slab_kernel_map(cachep, objp, 1, 0);
1698 }
1699 if (cachep->flags & SLAB_RED_ZONE) {
1700 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1701 slab_error(cachep, "start of a freed object was overwritten");
1702 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1703 slab_error(cachep, "end of a freed object was overwritten");
1704 }
1705 }
1706}
1707#else
1708static void slab_destroy_debugcheck(struct kmem_cache *cachep,
1709 struct page *page)
1710{
1711}
1712#endif
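
/*
 * Destroy one slab page: run the debug checks, hand the page back to
 * the page allocator (deferred through RCU for SLAB_DESTROY_BY_RCU
 * caches), and release an off-slab freelist if one was used.
 */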
1723static void slab_destroy(struct kmem_cache *cachep, struct page *page)
1724{
1725 void *freelist;
1726
1727 freelist = page->freelist;
1728 slab_destroy_debugcheck(cachep, page);
1729 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
1730 call_rcu(&page->rcu_head, kmem_rcu_free);
1731 else
1732 kmem_freepages(cachep, page);
1733
1734
1735
1736
1737
1738 if (OFF_SLAB(cachep))
1739 kmem_cache_free(cachep->freelist_cache, freelist);
1740}
1741
1742static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
1743{
1744 struct page *page, *n;
1745
1746 list_for_each_entry_safe(page, n, list, lru) {
1747 list_del(&page->lru);
1748 slab_destroy(cachep, page);
1749 }
1750}
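
/*
 * Find the smallest page order that gives an acceptable slab layout for
 * this object size. The search stops early for reclaimable caches, once
 * gfporder reaches slab_max_order, or once the wasted space (left_over)
 * drops below 1/8th of the slab.
 */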
1764static size_t calculate_slab_order(struct kmem_cache *cachep,
1765 size_t size, unsigned long flags)
1766{
1767 size_t left_over = 0;
1768 int gfporder;
1769
1770 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
1771 unsigned int num;
1772 size_t remainder;
1773
1774 num = cache_estimate(gfporder, size, flags, &remainder);
1775 if (!num)
1776 continue;
1777
1778
1779 if (num > SLAB_OBJ_MAX_NUM)
1780 break;
1781
1782 if (flags & CFLGS_OFF_SLAB) {
1783 struct kmem_cache *freelist_cache;
1784 size_t freelist_size;
1785
1786 freelist_size = num * sizeof(freelist_idx_t);
1787 freelist_cache = kmalloc_slab(freelist_size, 0u);
1788 if (!freelist_cache)
1789 continue;
1790
1791
1792
1793
1794
1795 if (OFF_SLAB(freelist_cache))
1796 continue;
1797
1798
1799 if (freelist_cache->size > cachep->size / 2)
1800 continue;
1801 }
1802
1803
1804 cachep->num = num;
1805 cachep->gfporder = gfporder;
1806 left_over = remainder;
1807
1808
1809
1810
1811
1812
1813 if (flags & SLAB_RECLAIM_ACCOUNT)
1814 break;
1815
1816
1817
1818
1819
1820 if (gfporder >= slab_max_order)
1821 break;
1822
1823
1824
1825
1826 if (left_over * 8 <= (PAGE_SIZE << gfporder))
1827 break;
1828 }
1829 return left_over;
1830}
1831
1832static struct array_cache __percpu *alloc_kmem_cache_cpus(
1833 struct kmem_cache *cachep, int entries, int batchcount)
1834{
1835 int cpu;
1836 size_t size;
1837 struct array_cache __percpu *cpu_cache;
1838
1839 size = sizeof(void *) * entries + sizeof(struct array_cache);
1840 cpu_cache = __alloc_percpu(size, sizeof(void *));
1841
1842 if (!cpu_cache)
1843 return NULL;
1844
1845 for_each_possible_cpu(cpu) {
1846 init_arraycache(per_cpu_ptr(cpu_cache, cpu),
1847 entries, batchcount);
1848 }
1849
1850 return cpu_cache;
1851}
1852
1853static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
1854{
1855 if (slab_state >= FULL)
1856 return enable_cpucache(cachep, gfp);
1857
1858 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
1859 if (!cachep->cpu_cache)
1860 return 1;
1861
1862 if (slab_state == DOWN) {
1863
1864 set_up_node(kmem_cache, CACHE_CACHE);
1865 } else if (slab_state == PARTIAL) {
1866
1867 set_up_node(cachep, SIZE_NODE);
1868 } else {
1869 int node;
1870
1871 for_each_online_node(node) {
1872 cachep->node[node] = kmalloc_node(
1873 sizeof(struct kmem_cache_node), gfp, node);
1874 BUG_ON(!cachep->node[node]);
1875 kmem_cache_node_init(cachep->node[node]);
1876 }
1877 }
1878
1879 cachep->node[numa_mem_id()]->next_reap =
1880 jiffies + REAPTIMEOUT_NODE +
1881 ((unsigned long)cachep) % REAPTIMEOUT_NODE;
1882
1883 cpu_cache_get(cachep)->avail = 0;
1884 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1885 cpu_cache_get(cachep)->batchcount = 1;
1886 cpu_cache_get(cachep)->touched = 0;
1887 cachep->batchcount = 1;
1888 cachep->limit = BOOT_CPUCACHE_ENTRIES;
1889 return 0;
1890}
1891
1892unsigned long kmem_cache_flags(unsigned long object_size,
1893 unsigned long flags, const char *name,
1894 void (*ctor)(void *))
1895{
1896 return flags;
1897}
1898
1899struct kmem_cache *
1900__kmem_cache_alias(const char *name, size_t size, size_t align,
1901 unsigned long flags, void (*ctor)(void *))
1902{
1903 struct kmem_cache *cachep;
1904
1905 cachep = find_mergeable(size, align, flags, name, ctor);
1906 if (cachep) {
1907 cachep->refcount++;
1908
1909
1910
1911
1912
1913 cachep->object_size = max_t(int, cachep->object_size, size);
1914 }
1915 return cachep;
1916}
1917
1918static bool set_objfreelist_slab_cache(struct kmem_cache *cachep,
1919 size_t size, unsigned long flags)
1920{
1921 size_t left;
1922
1923 cachep->num = 0;
1924
1925 if (cachep->ctor || flags & SLAB_DESTROY_BY_RCU)
1926 return false;
1927
1928 left = calculate_slab_order(cachep, size,
1929 flags | CFLGS_OBJFREELIST_SLAB);
1930 if (!cachep->num)
1931 return false;
1932
1933 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size)
1934 return false;
1935
1936 cachep->colour = left / cachep->colour_off;
1937
1938 return true;
1939}
1940
1941static bool set_off_slab_cache(struct kmem_cache *cachep,
1942 size_t size, unsigned long flags)
1943{
1944 size_t left;
1945
1946 cachep->num = 0;
1947
1948
1949
1950
1951
1952 if (flags & SLAB_NOLEAKTRACE)
1953 return false;
1954
1955
1956
1957
1958
1959 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB);
1960 if (!cachep->num)
1961 return false;
1962
1963
1964
1965
1966
1967 if (left >= cachep->num * sizeof(freelist_idx_t))
1968 return false;
1969
1970 cachep->colour = left / cachep->colour_off;
1971
1972 return true;
1973}
1974
1975static bool set_on_slab_cache(struct kmem_cache *cachep,
1976 size_t size, unsigned long flags)
1977{
1978 size_t left;
1979
1980 cachep->num = 0;
1981
1982 left = calculate_slab_order(cachep, size, flags);
1983 if (!cachep->num)
1984 return false;
1985
1986 cachep->colour = left / cachep->colour_off;
1987
1988 return true;
1989}
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012int
2013__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2014{
2015 size_t ralign = BYTES_PER_WORD;
2016 gfp_t gfp;
2017 int err;
2018 size_t size = cachep->size;
2019
2020#if DEBUG
2021#if FORCED_DEBUG
2022
2023
2024
2025
2026
2027
2028 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2029 2 * sizeof(unsigned long long)))
2030 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2031 if (!(flags & SLAB_DESTROY_BY_RCU))
2032 flags |= SLAB_POISON;
2033#endif
2034#endif
2035
2036
2037
2038
2039
2040
2041 if (size & (BYTES_PER_WORD - 1)) {
2042 size += (BYTES_PER_WORD - 1);
2043 size &= ~(BYTES_PER_WORD - 1);
2044 }
2045
2046 if (flags & SLAB_RED_ZONE) {
2047 ralign = REDZONE_ALIGN;
2048
2049
2050 size += REDZONE_ALIGN - 1;
2051 size &= ~(REDZONE_ALIGN - 1);
2052 }
2053
2054
2055 if (ralign < cachep->align) {
2056 ralign = cachep->align;
2057 }
2058
2059 if (ralign > __alignof__(unsigned long long))
2060 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2061
2062
2063
2064 cachep->align = ralign;
2065 cachep->colour_off = cache_line_size();
2066
2067 if (cachep->colour_off < cachep->align)
2068 cachep->colour_off = cachep->align;
2069
2070 if (slab_is_available())
2071 gfp = GFP_KERNEL;
2072 else
2073 gfp = GFP_NOWAIT;
2074
2075#if DEBUG
2076
2077
2078
2079
2080
2081 if (flags & SLAB_RED_ZONE) {
2082
2083 cachep->obj_offset += sizeof(unsigned long long);
2084 size += 2 * sizeof(unsigned long long);
2085 }
2086 if (flags & SLAB_STORE_USER) {
2087
2088
2089
2090
2091 if (flags & SLAB_RED_ZONE)
2092 size += REDZONE_ALIGN;
2093 else
2094 size += BYTES_PER_WORD;
2095 }
2096#endif
2097
2098 kasan_cache_create(cachep, &size, &flags);
2099
2100 size = ALIGN(size, cachep->align);
2101
2102
2103
2104
2105 if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
2106 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
2107
2108#if DEBUG
2109
2110
2111
2112
2113
2114
2115
2116 if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
2117 size >= 256 && cachep->object_size > cache_line_size()) {
2118 if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
2119 size_t tmp_size = ALIGN(size, PAGE_SIZE);
2120
2121 if (set_off_slab_cache(cachep, tmp_size, flags)) {
2122 flags |= CFLGS_OFF_SLAB;
2123 cachep->obj_offset += tmp_size - size;
2124 size = tmp_size;
2125 goto done;
2126 }
2127 }
2128 }
2129#endif
2130
2131 if (set_objfreelist_slab_cache(cachep, size, flags)) {
2132 flags |= CFLGS_OBJFREELIST_SLAB;
2133 goto done;
2134 }
2135
2136 if (set_off_slab_cache(cachep, size, flags)) {
2137 flags |= CFLGS_OFF_SLAB;
2138 goto done;
2139 }
2140
2141 if (set_on_slab_cache(cachep, size, flags))
2142 goto done;
2143
2144 return -E2BIG;
2145
2146done:
2147 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
2148 cachep->flags = flags;
2149 cachep->allocflags = __GFP_COMP;
2150 if (flags & SLAB_CACHE_DMA)
2151 cachep->allocflags |= GFP_DMA;
2152 cachep->size = size;
2153 cachep->reciprocal_buffer_size = reciprocal_value(size);
2154
2155#if DEBUG
2156
2157
2158
2159
2160
2161 if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
2162 (cachep->flags & SLAB_POISON) &&
2163 is_debug_pagealloc_cache(cachep))
2164 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2165#endif
2166
2167 if (OFF_SLAB(cachep)) {
2168 cachep->freelist_cache =
2169 kmalloc_slab(cachep->freelist_size, 0u);
2170 }
2171
2172 err = setup_cpu_cache(cachep, gfp);
2173 if (err) {
2174 __kmem_cache_release(cachep);
2175 return err;
2176 }
2177
2178 return 0;
2179}
2180
2181#if DEBUG
2182static void check_irq_off(void)
2183{
2184 BUG_ON(!irqs_disabled());
2185}
2186
2187static void check_irq_on(void)
2188{
2189 BUG_ON(irqs_disabled());
2190}
2191
2192static void check_mutex_acquired(void)
2193{
2194 BUG_ON(!mutex_is_locked(&slab_mutex));
2195}
2196
2197static void check_spinlock_acquired(struct kmem_cache *cachep)
2198{
2199#ifdef CONFIG_SMP
2200 check_irq_off();
2201 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
2202#endif
2203}
2204
2205static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2206{
2207#ifdef CONFIG_SMP
2208 check_irq_off();
2209 assert_spin_locked(&get_node(cachep, node)->list_lock);
2210#endif
2211}
2212
2213#else
2214#define check_irq_off() do { } while(0)
2215#define check_irq_on() do { } while(0)
2216#define check_mutex_acquired() do { } while(0)
2217#define check_spinlock_acquired(x) do { } while(0)
2218#define check_spinlock_acquired_node(x, y) do { } while(0)
2219#endif
2220
2221static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
2222 int node, bool free_all, struct list_head *list)
2223{
2224 int tofree;
2225
2226 if (!ac || !ac->avail)
2227 return;
2228
2229 tofree = free_all ? ac->avail : (ac->limit + 4) / 5;
2230 if (tofree > ac->avail)
2231 tofree = (ac->avail + 1) / 2;
2232
2233 free_block(cachep, ac->entry, tofree, node, list);
2234 ac->avail -= tofree;
2235 memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail);
2236}
2237
2238static void do_drain(void *arg)
2239{
2240 struct kmem_cache *cachep = arg;
2241 struct array_cache *ac;
2242 int node = numa_mem_id();
2243 struct kmem_cache_node *n;
2244 LIST_HEAD(list);
2245
2246 check_irq_off();
2247 ac = cpu_cache_get(cachep);
2248 n = get_node(cachep, node);
2249 spin_lock(&n->list_lock);
2250 free_block(cachep, ac->entry, ac->avail, node, &list);
2251 spin_unlock(&n->list_lock);
2252 slabs_destroy(cachep, &list);
2253 ac->avail = 0;
2254}
2255
2256static void drain_cpu_caches(struct kmem_cache *cachep)
2257{
2258 struct kmem_cache_node *n;
2259 int node;
2260 LIST_HEAD(list);
2261
2262 on_each_cpu(do_drain, cachep, 1);
2263 check_irq_on();
2264 for_each_kmem_cache_node(cachep, node, n)
2265 if (n->alien)
2266 drain_alien_cache(cachep, n->alien);
2267
2268 for_each_kmem_cache_node(cachep, node, n) {
2269 spin_lock_irq(&n->list_lock);
2270 drain_array_locked(cachep, n->shared, node, true, &list);
2271 spin_unlock_irq(&n->list_lock);
2272
2273 slabs_destroy(cachep, &list);
2274 }
2275}
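
/*
 * Remove up to 'tofree' completely free slabs from the node's
 * slabs_free list and destroy them. Returns the number of slabs freed.
 */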
2283static int drain_freelist(struct kmem_cache *cache,
2284 struct kmem_cache_node *n, int tofree)
2285{
2286 struct list_head *p;
2287 int nr_freed;
2288 struct page *page;
2289
2290 nr_freed = 0;
2291 while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
2292
2293 spin_lock_irq(&n->list_lock);
2294 p = n->slabs_free.prev;
2295 if (p == &n->slabs_free) {
2296 spin_unlock_irq(&n->list_lock);
2297 goto out;
2298 }
2299
2300 page = list_entry(p, struct page, lru);
2301 list_del(&page->lru);
2302 n->free_slabs--;
2303 n->total_slabs--;
2304
2305
2306
2307
2308 n->free_objects -= cache->num;
2309 spin_unlock_irq(&n->list_lock);
2310 slab_destroy(cache, page);
2311 nr_freed++;
2312 }
2313out:
2314 return nr_freed;
2315}
2316
2317int __kmem_cache_shrink(struct kmem_cache *cachep)
2318{
2319 int ret = 0;
2320 int node;
2321 struct kmem_cache_node *n;
2322
2323 drain_cpu_caches(cachep);
2324
2325 check_irq_on();
2326 for_each_kmem_cache_node(cachep, node, n) {
2327 drain_freelist(cachep, n, INT_MAX);
2328
2329 ret += !list_empty(&n->slabs_full) ||
2330 !list_empty(&n->slabs_partial);
2331 }
2332 return (ret ? 1 : 0);
2333}
2334
2335int __kmem_cache_shutdown(struct kmem_cache *cachep)
2336{
2337 return __kmem_cache_shrink(cachep);
2338}
2339
2340void __kmem_cache_release(struct kmem_cache *cachep)
2341{
2342 int i;
2343 struct kmem_cache_node *n;
2344
2345 cache_random_seq_destroy(cachep);
2346
2347 free_percpu(cachep->cpu_cache);
2348
2349
2350 for_each_kmem_cache_node(cachep, i, n) {
2351 kfree(n->shared);
2352 free_alien_cache(n->alien);
2353 kfree(n);
2354 cachep->node[i] = NULL;
2355 }
2356}
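
/*
 * Set up the slab management data for a freshly allocated page:
 * s_mem points past the colour offset, and the freelist is either
 * embedded in one of the objects (OBJFREELIST), allocated from a
 * separate freelist cache (OFF_SLAB), or placed at the end of the slab.
 */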
2372static void *alloc_slabmgmt(struct kmem_cache *cachep,
2373 struct page *page, int colour_off,
2374 gfp_t local_flags, int nodeid)
2375{
2376 void *freelist;
2377 void *addr = page_address(page);
2378
2379 page->s_mem = addr + colour_off;
2380 page->active = 0;
2381
2382 if (OBJFREELIST_SLAB(cachep))
2383 freelist = NULL;
2384 else if (OFF_SLAB(cachep)) {
2385
2386 freelist = kmem_cache_alloc_node(cachep->freelist_cache,
2387 local_flags, nodeid);
2388 if (!freelist)
2389 return NULL;
2390 } else {
2391
2392 freelist = addr + (PAGE_SIZE << cachep->gfporder) -
2393 cachep->freelist_size;
2394 }
2395
2396 return freelist;
2397}
2398
2399static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
2400{
2401 return ((freelist_idx_t *)page->freelist)[idx];
2402}
2403
2404static inline void set_free_obj(struct page *page,
2405 unsigned int idx, freelist_idx_t val)
2406{
2407 ((freelist_idx_t *)(page->freelist))[idx] = val;
2408}
2409
2410static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
2411{
2412#if DEBUG
2413 int i;
2414
2415 for (i = 0; i < cachep->num; i++) {
2416 void *objp = index_to_obj(cachep, page, i);
2417
2418 if (cachep->flags & SLAB_STORE_USER)
2419 *dbg_userword(cachep, objp) = NULL;
2420
2421 if (cachep->flags & SLAB_RED_ZONE) {
2422 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2423 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2424 }
2425
2426
2427
2428
2429
2430 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
2431 kasan_unpoison_object_data(cachep,
2432 objp + obj_offset(cachep));
2433 cachep->ctor(objp + obj_offset(cachep));
2434 kasan_poison_object_data(
2435 cachep, objp + obj_offset(cachep));
2436 }
2437
2438 if (cachep->flags & SLAB_RED_ZONE) {
2439 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2440 slab_error(cachep, "constructor overwrote the end of an object");
2441 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2442 slab_error(cachep, "constructor overwrote the start of an object");
2443 }
2444
2445 if (cachep->flags & SLAB_POISON) {
2446 poison_obj(cachep, objp, POISON_FREE);
2447 slab_kernel_map(cachep, objp, 0, 0);
2448 }
2449 }
2450#endif
2451}
2452
2453#ifdef CONFIG_SLAB_FREELIST_RANDOM
2454
2455union freelist_init_state {
2456 struct {
2457 unsigned int pos;
2458 unsigned int *list;
2459 unsigned int count;
2460 };
2461 struct rnd_state rnd_state;
2462};
2463
2464
2465
2466
2467
2468static bool freelist_state_initialize(union freelist_init_state *state,
2469 struct kmem_cache *cachep,
2470 unsigned int count)
2471{
2472 bool ret;
2473 unsigned int rand;
2474
2475
2476 rand = get_random_int();
2477
2478
2479 if (!cachep->random_seq) {
2480 prandom_seed_state(&state->rnd_state, rand);
2481 ret = false;
2482 } else {
2483 state->list = cachep->random_seq;
2484 state->count = count;
2485 state->pos = rand % count;
2486 ret = true;
2487 }
2488 return ret;
2489}
2490
2491
2492static freelist_idx_t next_random_slot(union freelist_init_state *state)
2493{
2494 if (state->pos >= state->count)
2495 state->pos = 0;
2496 return state->list[state->pos++];
2497}
2498
2499
2500static void swap_free_obj(struct page *page, unsigned int a, unsigned int b)
2501{
2502 swap(((freelist_idx_t *)page->freelist)[a],
2503 ((freelist_idx_t *)page->freelist)[b]);
2504}
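
/*
 * CONFIG_SLAB_FREELIST_RANDOM: initialise the freelist in a random
 * order, either by consuming the cache's pre-computed random sequence
 * or, if none exists yet, with an on-the-fly Fisher-Yates shuffle.
 */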
2510static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
2511{
2512 unsigned int objfreelist = 0, i, rand, count = cachep->num;
2513 union freelist_init_state state;
2514 bool precomputed;
2515
2516 if (count < 2)
2517 return false;
2518
2519 precomputed = freelist_state_initialize(&state, cachep, count);
2520
2521
2522 if (OBJFREELIST_SLAB(cachep)) {
2523 if (!precomputed)
2524 objfreelist = count - 1;
2525 else
2526 objfreelist = next_random_slot(&state);
2527 page->freelist = index_to_obj(cachep, page, objfreelist) +
2528 obj_offset(cachep);
2529 count--;
2530 }
2531
2532
2533
2534
2535
2536 if (!precomputed) {
2537 for (i = 0; i < count; i++)
2538 set_free_obj(page, i, i);
2539
2540
2541 for (i = count - 1; i > 0; i--) {
2542 rand = prandom_u32_state(&state.rnd_state);
2543 rand %= (i + 1);
2544 swap_free_obj(page, i, rand);
2545 }
2546 } else {
2547 for (i = 0; i < count; i++)
2548 set_free_obj(page, i, next_random_slot(&state));
2549 }
2550
2551 if (OBJFREELIST_SLAB(cachep))
2552 set_free_obj(page, cachep->num - 1, objfreelist);
2553
2554 return true;
2555}
2556#else
2557static inline bool shuffle_freelist(struct kmem_cache *cachep,
2558 struct page *page)
2559{
2560 return false;
2561}
2562#endif
2563
2564static void cache_init_objs(struct kmem_cache *cachep,
2565 struct page *page)
2566{
2567 int i;
2568 void *objp;
2569 bool shuffled;
2570
2571 cache_init_objs_debug(cachep, page);
2572
2573
2574 shuffled = shuffle_freelist(cachep, page);
2575
2576 if (!shuffled && OBJFREELIST_SLAB(cachep)) {
2577 page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
2578 obj_offset(cachep);
2579 }
2580
2581 for (i = 0; i < cachep->num; i++) {
2582 objp = index_to_obj(cachep, page, i);
2583 kasan_init_slab_obj(cachep, objp);
2584
2585
2586 if (DEBUG == 0 && cachep->ctor) {
2587 kasan_unpoison_object_data(cachep, objp);
2588 cachep->ctor(objp);
2589 kasan_poison_object_data(cachep, objp);
2590 }
2591
2592 if (!shuffled)
2593 set_free_obj(page, i, i);
2594 }
2595}
2596
2597static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
2598{
2599 void *objp;
2600
2601 objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
2602 page->active++;
2603
2604#if DEBUG
2605 if (cachep->flags & SLAB_STORE_USER)
2606 set_store_user_dirty(cachep);
2607#endif
2608
2609 return objp;
2610}
2611
2612static void slab_put_obj(struct kmem_cache *cachep,
2613 struct page *page, void *objp)
2614{
2615 unsigned int objnr = obj_to_index(cachep, page, objp);
2616#if DEBUG
2617 unsigned int i;
2618
2619
2620 for (i = page->active; i < cachep->num; i++) {
2621 if (get_free_obj(page, i) == objnr) {
2622 pr_err("slab: double free detected in cache '%s', objp %p\n",
2623 cachep->name, objp);
2624 BUG();
2625 }
2626 }
2627#endif
2628 page->active--;
2629 if (!page->freelist)
2630 page->freelist = objp + obj_offset(cachep);
2631
2632 set_free_obj(page, page->active, objnr);
2633}
2634
2635/*
2636 * Map pages beginning at addr to the given cache and slab. This is required
2637 * for the slab allocator to be able to look up the cache and slab of a
2638 * virtual address for kfree, ksize, and slab debugging.
2639 */
2640static void slab_map_pages(struct kmem_cache *cache, struct page *page,
2641 void *freelist)
2642{
2643 page->slab_cache = cache;
2644 page->freelist = freelist;
2645}
2646
2647/*
2648 * Grow (by 1) the number of slabs within a cache. This is called by
2649 * kmem_cache_alloc() when there are no active objects left in a cache.
2650 */
2651static struct page *cache_grow_begin(struct kmem_cache *cachep,
2652 gfp_t flags, int nodeid)
2653{
2654 void *freelist;
2655 size_t offset;
2656 gfp_t local_flags;
2657 int page_node;
2658 struct kmem_cache_node *n;
2659 struct page *page;
2660
2661 /*
2662 * Be lazy and only check for valid flags here, keeping it out of the
2663 * critical path in kmem_cache_alloc().
2664 */
2665 if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
2666 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
2667 flags &= ~GFP_SLAB_BUG_MASK;
2668 pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
2669 invalid_mask, &invalid_mask, flags, &flags);
2670 dump_stack();
2671 }
2672 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2673
2674 check_irq_off();
2675 if (gfpflags_allow_blocking(local_flags))
2676 local_irq_enable();
2677
2678 /*
2679 * Get memory for the objects: attempt to allocate a physical page
2680 * from 'nodeid'.
2681 */
2682 page = kmem_getpages(cachep, local_flags, nodeid);
2683 if (!page)
2684 goto failed;
2685
2686 page_node = page_to_nid(page);
2687 n = get_node(cachep, page_node);
2688
2689 /* Get the colour offset for this slab and advance to the next value. */
2690 n->colour_next++;
2691 if (n->colour_next >= cachep->colour)
2692 n->colour_next = 0;
2693
2694 offset = n->colour_next;
2695 if (offset >= cachep->colour)
2696 offset = 0;
2697
2698 offset *= cachep->colour_off;
2699
2700 /* Get the slab management (freelist) storage. */
2701 freelist = alloc_slabmgmt(cachep, page, offset,
2702 local_flags & ~GFP_CONSTRAINT_MASK, page_node);
2703 if (OFF_SLAB(cachep) && !freelist)
2704 goto opps1;
2705
2706 slab_map_pages(cachep, page, freelist);
2707
2708 kasan_poison_slab(page);
2709 cache_init_objs(cachep, page);
2710
2711 if (gfpflags_allow_blocking(local_flags))
2712 local_irq_disable();
2713
2714 return page;
2715
2716opps1:
2717 kmem_freepages(cachep, page);
2718failed:
2719 if (gfpflags_allow_blocking(local_flags))
2720 local_irq_disable();
2721 return NULL;
2722}
2723
2724static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
2725{
2726 struct kmem_cache_node *n;
2727 void *list = NULL;
2728
2729 check_irq_off();
2730
2731 if (!page)
2732 return;
2733
2734 INIT_LIST_HEAD(&page->lru);
2735 n = get_node(cachep, page_to_nid(page));
2736
2737 spin_lock(&n->list_lock);
2738 n->total_slabs++;
2739 if (!page->active) {
2740 list_add_tail(&page->lru, &(n->slabs_free));
2741 n->free_slabs++;
2742 } else
2743 fixup_slab_list(cachep, n, page, &list);
2744
2745 STATS_INC_GROWN(cachep);
2746 n->free_objects += cachep->num - page->active;
2747 spin_unlock(&n->list_lock);
2748
2749 fixup_objfreelist_debug(cachep, &list);
2750}
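
/*
 * Illustrative sketch (not a new code path): callers in this file pair the
 * two halves above as follows, with interrupts disabled around the whole
 * sequence:
 *
 *	page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
 *	... optionally pull objects off the new slab ...
 *	cache_grow_end(cachep, page);
 */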
2751
2752#if DEBUG
2753
2754/*
2755 * Perform extra freeing checks:
2756 * - detect bad pointers.
2757 * - POISON/RED_ZONE checking
2758 */
2759static void kfree_debugcheck(const void *objp)
2760{
2761 if (!virt_addr_valid(objp)) {
2762 pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2763 (unsigned long)objp);
2764 BUG();
2765 }
2766}
2767
2768static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2769{
2770 unsigned long long redzone1, redzone2;
2771
2772 redzone1 = *dbg_redzone1(cache, obj);
2773 redzone2 = *dbg_redzone2(cache, obj);
2774
2775
2776
2777
2778 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2779 return;
2780
2781 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2782 slab_error(cache, "double free detected");
2783 else
2784 slab_error(cache, "memory outside object was overwritten");
2785
2786 pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
2787 obj, redzone1, redzone2);
2788}
2789
2790static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2791 unsigned long caller)
2792{
2793 unsigned int objnr;
2794 struct page *page;
2795
2796 BUG_ON(virt_to_cache(objp) != cachep);
2797
2798 objp -= obj_offset(cachep);
2799 kfree_debugcheck(objp);
2800 page = virt_to_head_page(objp);
2801
2802 if (cachep->flags & SLAB_RED_ZONE) {
2803 verify_redzone_free(cachep, objp);
2804 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2805 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2806 }
2807 if (cachep->flags & SLAB_STORE_USER) {
2808 set_store_user_dirty(cachep);
2809 *dbg_userword(cachep, objp) = (void *)caller;
2810 }
2811
2812 objnr = obj_to_index(cachep, page, objp);
2813
2814 BUG_ON(objnr >= cachep->num);
2815 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2816
2817 if (cachep->flags & SLAB_POISON) {
2818 poison_obj(cachep, objp, POISON_FREE);
2819 slab_kernel_map(cachep, objp, 0, caller);
2820 }
2821 return objp;
2822}
2823
2824#else
2825#define kfree_debugcheck(x) do { } while(0)
2826#define cache_free_debugcheck(x,objp,z) (objp)
2827#endif
2828
2829static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
2830 void **list)
2831{
2832#if DEBUG
2833 void *next = *list;
2834 void *objp;
2835
2836 while (next) {
2837 objp = next - obj_offset(cachep);
2838 next = *(void **)next;
2839 poison_obj(cachep, objp, POISON_FREE);
2840 }
2841#endif
2842}
2843
2844static inline void fixup_slab_list(struct kmem_cache *cachep,
2845 struct kmem_cache_node *n, struct page *page,
2846 void **list)
2847{
2848 /* Move the slab to the correct list (full or partial). */
2849 list_del(&page->lru);
2850 if (page->active == cachep->num) {
2851 list_add(&page->lru, &n->slabs_full);
2852 if (OBJFREELIST_SLAB(cachep)) {
2853#if DEBUG
2854 /* Poisoning is done later, without holding the list lock. */
2855 if (cachep->flags & SLAB_POISON) {
2856 void **objp = page->freelist;
2857
2858 *objp = *list;
2859 *list = objp;
2860 }
2861#endif
2862 page->freelist = NULL;
2863 }
2864 } else
2865 list_add(&page->lru, &n->slabs_partial);
2866}
2867
2868/* Try to find a non-pfmemalloc slab if needed. */
2869static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
2870 struct page *page, bool pfmemalloc)
2871{
2872 if (!page)
2873 return NULL;
2874
2875 if (pfmemalloc)
2876 return page;
2877
2878 if (!PageSlabPfmemalloc(page))
2879 return page;
2880
2881 /* No need to keep a pfmemalloc slab if we have enough free objects. */
2882 if (n->free_objects > n->free_limit) {
2883 ClearPageSlabPfmemalloc(page);
2884 return page;
2885 }
2886
2887 /* Move this pfmemalloc slab to the tail and search for a usable one. */
2888 list_del(&page->lru);
2889 if (!page->active) {
2890 list_add_tail(&page->lru, &n->slabs_free);
2891 n->free_slabs++;
2892 } else
2893 list_add_tail(&page->lru, &n->slabs_partial);
2894
2895 list_for_each_entry(page, &n->slabs_partial, lru) {
2896 if (!PageSlabPfmemalloc(page))
2897 return page;
2898 }
2899
2900 n->free_touched = 1;
2901 list_for_each_entry(page, &n->slabs_free, lru) {
2902 if (!PageSlabPfmemalloc(page)) {
2903 n->free_slabs--;
2904 return page;
2905 }
2906 }
2907
2908 return NULL;
2909}
2910
2911static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
2912{
2913 struct page *page;
2914
2915 assert_spin_locked(&n->list_lock);
2916 page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
2917 if (!page) {
2918 n->free_touched = 1;
2919 page = list_first_entry_or_null(&n->slabs_free, struct page,
2920 lru);
2921 if (page)
2922 n->free_slabs--;
2923 }
2924
2925 if (sk_memalloc_socks())
2926 page = get_valid_first_slab(n, page, pfmemalloc);
2927
2928 return page;
2929}
2930
2931static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
2932 struct kmem_cache_node *n, gfp_t flags)
2933{
2934 struct page *page;
2935 void *obj;
2936 void *list = NULL;
2937
2938 if (!gfp_pfmemalloc_allowed(flags))
2939 return NULL;
2940
2941 spin_lock(&n->list_lock);
2942 page = get_first_slab(n, true);
2943 if (!page) {
2944 spin_unlock(&n->list_lock);
2945 return NULL;
2946 }
2947
2948 obj = slab_get_obj(cachep, page);
2949 n->free_objects--;
2950
2951 fixup_slab_list(cachep, n, page, &list);
2952
2953 spin_unlock(&n->list_lock);
2954 fixup_objfreelist_debug(cachep, &list);
2955
2956 return obj;
2957}
2958
2959/*
2960 * The slab list is fixed up by fixup_slab_list() for an existing slab
2961 * or by cache_grow_end() for a newly grown one.
2962 */
2963static __always_inline int alloc_block(struct kmem_cache *cachep,
2964 struct array_cache *ac, struct page *page, int batchcount)
2965{
2966
2967
2968
2969 /* There must be at least one object available for allocation. */
2970 BUG_ON(page->active >= cachep->num);
2971
2972 while (page->active < cachep->num && batchcount--) {
2973 STATS_INC_ALLOCED(cachep);
2974 STATS_INC_ACTIVE(cachep);
2975 STATS_SET_HIGH(cachep);
2976
2977 ac->entry[ac->avail++] = slab_get_obj(cachep, page);
2978 }
2979
2980 return batchcount;
2981}
2982
2983static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2984{
2985 int batchcount;
2986 struct kmem_cache_node *n;
2987 struct array_cache *ac, *shared;
2988 int node;
2989 void *list = NULL;
2990 struct page *page;
2991
2992 check_irq_off();
2993 node = numa_mem_id();
2994
2995 ac = cpu_cache_get(cachep);
2996 batchcount = ac->batchcount;
2997 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2998 /*
2999 * If there was little recent activity on this cache, then
3000 * perform only a partial refill. Otherwise we could generate
3001 * refill bouncing.
3002 */
3003 batchcount = BATCHREFILL_LIMIT;
3004 }
3005 n = get_node(cachep, node);
3006
3007 BUG_ON(ac->avail > 0 || !n);
3008 shared = READ_ONCE(n->shared);
3009 if (!n->free_objects && (!shared || !shared->avail))
3010 goto direct_grow;
3011
3012 spin_lock(&n->list_lock);
3013 shared = READ_ONCE(n->shared);
3014
3015 /* See if we can refill from the shared array. */
3016 if (shared && transfer_objects(ac, shared, batchcount)) {
3017 shared->touched = 1;
3018 goto alloc_done;
3019 }
3020
3021 while (batchcount > 0) {
3022 /* Get the slab the allocation is to come from. */
3023 page = get_first_slab(n, false);
3024 if (!page)
3025 goto must_grow;
3026
3027 check_spinlock_acquired(cachep);
3028
3029 batchcount = alloc_block(cachep, ac, page, batchcount);
3030 fixup_slab_list(cachep, n, page, &list);
3031 }
3032
3033must_grow:
3034 n->free_objects -= ac->avail;
3035alloc_done:
3036 spin_unlock(&n->list_lock);
3037 fixup_objfreelist_debug(cachep, &list);
3038
3039direct_grow:
3040 if (unlikely(!ac->avail)) {
3041 /* Check if we can use an object from a pfmemalloc slab. */
3042 if (sk_memalloc_socks()) {
3043 void *obj = cache_alloc_pfmemalloc(cachep, n, flags);
3044
3045 if (obj)
3046 return obj;
3047 }
3048
3049 page = cache_grow_begin(cachep, gfp_exact_node(flags), node);
3050
3051 /*
3052 * cache_grow_begin() can re-enable interrupts, so the per-cpu
3053 * array_cache may have been replaced; reload it.
3054 */
3055 ac = cpu_cache_get(cachep);
3056 if (!ac->avail && page)
3057 alloc_block(cachep, ac, page, batchcount);
3058 cache_grow_end(cachep, page);
3059
3060 if (!ac->avail)
3061 return NULL;
3062 }
3063 ac->touched = 1;
3064
3065 return ac->entry[--ac->avail];
3066}
3067
3068static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3069 gfp_t flags)
3070{
3071 might_sleep_if(gfpflags_allow_blocking(flags));
3072}
3073
3074#if DEBUG
3075static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3076 gfp_t flags, void *objp, unsigned long caller)
3077{
3078 if (!objp)
3079 return objp;
3080 if (cachep->flags & SLAB_POISON) {
3081 check_poison_obj(cachep, objp);
3082 slab_kernel_map(cachep, objp, 1, 0);
3083 poison_obj(cachep, objp, POISON_INUSE);
3084 }
3085 if (cachep->flags & SLAB_STORE_USER)
3086 *dbg_userword(cachep, objp) = (void *)caller;
3087
3088 if (cachep->flags & SLAB_RED_ZONE) {
3089 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3090 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3091 slab_error(cachep, "double free, or memory outside object was overwritten");
3092 pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3093 objp, *dbg_redzone1(cachep, objp),
3094 *dbg_redzone2(cachep, objp));
3095 }
3096 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3097 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3098 }
3099
3100 objp += obj_offset(cachep);
3101 if (cachep->ctor && cachep->flags & SLAB_POISON)
3102 cachep->ctor(objp);
3103 if (ARCH_SLAB_MINALIGN &&
3104 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3105 pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3106 objp, (int)ARCH_SLAB_MINALIGN);
3107 }
3108 return objp;
3109}
3110#else
3111#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3112#endif
3113
3114static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3115{
3116 void *objp;
3117 struct array_cache *ac;
3118
3119 check_irq_off();
3120
3121 ac = cpu_cache_get(cachep);
3122 if (likely(ac->avail)) {
3123 ac->touched = 1;
3124 objp = ac->entry[--ac->avail];
3125
3126 STATS_INC_ALLOCHIT(cachep);
3127 goto out;
3128 }
3129
3130 STATS_INC_ALLOCMISS(cachep);
3131 objp = cache_alloc_refill(cachep, flags);
3132
3133
3134
3135 /* cache_alloc_refill() may replace 'ac'; reload it for kmemleak_erase(). */
3136 ac = cpu_cache_get(cachep);
3137
3138out:
3139 /*
3140 * To avoid a false negative, if an object that is in one of the
3141 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3142 * treat the array pointer as a reference to the object.
3143 */
3144 if (objp)
3145 kmemleak_erase(&ac->entry[ac->avail]);
3146 return objp;
3147}
3148
3149#ifdef CONFIG_NUMA
3150
3151/*
3152 * Try allocating on another node if cpuset slab spreading or the task's
3153 * mempolicy directs allocations elsewhere. Not used in interrupt context
3154 * or for __GFP_THISNODE allocations.
3155 */
3156static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3157{
3158 int nid_alloc, nid_here;
3159
3160 if (in_interrupt() || (flags & __GFP_THISNODE))
3161 return NULL;
3162 nid_alloc = nid_here = numa_mem_id();
3163 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3164 nid_alloc = cpuset_slab_spread_node();
3165 else if (current->mempolicy)
3166 nid_alloc = mempolicy_slab_node();
3167 if (nid_alloc != nid_here)
3168 return ____cache_alloc_node(cachep, flags, nid_alloc);
3169 return NULL;
3170}
3171
3172
3173/*
3174 * Fallback function if there was no memory available and no objects on a
3175 * certain node and falling back is permitted. First scan all nodes allowed
3176 * by the current context for available objects; if that fails, grow a new
3177 * slab without specifying a node, letting the page allocator do its
3178 * reclaim / fallback magic, and then allocate from that slab.
3179 */
3180static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3181{
3182 struct zonelist *zonelist;
3183 struct zoneref *z;
3184 struct zone *zone;
3185 enum zone_type high_zoneidx = gfp_zone(flags);
3186 void *obj = NULL;
3187 struct page *page;
3188 int nid;
3189 unsigned int cpuset_mems_cookie;
3190
3191 if (flags & __GFP_THISNODE)
3192 return NULL;
3193
3194retry_cpuset:
3195 cpuset_mems_cookie = read_mems_allowed_begin();
3196 zonelist = node_zonelist(mempolicy_slab_node(), flags);
3197
3198retry:
3199 /*
3200 * Look through the allowed nodes for objects available on
3201 * partially free or free slabs before growing any cache.
3202 */
3203 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3204 nid = zone_to_nid(zone);
3205
3206 if (cpuset_zone_allowed(zone, flags) &&
3207 get_node(cache, nid) &&
3208 get_node(cache, nid)->free_objects) {
3209 obj = ____cache_alloc_node(cache,
3210 gfp_exact_node(flags), nid);
3211 if (obj)
3212 break;
3213 }
3214 }
3215
3216 if (!obj) {
3217 /*
3218 * This allocation will be performed within the constraints
3219 * of the current cpuset / memory policy requirements.
3220 * We may trigger various forms of reclaim on the allowed
3221 * set and go into memory reserves if necessary.
3222 */
3223 page = cache_grow_begin(cache, flags, numa_mem_id());
3224 cache_grow_end(cache, page);
3225 if (page) {
3226 nid = page_to_nid(page);
3227 obj = ____cache_alloc_node(cache,
3228 gfp_exact_node(flags), nid);
3229
3230 /*
3231 * Another processor may have allocated the objects in the slab
3232 * since we are not holding any locks, so retry in that case.
3233 */
3234 if (!obj)
3235 goto retry;
3236 }
3237 }
3238
3239 if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie)))
3240 goto retry_cpuset;
3241 return obj;
3242}
3243
3244/*
3245 * Allocate an object from the given cache on a specific node.
3246 */
3247static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3248 int nodeid)
3249{
3250 struct page *page;
3251 struct kmem_cache_node *n;
3252 void *obj = NULL;
3253 void *list = NULL;
3254
3255 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3256 n = get_node(cachep, nodeid);
3257 BUG_ON(!n);
3258
3259 check_irq_off();
3260 spin_lock(&n->list_lock);
3261 page = get_first_slab(n, false);
3262 if (!page)
3263 goto must_grow;
3264
3265 check_spinlock_acquired_node(cachep, nodeid);
3266
3267 STATS_INC_NODEALLOCS(cachep);
3268 STATS_INC_ACTIVE(cachep);
3269 STATS_SET_HIGH(cachep);
3270
3271 BUG_ON(page->active == cachep->num);
3272
3273 obj = slab_get_obj(cachep, page);
3274 n->free_objects--;
3275
3276 fixup_slab_list(cachep, n, page, &list);
3277
3278 spin_unlock(&n->list_lock);
3279 fixup_objfreelist_debug(cachep, &list);
3280 return obj;
3281
3282must_grow:
3283 spin_unlock(&n->list_lock);
3284 page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
3285 if (page) {
3286
3287 obj = slab_get_obj(cachep, page);
3288 }
3289 cache_grow_end(cachep, page);
3290
3291 return obj ? obj : fallback_alloc(cachep, flags);
3292}
3293
3294static __always_inline void *
3295slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3296 unsigned long caller)
3297{
3298 unsigned long save_flags;
3299 void *ptr;
3300 int slab_node = numa_mem_id();
3301
3302 flags &= gfp_allowed_mask;
3303 cachep = slab_pre_alloc_hook(cachep, flags);
3304 if (unlikely(!cachep))
3305 return NULL;
3306
3307 cache_alloc_debugcheck_before(cachep, flags);
3308 local_irq_save(save_flags);
3309
3310 if (nodeid == NUMA_NO_NODE)
3311 nodeid = slab_node;
3312
3313 if (unlikely(!get_node(cachep, nodeid))) {
3314
3315 ptr = fallback_alloc(cachep, flags);
3316 goto out;
3317 }
3318
3319 if (nodeid == slab_node) {
3320 /*
3321 * Use the locally cached objects if possible.
3322 * However ____cache_alloc does not allow fallback
3323 * to other nodes. It may fail while we still have
3324 * objects on other nodes available.
3325 */
3326 ptr = ____cache_alloc(cachep, flags);
3327 if (ptr)
3328 goto out;
3329 }
3330
3331 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3332 out:
3333 local_irq_restore(save_flags);
3334 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3335
3336 if (unlikely(flags & __GFP_ZERO) && ptr)
3337 memset(ptr, 0, cachep->object_size);
3338
3339 slab_post_alloc_hook(cachep, flags, 1, &ptr);
3340 return ptr;
3341}
3342
3343static __always_inline void *
3344__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3345{
3346 void *objp;
3347
3348 if (current->mempolicy || cpuset_do_slab_mem_spread()) {
3349 objp = alternate_node_alloc(cache, flags);
3350 if (objp)
3351 goto out;
3352 }
3353 objp = ____cache_alloc(cache, flags);
3354
3355 /*
3356 * We may just have run out of memory on the local node.
3357 * ____cache_alloc_node() knows how to locate memory on other nodes.
3358 */
3359 if (!objp)
3360 objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3361
3362 out:
3363 return objp;
3364}
3365#else
3366
3367static __always_inline void *
3368__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3369{
3370 return ____cache_alloc(cachep, flags);
3371}
3372
3373#endif
3374
3375static __always_inline void *
3376slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
3377{
3378 unsigned long save_flags;
3379 void *objp;
3380
3381 flags &= gfp_allowed_mask;
3382 cachep = slab_pre_alloc_hook(cachep, flags);
3383 if (unlikely(!cachep))
3384 return NULL;
3385
3386 cache_alloc_debugcheck_before(cachep, flags);
3387 local_irq_save(save_flags);
3388 objp = __do_cache_alloc(cachep, flags);
3389 local_irq_restore(save_flags);
3390 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3391 prefetchw(objp);
3392
3393 if (unlikely(flags & __GFP_ZERO) && objp)
3394 memset(objp, 0, cachep->object_size);
3395
3396 slab_post_alloc_hook(cachep, flags, 1, &objp);
3397 return objp;
3398}
3399
3400/*
3401 * Caller needs to hold the correct kmem_cache_node's list_lock.
3402 * @list: list of detached free slabs, to be destroyed by the caller.
3403 */
3404static void free_block(struct kmem_cache *cachep, void **objpp,
3405 int nr_objects, int node, struct list_head *list)
3406{
3407 int i;
3408 struct kmem_cache_node *n = get_node(cachep, node);
3409 struct page *page;
3410
3411 n->free_objects += nr_objects;
3412
3413 for (i = 0; i < nr_objects; i++) {
3414 void *objp;
3415 struct page *page;
3416
3417 objp = objpp[i];
3418
3419 page = virt_to_head_page(objp);
3420 list_del(&page->lru);
3421 check_spinlock_acquired_node(cachep, node);
3422 slab_put_obj(cachep, page, objp);
3423 STATS_DEC_ACTIVE(cachep);
3424
3425 /* Fix up the slab chains: empty slabs go back to slabs_free. */
3426 if (page->active == 0) {
3427 list_add(&page->lru, &n->slabs_free);
3428 n->free_slabs++;
3429 } else {
3430 /*
3431 * Unconditionally move the slab to the end of the partial list on
3432 * free - maximum time for the other objects to be freed, too.
3433 */
3434 list_add_tail(&page->lru, &n->slabs_partial);
3435 }
3436 }
3437
3438 while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
3439 n->free_objects -= cachep->num;
3440
3441 page = list_last_entry(&n->slabs_free, struct page, lru);
3442 list_move(&page->lru, list);
3443 n->free_slabs--;
3444 n->total_slabs--;
3445 }
3446}
3447
3448static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3449{
3450 int batchcount;
3451 struct kmem_cache_node *n;
3452 int node = numa_mem_id();
3453 LIST_HEAD(list);
3454
3455 batchcount = ac->batchcount;
3456
3457 check_irq_off();
3458 n = get_node(cachep, node);
3459 spin_lock(&n->list_lock);
3460 if (n->shared) {
3461 struct array_cache *shared_array = n->shared;
3462 int max = shared_array->limit - shared_array->avail;
3463 if (max) {
3464 if (batchcount > max)
3465 batchcount = max;
3466 memcpy(&(shared_array->entry[shared_array->avail]),
3467 ac->entry, sizeof(void *) * batchcount);
3468 shared_array->avail += batchcount;
3469 goto free_done;
3470 }
3471 }
3472
3473 free_block(cachep, ac->entry, batchcount, node, &list);
3474free_done:
3475#if STATS
3476 {
3477 int i = 0;
3478 struct page *page;
3479
3480 list_for_each_entry(page, &n->slabs_free, lru) {
3481 BUG_ON(page->active);
3482
3483 i++;
3484 }
3485 STATS_SET_FREEABLE(cachep, i);
3486 }
3487#endif
3488 spin_unlock(&n->list_lock);
3489 slabs_destroy(cachep, &list);
3490 ac->avail -= batchcount;
3491 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3492}
3493
3494/*
3495 * Release an obj back to its cache. If the obj has a constructed state, it
3496 * must be in this state _before_ it is released. Called with interrupts off.
3497 */
3498static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3499 unsigned long caller)
3500{
3501 /* Put the object into the KASAN quarantine; don't touch it for now. */
3502 if (kasan_slab_free(cachep, objp))
3503 return;
3504
3505 ___cache_free(cachep, objp, caller);
3506}
3507
3508void ___cache_free(struct kmem_cache *cachep, void *objp,
3509 unsigned long caller)
3510{
3511 struct array_cache *ac = cpu_cache_get(cachep);
3512
3513 check_irq_off();
3514 kmemleak_free_recursive(objp, cachep->flags);
3515 objp = cache_free_debugcheck(cachep, objp, caller);
3516
3517 kmemcheck_slab_free(cachep, objp, cachep->object_size);
3518
3519 /*
3520 * Skip calling cache_free_alien() when the platform is not NUMA.
3521 * This avoids cache misses that happen while accessing the slab
3522 * page (a per-page memory reference) just to get the nodeid.
3523 * Instead use a global variable to skip the call, which is most
3524 * likely already present in the cache.
3525 */
3526 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
3527 return;
3528
3529 if (ac->avail < ac->limit) {
3530 STATS_INC_FREEHIT(cachep);
3531 } else {
3532 STATS_INC_FREEMISS(cachep);
3533 cache_flusharray(cachep, ac);
3534 }
3535
3536 if (sk_memalloc_socks()) {
3537 struct page *page = virt_to_head_page(objp);
3538
3539 if (unlikely(PageSlabPfmemalloc(page))) {
3540 cache_free_pfmemalloc(cachep, page, objp);
3541 return;
3542 }
3543 }
3544
3545 ac->entry[ac->avail++] = objp;
3546}
3547
3548/**
3549 * kmem_cache_alloc - Allocate an object
3550 * @cachep: The cache to allocate from.
3551 * @flags: See kmalloc().
3552 *
3553 * Allocate an object from this cache. The flags are only relevant
3554 * if the cache has no available objects.
3555 */
3556void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3557{
3558 void *ret = slab_alloc(cachep, flags, _RET_IP_);
3559
3560 kasan_slab_alloc(cachep, ret, flags);
3561 trace_kmem_cache_alloc(_RET_IP_, ret,
3562 cachep->object_size, cachep->size, flags);
3563
3564 return ret;
3565}
3566EXPORT_SYMBOL(kmem_cache_alloc);
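
/*
 * Illustrative usage sketch (hypothetical struct and cache names, not part
 * of this file): a typical caller creates a cache once, then allocates and
 * frees objects from it:
 *
 *	struct foo { int a; struct list_head link; };
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */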
3567
3568static __always_inline void
3569cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
3570 size_t size, void **p, unsigned long caller)
3571{
3572 size_t i;
3573
3574 for (i = 0; i < size; i++)
3575 p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
3576}
3577
3578int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3579 void **p)
3580{
3581 size_t i;
3582
3583 s = slab_pre_alloc_hook(s, flags);
3584 if (!s)
3585 return 0;
3586
3587 cache_alloc_debugcheck_before(s, flags);
3588
3589 local_irq_disable();
3590 for (i = 0; i < size; i++) {
3591 void *objp = __do_cache_alloc(s, flags);
3592
3593 if (unlikely(!objp))
3594 goto error;
3595 p[i] = objp;
3596 }
3597 local_irq_enable();
3598
3599 cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
3600
3601
3602 if (unlikely(flags & __GFP_ZERO))
3603 for (i = 0; i < size; i++)
3604 memset(p[i], 0, s->object_size);
3605
3606 slab_post_alloc_hook(s, flags, size, p);
3607
3608 return size;
3609error:
3610 local_irq_enable();
3611 cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
3612 slab_post_alloc_hook(s, flags, i, p);
3613 __kmem_cache_free_bulk(s, i, p);
3614 return 0;
3615}
3616EXPORT_SYMBOL(kmem_cache_alloc_bulk);
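
/*
 * Illustrative sketch: kmem_cache_alloc_bulk() either fills the whole array
 * and returns the requested count, or returns 0 with nothing left allocated,
 * so a hypothetical caller only has to handle two outcomes:
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		... use objs[0] .. objs[15] ...
 *		kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 *	}
 */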
3617
3618#ifdef CONFIG_TRACING
3619void *
3620kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
3621{
3622 void *ret;
3623
3624 ret = slab_alloc(cachep, flags, _RET_IP_);
3625
3626 kasan_kmalloc(cachep, ret, size, flags);
3627 trace_kmalloc(_RET_IP_, ret,
3628 size, cachep->size, flags);
3629 return ret;
3630}
3631EXPORT_SYMBOL(kmem_cache_alloc_trace);
3632#endif
3633
3634#ifdef CONFIG_NUMA
3635/**
3636 * kmem_cache_alloc_node - Allocate an object on the specified node
3637 * @cachep: The cache to allocate from.
3638 * @flags: See kmalloc().
3639 * @nodeid: node number of the target node.
3640 *
3641 * Identical to kmem_cache_alloc but it will allocate memory on the given
3642 * node, which can improve the performance for cpu bound structures.
3643 *
3644 * Fallback to other nodes is possible if __GFP_THISNODE is not set.
3645 */
3646void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3647{
3648 void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3649
3650 kasan_slab_alloc(cachep, ret, flags);
3651 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3652 cachep->object_size, cachep->size,
3653 flags, nodeid);
3654
3655 return ret;
3656}
3657EXPORT_SYMBOL(kmem_cache_alloc_node);
3658
3659#ifdef CONFIG_TRACING
3660void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
3661 gfp_t flags,
3662 int nodeid,
3663 size_t size)
3664{
3665 void *ret;
3666
3667 ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
3668
3669 kasan_kmalloc(cachep, ret, size, flags);
3670 trace_kmalloc_node(_RET_IP_, ret,
3671 size, cachep->size,
3672 flags, nodeid);
3673 return ret;
3674}
3675EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
3676#endif
3677
3678static __always_inline void *
3679__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
3680{
3681 struct kmem_cache *cachep;
3682 void *ret;
3683
3684 cachep = kmalloc_slab(size, flags);
3685 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3686 return cachep;
3687 ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
3688 kasan_kmalloc(cachep, ret, size, flags);
3689
3690 return ret;
3691}
3692
3693void *__kmalloc_node(size_t size, gfp_t flags, int node)
3694{
3695 return __do_kmalloc_node(size, flags, node, _RET_IP_);
3696}
3697EXPORT_SYMBOL(__kmalloc_node);
3698
3699void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3700 int node, unsigned long caller)
3701{
3702 return __do_kmalloc_node(size, flags, node, caller);
3703}
3704EXPORT_SYMBOL(__kmalloc_node_track_caller);
3705#endif
3706
3707/**
3708 * __do_kmalloc - allocate memory
3709 * @size: how many bytes of memory are required.
3710 * @flags: the type of memory to allocate (see kmalloc).
3711 * @caller: function caller for debug tracking of the caller
3712 */
3713static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3714 unsigned long caller)
3715{
3716 struct kmem_cache *cachep;
3717 void *ret;
3718
3719 cachep = kmalloc_slab(size, flags);
3720 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3721 return cachep;
3722 ret = slab_alloc(cachep, flags, caller);
3723
3724 kasan_kmalloc(cachep, ret, size, flags);
3725 trace_kmalloc(caller, ret,
3726 size, cachep->size, flags);
3727
3728 return ret;
3729}
3730
3731void *__kmalloc(size_t size, gfp_t flags)
3732{
3733 return __do_kmalloc(size, flags, _RET_IP_);
3734}
3735EXPORT_SYMBOL(__kmalloc);
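
/*
 * Illustrative sketch: most callers reach __kmalloc() through the kmalloc()
 * inline, which selects a kmalloc cache by size. Hypothetical example:
 *
 *	char *buf = kmalloc(256, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */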
3736
3737void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3738{
3739 return __do_kmalloc(size, flags, caller);
3740}
3741EXPORT_SYMBOL(__kmalloc_track_caller);
3742
3743/**
3744 * kmem_cache_free - Deallocate an object
3745 * @cachep: The cache the allocation was from.
3746 * @objp: The previously allocated object.
3747 *
3748 * Free an object which was previously allocated from this
3749 * cache.
3750 */
3751void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3752{
3753 unsigned long flags;
3754 cachep = cache_from_obj(cachep, objp);
3755 if (!cachep)
3756 return;
3757
3758 local_irq_save(flags);
3759 debug_check_no_locks_freed(objp, cachep->object_size);
3760 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3761 debug_check_no_obj_freed(objp, cachep->object_size);
3762 __cache_free(cachep, objp, _RET_IP_);
3763 local_irq_restore(flags);
3764
3765 trace_kmem_cache_free(_RET_IP_, objp);
3766}
3767EXPORT_SYMBOL(kmem_cache_free);
3768
3769void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
3770{
3771 struct kmem_cache *s;
3772 size_t i;
3773
3774 local_irq_disable();
3775 for (i = 0; i < size; i++) {
3776 void *objp = p[i];
3777
3778 if (!orig_s)
3779 s = virt_to_cache(objp);
3780 else
3781 s = cache_from_obj(orig_s, objp);
3782
3783 debug_check_no_locks_freed(objp, s->object_size);
3784 if (!(s->flags & SLAB_DEBUG_OBJECTS))
3785 debug_check_no_obj_freed(objp, s->object_size);
3786
3787 __cache_free(s, objp, _RET_IP_);
3788 }
3789 local_irq_enable();
3790
3791
3792}
3793EXPORT_SYMBOL(kmem_cache_free_bulk);
3794
3795/**
3796 * kfree - free previously allocated memory
3797 * @objp: pointer returned by kmalloc.
3798 *
3799 * If @objp is NULL, no operation is performed.
3800 *
3801 * Don't free memory not originally allocated by kmalloc()
3802 * or you will run into trouble.
3803 */
3804void kfree(const void *objp)
3805{
3806 struct kmem_cache *c;
3807 unsigned long flags;
3808
3809 trace_kfree(_RET_IP_, objp);
3810
3811 if (unlikely(ZERO_OR_NULL_PTR(objp)))
3812 return;
3813 local_irq_save(flags);
3814 kfree_debugcheck(objp);
3815 c = virt_to_cache(objp);
3816 debug_check_no_locks_freed(objp, c->object_size);
3817
3818 debug_check_no_obj_freed(objp, c->object_size);
3819 __cache_free(c, (void *)objp, _RET_IP_);
3820 local_irq_restore(flags);
3821}
3822EXPORT_SYMBOL(kfree);
3823
3824/*
3825 * This initializes kmem_cache_node or resizes various caches for all nodes.
3826 */
3827static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
3828{
3829 int ret;
3830 int node;
3831 struct kmem_cache_node *n;
3832
3833 for_each_online_node(node) {
3834 ret = setup_kmem_cache_node(cachep, node, gfp, true);
3835 if (ret)
3836 goto fail;
3837
3838 }
3839
3840 return 0;
3841
3842fail:
3843 if (!cachep->list.next) {
3844
3845 node--;
3846 while (node >= 0) {
3847 n = get_node(cachep, node);
3848 if (n) {
3849 kfree(n->shared);
3850 free_alien_cache(n->alien);
3851 kfree(n);
3852 cachep->node[node] = NULL;
3853 }
3854 node--;
3855 }
3856 }
3857 return -ENOMEM;
3858}
3859
3860/* Always called with the slab_mutex held */
3861static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
3862 int batchcount, int shared, gfp_t gfp)
3863{
3864 struct array_cache __percpu *cpu_cache, *prev;
3865 int cpu;
3866
3867 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
3868 if (!cpu_cache)
3869 return -ENOMEM;
3870
3871 prev = cachep->cpu_cache;
3872 cachep->cpu_cache = cpu_cache;
3873 kick_all_cpus_sync();
3874
3875 check_irq_on();
3876 cachep->batchcount = batchcount;
3877 cachep->limit = limit;
3878 cachep->shared = shared;
3879
3880 if (!prev)
3881 goto setup_node;
3882
3883 for_each_online_cpu(cpu) {
3884 LIST_HEAD(list);
3885 int node;
3886 struct kmem_cache_node *n;
3887 struct array_cache *ac = per_cpu_ptr(prev, cpu);
3888
3889 node = cpu_to_mem(cpu);
3890 n = get_node(cachep, node);
3891 spin_lock_irq(&n->list_lock);
3892 free_block(cachep, ac->entry, ac->avail, node, &list);
3893 spin_unlock_irq(&n->list_lock);
3894 slabs_destroy(cachep, &list);
3895 }
3896 free_percpu(prev);
3897
3898setup_node:
3899 return setup_kmem_cache_nodes(cachep, gfp);
3900}
3901
3902static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3903 int batchcount, int shared, gfp_t gfp)
3904{
3905 int ret;
3906 struct kmem_cache *c;
3907
3908 ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3909
3910 if (slab_state < FULL)
3911 return ret;
3912
3913 if ((ret < 0) || !is_root_cache(cachep))
3914 return ret;
3915
3916 lockdep_assert_held(&slab_mutex);
3917 for_each_memcg_cache(c, cachep) {
3918
3919 __do_tune_cpucache(c, limit, batchcount, shared, gfp);
3920 }
3921
3922 return ret;
3923}
3924
3925/* Called with slab_mutex held always */
3926static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
3927{
3928 int err;
3929 int limit = 0;
3930 int shared = 0;
3931 int batchcount = 0;
3932
3933 err = cache_random_seq_create(cachep, cachep->num, gfp);
3934 if (err)
3935 goto end;
3936
3937 if (!is_root_cache(cachep)) {
3938 struct kmem_cache *root = memcg_root_cache(cachep);
3939 limit = root->limit;
3940 shared = root->shared;
3941 batchcount = root->batchcount;
3942 }
3943
3944 if (limit && shared && batchcount)
3945 goto skip_setup;
3946
3947 /*
3948 * The head array serves three purposes:
3949 * - create a LIFO ordering, i.e. return objects that are cache-warm
3950 * - reduce the number of spinlock operations
3951 * - reduce the number of linked list operations on the slab and
3952 * bulk free code paths.
3953 * The numbers below are guessed; ideally they would be auto-tuned.
3954 */
3955 if (cachep->size > 131072)
3956 limit = 1;
3957 else if (cachep->size > PAGE_SIZE)
3958 limit = 8;
3959 else if (cachep->size > 1024)
3960 limit = 24;
3961 else if (cachep->size > 256)
3962 limit = 54;
3963 else
3964 limit = 120;
3965
3966 /*
3967 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
3968 * allocation behaviour: most allocs on one cpu, most frees on
3969 * another. For these cases an efficient object passing between
3970 * cpus is necessary. This is provided by a shared array, which
3971 * replaces Bonwick's magazine layer.
3972 * On uniprocessor it is functionally equivalent (but less
3973 * efficient) to a larger limit, so it is disabled by default.
3974 */
3975 shared = 0;
3976 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
3977 shared = 8;
3978
3979#if DEBUG
3980 /*
3981 * With debugging enabled, large batchcounts lead to excessively
3982 * long periods with interrupts disabled, so limit them.
3983 */
3984 if (limit > 32)
3985 limit = 32;
3986#endif
3987 batchcount = (limit + 1) / 2;
3988skip_setup:
3989 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3990end:
3991 if (err)
3992 pr_err("enable_cpucache failed for %s, error %d\n",
3993 cachep->name, -err);
3994 return err;
3995}
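
/*
 * Worked example of the defaults chosen above (assuming no values inherited
 * from a memcg root cache): a cache with 512-byte objects falls into the
 * "> 256" bucket, so limit = 54, shared = 8 on SMP, and
 * batchcount = (54 + 1) / 2 = 27.
 */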
3996
3997/*
3998 * Drain an array if it contains any elements, taking the node lock only if
3999 * necessary. Note that the node list_lock also protects the array_cache
4000 * if drain_array() is used on the shared array.
4001 */
4002static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
4003 struct array_cache *ac, int node)
4004{
4005 LIST_HEAD(list);
4006
4007
4008 check_mutex_acquired();
4009
4010 if (!ac || !ac->avail)
4011 return;
4012
4013 if (ac->touched) {
4014 ac->touched = 0;
4015 return;
4016 }
4017
4018 spin_lock_irq(&n->list_lock);
4019 drain_array_locked(cachep, ac, node, false, &list);
4020 spin_unlock_irq(&n->list_lock);
4021
4022 slabs_destroy(cachep, &list);
4023}
4024
4025/**
4026 * cache_reap - Reclaim memory from caches.
4027 * @w: work descriptor
4028 *
4029 * Called from a workqueue every few seconds.
4030 * Purpose:
4031 * - clear the per-cpu caches for this CPU.
4032 * - return freeable pages to the main free memory pool.
4033 *
4034 * If we cannot acquire the cache chain mutex then just give up - we'll
4035 * try again on the next iteration.
4036 */
4037static void cache_reap(struct work_struct *w)
4038{
4039 struct kmem_cache *searchp;
4040 struct kmem_cache_node *n;
4041 int node = numa_mem_id();
4042 struct delayed_work *work = to_delayed_work(w);
4043
4044 if (!mutex_trylock(&slab_mutex))
4045
4046 goto out;
4047
4048 list_for_each_entry(searchp, &slab_caches, list) {
4049 check_irq_on();
4050
4051
4052 /*
4053 * Take the node lock only when strictly necessary; the lockless
4054 * checks below tolerate a small error margin.
4055 */
4056 n = get_node(searchp, node);
4057
4058 reap_alien(searchp, n);
4059
4060 drain_array(searchp, n, cpu_cache_get(searchp), node);
4061
4062 /*
4063 * These are racy checks but it does not matter
4064 * if we skip one check or scan twice.
4065 */
4066 if (time_after(n->next_reap, jiffies))
4067 goto next;
4068
4069 n->next_reap = jiffies + REAPTIMEOUT_NODE;
4070
4071 drain_array(searchp, n, n->shared, node);
4072
4073 if (n->free_touched)
4074 n->free_touched = 0;
4075 else {
4076 int freed;
4077
4078 freed = drain_freelist(searchp, n, (n->free_limit +
4079 5 * searchp->num - 1) / (5 * searchp->num));
4080 STATS_ADD_REAPED(searchp, freed);
4081 }
4082next:
4083 cond_resched();
4084 }
4085 check_irq_on();
4086 mutex_unlock(&slab_mutex);
4087 next_reap_node();
4088out:
4089
4090 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
4091}
4092
4093#ifdef CONFIG_SLABINFO
4094void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
4095{
4096 unsigned long active_objs, num_objs, active_slabs;
4097 unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
4098 unsigned long free_slabs = 0;
4099 int node;
4100 struct kmem_cache_node *n;
4101
4102 for_each_kmem_cache_node(cachep, node, n) {
4103 check_irq_on();
4104 spin_lock_irq(&n->list_lock);
4105
4106 total_slabs += n->total_slabs;
4107 free_slabs += n->free_slabs;
4108 free_objs += n->free_objects;
4109
4110 if (n->shared)
4111 shared_avail += n->shared->avail;
4112
4113 spin_unlock_irq(&n->list_lock);
4114 }
4115 num_objs = total_slabs * cachep->num;
4116 active_slabs = total_slabs - free_slabs;
4117 active_objs = num_objs - free_objs;
4118
4119 sinfo->active_objs = active_objs;
4120 sinfo->num_objs = num_objs;
4121 sinfo->active_slabs = active_slabs;
4122 sinfo->num_slabs = total_slabs;
4123 sinfo->shared_avail = shared_avail;
4124 sinfo->limit = cachep->limit;
4125 sinfo->batchcount = cachep->batchcount;
4126 sinfo->shared = cachep->shared;
4127 sinfo->objects_per_slab = cachep->num;
4128 sinfo->cache_order = cachep->gfporder;
4129}
4130
4131void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
4132{
4133#if STATS
4134 {
4135 unsigned long high = cachep->high_mark;
4136 unsigned long allocs = cachep->num_allocations;
4137 unsigned long grown = cachep->grown;
4138 unsigned long reaped = cachep->reaped;
4139 unsigned long errors = cachep->errors;
4140 unsigned long max_freeable = cachep->max_freeable;
4141 unsigned long node_allocs = cachep->node_allocs;
4142 unsigned long node_frees = cachep->node_frees;
4143 unsigned long overflows = cachep->node_overflow;
4144
4145 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
4146 allocs, high, grown,
4147 reaped, errors, max_freeable, node_allocs,
4148 node_frees, overflows);
4149 }
4150
4151 {
4152 unsigned long allochit = atomic_read(&cachep->allochit);
4153 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4154 unsigned long freehit = atomic_read(&cachep->freehit);
4155 unsigned long freemiss = atomic_read(&cachep->freemiss);
4156
4157 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4158 allochit, allocmiss, freehit, freemiss);
4159 }
4160#endif
4161}
4162
4163#define MAX_SLABINFO_WRITE 128
4164
4165
4166/*
4167 * slabinfo_write - tuning for the slab allocator. The user buffer is
4168 * expected to contain one line of the form
4169 * "<cache name> <limit> <batchcount> <shared>".
4170 */
4171ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4172 size_t count, loff_t *ppos)
4173{
4174 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4175 int limit, batchcount, shared, res;
4176 struct kmem_cache *cachep;
4177
4178 if (count > MAX_SLABINFO_WRITE)
4179 return -EINVAL;
4180 if (copy_from_user(&kbuf, buffer, count))
4181 return -EFAULT;
4182 kbuf[MAX_SLABINFO_WRITE] = '\0';
4183
4184 tmp = strchr(kbuf, ' ');
4185 if (!tmp)
4186 return -EINVAL;
4187 *tmp = '\0';
4188 tmp++;
4189 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4190 return -EINVAL;
4191
4192
4193 mutex_lock(&slab_mutex);
4194 res = -EINVAL;
4195 list_for_each_entry(cachep, &slab_caches, list) {
4196 if (!strcmp(cachep->name, kbuf)) {
4197 if (limit < 1 || batchcount < 1 ||
4198 batchcount > limit || shared < 0) {
4199 res = 0;
4200 } else {
4201 res = do_tune_cpucache(cachep, limit,
4202 batchcount, shared,
4203 GFP_KERNEL);
4204 }
4205 break;
4206 }
4207 }
4208 mutex_unlock(&slab_mutex);
4209 if (res >= 0)
4210 res = count;
4211 return res;
4212}
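
/*
 * Illustrative sketch of the tuning interface implemented above: userspace
 * retunes a cache by writing "<cache name> <limit> <batchcount> <shared>"
 * to /proc/slabinfo, e.g. (cache name and values are examples only):
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 */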
4213
4214#ifdef CONFIG_DEBUG_SLAB_LEAK
4215
4216static inline int add_caller(unsigned long *n, unsigned long v)
4217{
4218 unsigned long *p;
4219 int l;
4220 if (!v)
4221 return 1;
4222 l = n[1];
4223 p = n + 2;
4224 while (l) {
4225 int i = l/2;
4226 unsigned long *q = p + 2 * i;
4227 if (*q == v) {
4228 q[1]++;
4229 return 1;
4230 }
4231 if (*q > v) {
4232 l = i;
4233 } else {
4234 p = q + 2;
4235 l -= i + 1;
4236 }
4237 }
4238 if (++n[1] == n[0])
4239 return 0;
4240 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4241 p[0] = v;
4242 p[1] = 1;
4243 return 1;
4244}
4245
4246static void handle_slab(unsigned long *n, struct kmem_cache *c,
4247 struct page *page)
4248{
4249 void *p;
4250 int i, j;
4251 unsigned long v;
4252
4253 if (n[0] == n[1])
4254 return;
4255 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4256 bool active = true;
4257
4258 for (j = page->active; j < c->num; j++) {
4259 if (get_free_obj(page, j) == i) {
4260 active = false;
4261 break;
4262 }
4263 }
4264
4265 if (!active)
4266 continue;
4267
4268
4269 /*
4270 * probe_kernel_read() is used because, with DEBUG_PAGEALLOC, the
4271 * object's page may be unmapped and a direct read of the stored
4272 * caller could fault.
4273 */
4274 if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
4275 continue;
4276
4277 if (!add_caller(n, v))
4278 return;
4279 }
4280}
4281
4282static void show_symbol(struct seq_file *m, unsigned long address)
4283{
4284#ifdef CONFIG_KALLSYMS
4285 unsigned long offset, size;
4286 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4287
4288 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4289 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4290 if (modname[0])
4291 seq_printf(m, " [%s]", modname);
4292 return;
4293 }
4294#endif
4295 seq_printf(m, "%p", (void *)address);
4296}
4297
4298static int leaks_show(struct seq_file *m, void *p)
4299{
4300 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
4301 struct page *page;
4302 struct kmem_cache_node *n;
4303 const char *name;
4304 unsigned long *x = m->private;
4305 int node;
4306 int i;
4307
4308 if (!(cachep->flags & SLAB_STORE_USER))
4309 return 0;
4310 if (!(cachep->flags & SLAB_RED_ZONE))
4311 return 0;
4312
4313 /*
4314 * Set store_user_clean and start to grab stored user information
4315 * for all objects on this cache. If some alloc/free requests come
4316 * in during the processing, the information would be wrong, so
4317 * restart the whole scan.
4318 */
4319 do {
4320 set_store_user_clean(cachep);
4321 drain_cpu_caches(cachep);
4322
4323 x[1] = 0;
4324
4325 for_each_kmem_cache_node(cachep, node, n) {
4326
4327 check_irq_on();
4328 spin_lock_irq(&n->list_lock);
4329
4330 list_for_each_entry(page, &n->slabs_full, lru)
4331 handle_slab(x, cachep, page);
4332 list_for_each_entry(page, &n->slabs_partial, lru)
4333 handle_slab(x, cachep, page);
4334 spin_unlock_irq(&n->list_lock);
4335 }
4336 } while (!is_store_user_clean(cachep));
4337
4338 name = cachep->name;
4339 if (x[0] == x[1]) {
4340
4341 mutex_unlock(&slab_mutex);
4342 m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4343 if (!m->private) {
4344
4345 m->private = x;
4346 mutex_lock(&slab_mutex);
4347 return -ENOMEM;
4348 }
4349 *(unsigned long *)m->private = x[0] * 2;
4350 kfree(x);
4351 mutex_lock(&slab_mutex);
4352
4353 m->count = m->size;
4354 return 0;
4355 }
4356 for (i = 0; i < x[1]; i++) {
4357 seq_printf(m, "%s: %lu ", name, x[2*i+3]);
4358 show_symbol(m, x[2*i+2]);
4359 seq_putc(m, '\n');
4360 }
4361
4362 return 0;
4363}
4364
4365static const struct seq_operations slabstats_op = {
4366 .start = slab_start,
4367 .next = slab_next,
4368 .stop = slab_stop,
4369 .show = leaks_show,
4370};
4371
4372static int slabstats_open(struct inode *inode, struct file *file)
4373{
4374 unsigned long *n;
4375
4376 n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
4377 if (!n)
4378 return -ENOMEM;
4379
4380 *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4381
4382 return 0;
4383}
4384
4385static const struct file_operations proc_slabstats_operations = {
4386 .open = slabstats_open,
4387 .read = seq_read,
4388 .llseek = seq_lseek,
4389 .release = seq_release_private,
4390};
4391#endif
4392
4393static int __init slab_proc_init(void)
4394{
4395#ifdef CONFIG_DEBUG_SLAB_LEAK
4396 proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4397#endif
4398 return 0;
4399}
4400module_init(slab_proc_init);
4401#endif
4402
4403#ifdef CONFIG_HARDENED_USERCOPY
4404/*
4405 * Rejects usercopy ranges that are incorrectly sized or do not fall
4406 * entirely within a single slab object.
4407 * Returns NULL if the check passes, otherwise the name of the cache
4408 * to report in the error.
4409 */
4410const char *__check_heap_object(const void *ptr, unsigned long n,
4411 struct page *page)
4412{
4413 struct kmem_cache *cachep;
4414 unsigned int objnr;
4415 unsigned long offset;
4416
4417
4418 cachep = page->slab_cache;
4419 objnr = obj_to_index(cachep, page, (void *)ptr);
4420 BUG_ON(objnr >= cachep->num);
4421
4422
4423 offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
4424
4425
4426 if (offset <= cachep->object_size && n <= cachep->object_size - offset)
4427 return NULL;
4428
4429 return cachep->name;
4430}
4431#endif
4432
4433/**
4434 * ksize - get the actual amount of memory allocated for a given object
4435 * @objp: Pointer to the object
4436 *
4437 * kmalloc may internally round up allocations and return more memory
4438 * than requested. ksize() can be used to determine the actual amount of
4439 * memory allocated. The caller may use this additional memory, even though
4440 * a smaller amount of memory was initially specified with the kmalloc call.
4441 * The caller must guarantee that objp points to a valid object previously
4442 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4443 * must not be freed during the duration of the call.
4444 */
4445size_t ksize(const void *objp)
4446{
4447 size_t size;
4448
4449 BUG_ON(!objp);
4450 if (unlikely(objp == ZERO_SIZE_PTR))
4451 return 0;
4452
4453 size = virt_to_cache(objp)->object_size;
4454
4455
4456
4457 kasan_unpoison_shadow(objp, size);
4458
4459 return size;
4460}
4461EXPORT_SYMBOL(ksize);
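
/*
 * Illustrative sketch: ksize() lets a caller use the slack that kmalloc()
 * rounded up to. Hypothetical example (the returned size depends on the
 * configured kmalloc caches):
 *
 *	void *p = kmalloc(30, GFP_KERNEL);
 *	size_t usable = p ? ksize(p) : 0;	// e.g. 32 on a typical config
 */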
4462