/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects:
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->counters	-> Counters of objects
 *	C. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the slab. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve objects from the slab's freelist.
 *
 *   The list_lock protects the partial and full lists on each node as well
 *   as the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock.
 *
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In
 *   addition interrupts are disabled to ensure that the processor does
 *   not change while handling per-cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab
 * is freed then the slab will show up again on the partial lists.
 * Full slabs are tracked only for debugging purposes, because otherwise
 * we could not scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per-cpu caches for
 * fast frees and allocs.
 *
 * Caches with any of the SLAB_DEBUG_FLAGS set (see kmem_cache_debug())
 * require special handling: they are moved out of the fast paths and the
 * lockless freelists are disabled for them.
 */
118static inline int kmem_cache_debug(struct kmem_cache *s)
119{
120#ifdef CONFIG_SLUB_DEBUG
121 return unlikely(s->flags & SLAB_DEBUG_FLAGS);
122#else
123 return 0;
124#endif
125}
126
127static inline void *fixup_red_left(struct kmem_cache *s, void *p)
128{
129 if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
130 p += s->red_left_pad;
131
132 return p;
133}
134
135static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
136{
137#ifdef CONFIG_SLUB_CPU_PARTIAL
138 return !kmem_cache_debug(s);
139#else
140 return false;
141#endif
142}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information.
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */

197#ifdef CONFIG_SMP
198static struct notifier_block slab_notifier;
199#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
205struct track {
206 unsigned long addr;
207#ifdef CONFIG_STACKTRACE
208 unsigned long addrs[TRACK_ADDRS_COUNT];
209#endif
210 int cpu;
211 int pid;
212 unsigned long when;
213};
214
215enum track_item { TRACK_ALLOC, TRACK_FREE };
216
217#ifdef CONFIG_SYSFS
218static int sysfs_slab_add(struct kmem_cache *);
219static int sysfs_slab_alias(struct kmem_cache *, const char *);
220static void memcg_propagate_slab_attrs(struct kmem_cache *s);
221#else
222static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
223static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
224 { return 0; }
225static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
226#endif
227
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
238
239
240
241
242
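/*
 * Core slab cache functions
 */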
243static inline void *get_freepointer(struct kmem_cache *s, void *object)
244{
245 return *(void **)(object + s->offset);
246}
247
248static void prefetch_freepointer(const struct kmem_cache *s, void *object)
249{
250 prefetch(object + s->offset);
251}
252
253static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
254{
255 void *p;
256
257 if (!debug_pagealloc_enabled())
258 return get_freepointer(s, object);
259
260 probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
261 return p;
262}
263
264static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
265{
266 *(void **)(object + s->offset) = fp;
267}
268
269
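/* Loop over all objects in a slab */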
270#define for_each_object(__p, __s, __addr, __objects) \
271 for (__p = fixup_red_left(__s, __addr); \
272 __p < (__addr) + (__objects) * (__s)->size; \
273 __p += (__s)->size)
274
275#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
276 for (__p = fixup_red_left(__s, __addr), __idx = 1; \
277 __idx <= __objects; \
278 __p += (__s)->size, __idx++)
279
280
281static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
282{
283 return (p - addr) / s->size;
284}
285
286static inline int order_objects(int order, unsigned long size, int reserved)
287{
288 return ((PAGE_SIZE << order) - reserved) / size;
289}
290
291static inline struct kmem_cache_order_objects oo_make(int order,
292 unsigned long size, int reserved)
293{
294 struct kmem_cache_order_objects x = {
295 (order << OO_SHIFT) + order_objects(order, size, reserved)
296 };
297
298 return x;
299}
300
301static inline int oo_order(struct kmem_cache_order_objects x)
302{
303 return x.x >> OO_SHIFT;
304}
305
306static inline int oo_objects(struct kmem_cache_order_objects x)
307{
308 return x.x & OO_MASK;
309}
310
311
312
313
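/*
 * Per slab locking using the pagelock
 */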
314static __always_inline void slab_lock(struct page *page)
315{
316 VM_BUG_ON_PAGE(PageTail(page), page);
317 bit_spin_lock(PG_locked, &page->flags);
318}
319
320static __always_inline void slab_unlock(struct page *page)
321{
322 VM_BUG_ON_PAGE(PageTail(page), page);
323 __bit_spin_unlock(PG_locked, &page->flags);
324}
325
326static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
327{
328 struct page tmp;
329 tmp.counters = counters_new;
	/*
	 * page->counters overlaps page->_refcount in struct page, so we must
	 * not write the whole counters word here: doing so could clobber a
	 * concurrently updated reference count. Copy only the fields that
	 * SLUB owns (frozen, inuse, objects), one at a time.
	 */
336 page->frozen = tmp.frozen;
337 page->inuse = tmp.inuse;
338 page->objects = tmp.objects;
339}
340
341
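/* Interrupts must be disabled (for the fallback code to work right) */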
342static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
343 void *freelist_old, unsigned long counters_old,
344 void *freelist_new, unsigned long counters_new,
345 const char *n)
346{
347 VM_BUG_ON(!irqs_disabled());
348#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
349 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
350 if (s->flags & __CMPXCHG_DOUBLE) {
351 if (cmpxchg_double(&page->freelist, &page->counters,
352 freelist_old, counters_old,
353 freelist_new, counters_new))
354 return true;
355 } else
356#endif
357 {
358 slab_lock(page);
359 if (page->freelist == freelist_old &&
360 page->counters == counters_old) {
361 page->freelist = freelist_new;
362 set_page_slub_counters(page, counters_new);
363 slab_unlock(page);
364 return true;
365 }
366 slab_unlock(page);
367 }
368
369 cpu_relax();
370 stat(s, CMPXCHG_DOUBLE_FAIL);
371
372#ifdef SLUB_DEBUG_CMPXCHG
373 pr_info("%s %s: cmpxchg double redo ", n, s->name);
374#endif
375
376 return false;
377}
378
379static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
380 void *freelist_old, unsigned long counters_old,
381 void *freelist_new, unsigned long counters_new,
382 const char *n)
383{
384#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
385 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
386 if (s->flags & __CMPXCHG_DOUBLE) {
387 if (cmpxchg_double(&page->freelist, &page->counters,
388 freelist_old, counters_old,
389 freelist_new, counters_new))
390 return true;
391 } else
392#endif
393 {
394 unsigned long flags;
395
396 local_irq_save(flags);
397 slab_lock(page);
398 if (page->freelist == freelist_old &&
399 page->counters == counters_old) {
400 page->freelist = freelist_new;
401 set_page_slub_counters(page, counters_new);
402 slab_unlock(page);
403 local_irq_restore(flags);
404 return true;
405 }
406 slab_unlock(page);
407 local_irq_restore(flags);
408 }
409
410 cpu_relax();
411 stat(s, CMPXCHG_DOUBLE_FAIL);
412
413#ifdef SLUB_DEBUG_CMPXCHG
414 pr_info("%s %s: cmpxchg double redo ", n, s->name);
415#endif
416
417 return false;
418}
419
420#ifdef CONFIG_SLUB_DEBUG
421
422
423
424
425
426
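/*
 * Determine a map of objects in use in a slab.
 *
 * Node list_lock must be held to guarantee that the slab does
 * not vanish from under us.
 */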
427static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
428{
429 void *p;
430 void *addr = page_address(page);
431
432 for (p = page->freelist; p; p = get_freepointer(s, p))
433 set_bit(slab_index(p, s, addr), map);
434}
435
436static inline int size_from_object(struct kmem_cache *s)
437{
438 if (s->flags & SLAB_RED_ZONE)
439 return s->size - s->red_left_pad;
440
441 return s->size;
442}
443
444static inline void *restore_red_left(struct kmem_cache *s, void *p)
445{
446 if (s->flags & SLAB_RED_ZONE)
447 p -= s->red_left_pad;
448
449 return p;
450}
451
452
453
454
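/*
 * Debug settings:
 */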
455#if defined(CONFIG_SLUB_DEBUG_ON)
456static int slub_debug = DEBUG_DEFAULT_FLAGS;
457#elif defined(CONFIG_KASAN)
458static int slub_debug = SLAB_STORE_USER;
459#else
460static int slub_debug;
461#endif
462
463static char *slub_debug_slabs;
464static int disable_higher_order_debug;
465
466
467
468
469
470
471
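/*
 * SLUB is about to manipulate internal object metadata. The memory involved
 * lies outside of what is reported to KASAN as the allocated object, so
 * disable KASAN checking while the metadata is accessed to avoid false
 * out-of-bounds reports.
 */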
472static inline void metadata_access_enable(void)
473{
474 kasan_disable_current();
475}
476
477static inline void metadata_access_disable(void)
478{
479 kasan_enable_current();
480}
481
482
483
484
485
486
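/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */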
487static inline int check_valid_pointer(struct kmem_cache *s,
488 struct page *page, void *object)
489{
490 void *base;
491
492 if (!object)
493 return 1;
494
495 base = page_address(page);
496 object = restore_red_left(s, object);
497 if (object < base || object >= base + page->objects * s->size ||
498 (object - base) % s->size) {
499 return 0;
500 }
501
502 return 1;
503}
504
505static void print_section(char *text, u8 *addr, unsigned int length)
506{
507 metadata_access_enable();
508 print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
509 length, 1);
510 metadata_access_disable();
511}
512
513static struct track *get_track(struct kmem_cache *s, void *object,
514 enum track_item alloc)
515{
516 struct track *p;
517
518 if (s->offset)
519 p = object + s->offset + sizeof(void *);
520 else
521 p = object + s->inuse;
522
523 return p + alloc;
524}
525
526static void set_track(struct kmem_cache *s, void *object,
527 enum track_item alloc, unsigned long addr)
528{
529 struct track *p = get_track(s, object, alloc);
530
531 if (addr) {
532#ifdef CONFIG_STACKTRACE
533 struct stack_trace trace;
534 int i;
535
536 trace.nr_entries = 0;
537 trace.max_entries = TRACK_ADDRS_COUNT;
538 trace.entries = p->addrs;
539 trace.skip = 3;
540 metadata_access_enable();
541 save_stack_trace(&trace);
542 metadata_access_disable();
543
544
545 if (trace.nr_entries != 0 &&
546 trace.entries[trace.nr_entries - 1] == ULONG_MAX)
547 trace.nr_entries--;
548
549 for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
550 p->addrs[i] = 0;
551#endif
552 p->addr = addr;
553 p->cpu = smp_processor_id();
554 p->pid = current->pid;
555 p->when = jiffies;
556 } else
557 memset(p, 0, sizeof(struct track));
558}
559
560static void init_tracking(struct kmem_cache *s, void *object)
561{
562 if (!(s->flags & SLAB_STORE_USER))
563 return;
564
565 set_track(s, object, TRACK_FREE, 0UL);
566 set_track(s, object, TRACK_ALLOC, 0UL);
567}
568
569static void print_track(const char *s, struct track *t)
570{
571 if (!t->addr)
572 return;
573
574 pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
575 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
576#ifdef CONFIG_STACKTRACE
577 {
578 int i;
579 for (i = 0; i < TRACK_ADDRS_COUNT; i++)
580 if (t->addrs[i])
581 pr_err("\t%pS\n", (void *)t->addrs[i]);
582 else
583 break;
584 }
585#endif
586}
587
588static void print_tracking(struct kmem_cache *s, void *object)
589{
590 if (!(s->flags & SLAB_STORE_USER))
591 return;
592
593 print_track("Allocated", get_track(s, object, TRACK_ALLOC));
594 print_track("Freed", get_track(s, object, TRACK_FREE));
595}
596
597static void print_page_info(struct page *page)
598{
599 pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
600 page, page->objects, page->inuse, page->freelist, page->flags);
601
602}
603
604static void slab_bug(struct kmem_cache *s, char *fmt, ...)
605{
606 struct va_format vaf;
607 va_list args;
608
609 va_start(args, fmt);
610 vaf.fmt = fmt;
611 vaf.va = &args;
612 pr_err("=============================================================================\n");
613 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
614 pr_err("-----------------------------------------------------------------------------\n\n");
615
616 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
617 va_end(args);
618}
619
620static void slab_fix(struct kmem_cache *s, char *fmt, ...)
621{
622 struct va_format vaf;
623 va_list args;
624
625 va_start(args, fmt);
626 vaf.fmt = fmt;
627 vaf.va = &args;
628 pr_err("FIX %s: %pV\n", s->name, &vaf);
629 va_end(args);
630}
631
632static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
633{
634 unsigned int off;
635 u8 *addr = page_address(page);
636
637 print_tracking(s, p);
638
639 print_page_info(page);
640
641 pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
642 p, p - addr, get_freepointer(s, p));
643
644 if (s->flags & SLAB_RED_ZONE)
645 print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
646 else if (p > addr + 16)
647 print_section("Bytes b4 ", p - 16, 16);
648
649 print_section("Object ", p, min_t(unsigned long, s->object_size,
650 PAGE_SIZE));
651 if (s->flags & SLAB_RED_ZONE)
652 print_section("Redzone ", p + s->object_size,
653 s->inuse - s->object_size);
654
655 if (s->offset)
656 off = s->offset + sizeof(void *);
657 else
658 off = s->inuse;
659
660 if (s->flags & SLAB_STORE_USER)
661 off += 2 * sizeof(struct track);
662
	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section("Padding ", p + off, size_from_object(s) - off);
666
667 dump_stack();
668}
669
670void object_err(struct kmem_cache *s, struct page *page,
671 u8 *object, char *reason)
672{
673 slab_bug(s, "%s", reason);
674 print_trailer(s, page, object);
675}
676
677static void slab_err(struct kmem_cache *s, struct page *page,
678 const char *fmt, ...)
679{
680 va_list args;
681 char buf[100];
682
683 va_start(args, fmt);
684 vsnprintf(buf, sizeof(buf), fmt, args);
685 va_end(args);
686 slab_bug(s, "%s", buf);
687 print_page_info(page);
688 dump_stack();
689}
690
691static void init_object(struct kmem_cache *s, void *object, u8 val)
692{
693 u8 *p = object;
694
695 if (s->flags & SLAB_RED_ZONE)
696 memset(p - s->red_left_pad, val, s->red_left_pad);
697
698 if (s->flags & __OBJECT_POISON) {
699 memset(p, POISON_FREE, s->object_size - 1);
700 p[s->object_size - 1] = POISON_END;
701 }
702
703 if (s->flags & SLAB_RED_ZONE)
704 memset(p + s->object_size, val, s->inuse - s->object_size);
705}
706
707static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
708 void *from, void *to)
709{
710 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
711 memset(from, data, to - from);
712}
713
714static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
715 u8 *object, char *what,
716 u8 *start, unsigned int value, unsigned int bytes)
717{
718 u8 *fault;
719 u8 *end;
720
721 metadata_access_enable();
722 fault = memchr_inv(start, value, bytes);
723 metadata_access_disable();
724 if (!fault)
725 return 1;
726
727 end = start + bytes;
728 while (end > fault && end[-1] == value)
729 end--;
730
731 slab_bug(s, "%s overwritten", what);
732 pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
733 fault, end - 1, fault[0], value);
734 print_trailer(s, page, object);
735
736 restore_bytes(s, what, value, fault, end);
737 return 0;
738}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are
 * mostly ignored. And therefore no slab options that rely on these
 * boundaries may be used with merged slabcaches.
 */
778static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
779{
780 unsigned long off = s->inuse;
781
782 if (s->offset)
783
784 off += sizeof(void *);
785
786 if (s->flags & SLAB_STORE_USER)
787
788 off += 2 * sizeof(struct track);
789
790 if (size_from_object(s) == off)
791 return 1;
792
793 return check_bytes_and_report(s, page, p, "Object padding",
794 p + off, POISON_INUSE, size_from_object(s) - off);
795}
796
797
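/* Check the pad bytes at the end of a slab page */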
798static int slab_pad_check(struct kmem_cache *s, struct page *page)
799{
800 u8 *start;
801 u8 *fault;
802 u8 *end;
803 int length;
804 int remainder;
805
806 if (!(s->flags & SLAB_POISON))
807 return 1;
808
809 start = page_address(page);
810 length = (PAGE_SIZE << compound_order(page)) - s->reserved;
811 end = start + length;
812 remainder = length % s->size;
813 if (!remainder)
814 return 1;
815
816 metadata_access_enable();
817 fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
818 metadata_access_disable();
819 if (!fault)
820 return 1;
821 while (end > fault && end[-1] == POISON_INUSE)
822 end--;
823
824 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
825 print_section("Padding ", end - remainder, remainder);
826
827 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
828 return 0;
829}
830
831static int check_object(struct kmem_cache *s, struct page *page,
832 void *object, u8 val)
833{
834 u8 *p = object;
835 u8 *endobject = object + s->object_size;
836
837 if (s->flags & SLAB_RED_ZONE) {
838 if (!check_bytes_and_report(s, page, object, "Redzone",
839 object - s->red_left_pad, val, s->red_left_pad))
840 return 0;
841
842 if (!check_bytes_and_report(s, page, object, "Redzone",
843 endobject, val, s->inuse - s->object_size))
844 return 0;
845 } else {
846 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
847 check_bytes_and_report(s, page, p, "Alignment padding",
848 endobject, POISON_INUSE,
849 s->inuse - s->object_size);
850 }
851 }
852
853 if (s->flags & SLAB_POISON) {
854 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
855 (!check_bytes_and_report(s, page, p, "Poison", p,
856 POISON_FREE, s->object_size - 1) ||
857 !check_bytes_and_report(s, page, p, "Poison",
858 p + s->object_size - 1, POISON_END, 1)))
859 return 0;
860
861
862
863 check_pad_bytes(s, page, p);
864 }
865
866 if (!s->offset && val == SLUB_RED_ACTIVE)
867
868
869
870
871 return 1;
872
873
874 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
875 object_err(s, page, p, "Freepointer corrupt");
876
877
878
879
880
881 set_freepointer(s, p, NULL);
882 return 0;
883 }
884 return 1;
885}
886
887static int check_slab(struct kmem_cache *s, struct page *page)
888{
889 int maxobj;
890
891 VM_BUG_ON(!irqs_disabled());
892
893 if (!PageSlab(page)) {
894 slab_err(s, page, "Not a valid slab page");
895 return 0;
896 }
897
898 maxobj = order_objects(compound_order(page), s->size, s->reserved);
899 if (page->objects > maxobj) {
900 slab_err(s, page, "objects %u > max %u",
901 page->objects, maxobj);
902 return 0;
903 }
904 if (page->inuse > page->objects) {
905 slab_err(s, page, "inuse %u > max %u",
906 page->inuse, page->objects);
907 return 0;
908 }
909
910 slab_pad_check(s, page);
911 return 1;
912}
913
914
915
916
917
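/*
 * Determine if a certain object on a slab is on the freelist. Must hold the
 * slab lock when called.
 */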
918static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
919{
920 int nr = 0;
921 void *fp;
922 void *object = NULL;
923 int max_objects;
924
925 fp = page->freelist;
926 while (fp && nr <= page->objects) {
927 if (fp == search)
928 return 1;
929 if (!check_valid_pointer(s, page, fp)) {
930 if (object) {
931 object_err(s, page, object,
932 "Freechain corrupt");
933 set_freepointer(s, object, NULL);
934 } else {
935 slab_err(s, page, "Freepointer corrupt");
936 page->freelist = NULL;
937 page->inuse = page->objects;
938 slab_fix(s, "Freelist cleared");
939 return 0;
940 }
941 break;
942 }
943 object = fp;
944 fp = get_freepointer(s, object);
945 nr++;
946 }
947
948 max_objects = order_objects(compound_order(page), s->size, s->reserved);
949 if (max_objects > MAX_OBJS_PER_PAGE)
950 max_objects = MAX_OBJS_PER_PAGE;
951
952 if (page->objects != max_objects) {
953 slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
954 page->objects, max_objects);
955 page->objects = max_objects;
956 slab_fix(s, "Number of objects adjusted.");
957 }
958 if (page->inuse != page->objects - nr) {
959 slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
960 page->inuse, page->objects - nr);
961 page->inuse = page->objects - nr;
962 slab_fix(s, "Object count adjusted.");
963 }
964 return search == NULL;
965}
966
967static void trace(struct kmem_cache *s, struct page *page, void *object,
968 int alloc)
969{
970 if (s->flags & SLAB_TRACE) {
971 pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
972 s->name,
973 alloc ? "alloc" : "free",
974 object, page->inuse,
975 page->freelist);
976
977 if (!alloc)
978 print_section("Object ", (void *)object,
979 s->object_size);
980
981 dump_stack();
982 }
983}
984
985
986
987
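/*
 * Tracking of fully allocated slabs for debugging purposes.
 */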
988static void add_full(struct kmem_cache *s,
989 struct kmem_cache_node *n, struct page *page)
990{
991 if (!(s->flags & SLAB_STORE_USER))
992 return;
993
994 lockdep_assert_held(&n->list_lock);
995 list_add(&page->lru, &n->full);
996}
997
998static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
999{
1000 if (!(s->flags & SLAB_STORE_USER))
1001 return;
1002
1003 lockdep_assert_held(&n->list_lock);
1004 list_del(&page->lru);
1005}
1006
1007
1008static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1009{
1010 struct kmem_cache_node *n = get_node(s, node);
1011
1012 return atomic_long_read(&n->nr_slabs);
1013}
1014
1015static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1016{
1017 return atomic_long_read(&n->nr_slabs);
1018}
1019
1020static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
1021{
1022 struct kmem_cache_node *n = get_node(s, node);
1023
1024
1025
1026
1027
1028
1029
1030 if (likely(n)) {
1031 atomic_long_inc(&n->nr_slabs);
1032 atomic_long_add(objects, &n->total_objects);
1033 }
1034}
1035static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
1036{
1037 struct kmem_cache_node *n = get_node(s, node);
1038
1039 atomic_long_dec(&n->nr_slabs);
1040 atomic_long_sub(objects, &n->total_objects);
1041}
1042
1043
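/* Object debug checks for alloc/free paths */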
1044static void setup_object_debug(struct kmem_cache *s, struct page *page,
1045 void *object)
1046{
1047 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
1048 return;
1049
1050 init_object(s, object, SLUB_RED_INACTIVE);
1051 init_tracking(s, object);
1052}
1053
1054static inline int alloc_consistency_checks(struct kmem_cache *s,
1055 struct page *page,
1056 void *object, unsigned long addr)
1057{
1058 if (!check_slab(s, page))
1059 return 0;
1060
1061 if (!check_valid_pointer(s, page, object)) {
1062 object_err(s, page, object, "Freelist Pointer check fails");
1063 return 0;
1064 }
1065
1066 if (!check_object(s, page, object, SLUB_RED_INACTIVE))
1067 return 0;
1068
1069 return 1;
1070}
1071
1072static noinline int alloc_debug_processing(struct kmem_cache *s,
1073 struct page *page,
1074 void *object, unsigned long addr)
1075{
1076 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1077 if (!alloc_consistency_checks(s, page, object, addr))
1078 goto bad;
1079 }
1080
1081
1082 if (s->flags & SLAB_STORE_USER)
1083 set_track(s, object, TRACK_ALLOC, addr);
1084 trace(s, page, object, 1);
1085 init_object(s, object, SLUB_RED_ACTIVE);
1086 return 1;
1087
1088bad:
1089 if (PageSlab(page)) {
1090
1091
1092
1093
1094
1095 slab_fix(s, "Marking all objects used");
1096 page->inuse = page->objects;
1097 page->freelist = NULL;
1098 }
1099 return 0;
1100}
1101
1102static inline int free_consistency_checks(struct kmem_cache *s,
1103 struct page *page, void *object, unsigned long addr)
1104{
1105 if (!check_valid_pointer(s, page, object)) {
1106 slab_err(s, page, "Invalid object pointer 0x%p", object);
1107 return 0;
1108 }
1109
1110 if (on_freelist(s, page, object)) {
1111 object_err(s, page, object, "Object already free");
1112 return 0;
1113 }
1114
1115 if (!check_object(s, page, object, SLUB_RED_ACTIVE))
1116 return 0;
1117
1118 if (unlikely(s != page->slab_cache)) {
1119 if (!PageSlab(page)) {
1120 slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
1121 object);
1122 } else if (!page->slab_cache) {
1123 pr_err("SLUB <none>: no slab for object 0x%p.\n",
1124 object);
1125 dump_stack();
1126 } else
1127 object_err(s, page, object,
1128 "page slab pointer corrupt.");
1129 return 0;
1130 }
1131 return 1;
1132}
1133
1134
1135static noinline int free_debug_processing(
1136 struct kmem_cache *s, struct page *page,
1137 void *head, void *tail, int bulk_cnt,
1138 unsigned long addr)
1139{
1140 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1141 void *object = head;
1142 int cnt = 0;
1143 unsigned long uninitialized_var(flags);
1144 int ret = 0;
1145
1146 spin_lock_irqsave(&n->list_lock, flags);
1147 slab_lock(page);
1148
1149 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1150 if (!check_slab(s, page))
1151 goto out;
1152 }
1153
1154next_object:
1155 cnt++;
1156
1157 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1158 if (!free_consistency_checks(s, page, object, addr))
1159 goto out;
1160 }
1161
1162 if (s->flags & SLAB_STORE_USER)
1163 set_track(s, object, TRACK_FREE, addr);
1164 trace(s, page, object, 0);
1165
1166 init_object(s, object, SLUB_RED_INACTIVE);
1167
1168
1169 if (object != tail) {
1170 object = get_freepointer(s, object);
1171 goto next_object;
1172 }
1173 ret = 1;
1174
1175out:
1176 if (cnt != bulk_cnt)
1177 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1178 bulk_cnt, cnt);
1179
1180 slab_unlock(page);
1181 spin_unlock_irqrestore(&n->list_lock, flags);
1182 if (!ret)
1183 slab_fix(s, "Object at 0x%p not freed", object);
1184 return ret;
1185}
1186
1187static int __init setup_slub_debug(char *str)
1188{
1189 slub_debug = DEBUG_DEFAULT_FLAGS;
1190 if (*str++ != '=' || !*str)
1191
1192
1193
1194 goto out;
1195
1196 if (*str == ',')
1197
1198
1199
1200
1201 goto check_slabs;
1202
1203 slub_debug = 0;
1204 if (*str == '-')
1205
1206
1207
1208 goto out;
1209
1210
1211
1212
1213 for (; *str && *str != ','; str++) {
1214 switch (tolower(*str)) {
1215 case 'f':
1216 slub_debug |= SLAB_CONSISTENCY_CHECKS;
1217 break;
1218 case 'z':
1219 slub_debug |= SLAB_RED_ZONE;
1220 break;
1221 case 'p':
1222 slub_debug |= SLAB_POISON;
1223 break;
1224 case 'u':
1225 slub_debug |= SLAB_STORE_USER;
1226 break;
1227 case 't':
1228 slub_debug |= SLAB_TRACE;
1229 break;
1230 case 'a':
1231 slub_debug |= SLAB_FAILSLAB;
1232 break;
1233 case 'o':
1234
1235
1236
1237
1238 disable_higher_order_debug = 1;
1239 break;
1240 default:
1241 pr_err("slub_debug option '%c' unknown. skipped\n",
1242 *str);
1243 }
1244 }
1245
1246check_slabs:
1247 if (*str == ',')
1248 slub_debug_slabs = str + 1;
1249out:
1250 return 1;
1251}
1252
1253__setup("slub_debug", setup_slub_debug);
1254
1255unsigned long kmem_cache_flags(unsigned long object_size,
1256 unsigned long flags, const char *name,
1257 void (*ctor)(void *))
1258{
1259
1260
1261
1262 if (slub_debug && (!slub_debug_slabs || (name &&
1263 !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
1264 flags |= slub_debug;
1265
1266 return flags;
1267}
1268#else
1269static inline void setup_object_debug(struct kmem_cache *s,
1270 struct page *page, void *object) {}
1271
1272static inline int alloc_debug_processing(struct kmem_cache *s,
1273 struct page *page, void *object, unsigned long addr) { return 0; }
1274
1275static inline int free_debug_processing(
1276 struct kmem_cache *s, struct page *page,
1277 void *head, void *tail, int bulk_cnt,
1278 unsigned long addr) { return 0; }
1279
1280static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1281 { return 1; }
1282static inline int check_object(struct kmem_cache *s, struct page *page,
1283 void *object, u8 val) { return 1; }
1284static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1285 struct page *page) {}
1286static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1287 struct page *page) {}
1288unsigned long kmem_cache_flags(unsigned long object_size,
1289 unsigned long flags, const char *name,
1290 void (*ctor)(void *))
1291{
1292 return flags;
1293}
1294#define slub_debug 0
1295
1296#define disable_higher_order_debug 0
1297
1298static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1299 { return 0; }
1300static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1301 { return 0; }
1302static inline void inc_slabs_node(struct kmem_cache *s, int node,
1303 int objects) {}
1304static inline void dec_slabs_node(struct kmem_cache *s, int node,
1305 int objects) {}
1306
1307#endif
1308
1309
1310
1311
1312
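/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 */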
1313static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
1314{
1315 kmemleak_alloc(ptr, size, 1, flags);
1316 kasan_kmalloc_large(ptr, size, flags);
1317}
1318
1319static inline void kfree_hook(const void *x)
1320{
1321 kmemleak_free(x);
1322 kasan_kfree_large(x);
1323}
1324
1325static inline void slab_free_hook(struct kmem_cache *s, void *x)
1326{
1327 kmemleak_free_recursive(x, s->flags);
1328
1329
1330
1331
1332
1333
1334#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
1335 {
1336 unsigned long flags;
1337
1338 local_irq_save(flags);
1339 kmemcheck_slab_free(s, x, s->object_size);
1340 debug_check_no_locks_freed(x, s->object_size);
1341 local_irq_restore(flags);
1342 }
1343#endif
1344 if (!(s->flags & SLAB_DEBUG_OBJECTS))
1345 debug_check_no_obj_freed(x, s->object_size);
1346
1347 kasan_slab_free(s, x);
1348}
1349
1350static inline void slab_free_freelist_hook(struct kmem_cache *s,
1351 void *head, void *tail)
1352{
1353
1354
1355
1356
1357#if defined(CONFIG_KMEMCHECK) || \
1358 defined(CONFIG_LOCKDEP) || \
1359 defined(CONFIG_DEBUG_KMEMLEAK) || \
1360 defined(CONFIG_DEBUG_OBJECTS_FREE) || \
1361 defined(CONFIG_KASAN)
1362
1363 void *object = head;
1364 void *tail_obj = tail ? : head;
1365
1366 do {
1367 slab_free_hook(s, object);
1368 } while ((object != tail_obj) &&
1369 (object = get_freepointer(s, object)));
1370#endif
1371}
1372
1373static void setup_object(struct kmem_cache *s, struct page *page,
1374 void *object)
1375{
1376 setup_object_debug(s, page, object);
1377 if (unlikely(s->ctor)) {
1378 kasan_unpoison_object_data(s, object);
1379 s->ctor(object);
1380 kasan_poison_object_data(s, object);
1381 }
1382}
1383
1384
1385
1386
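/*
 * Slab allocation and freeing
 */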
1387static inline struct page *alloc_slab_page(struct kmem_cache *s,
1388 gfp_t flags, int node, struct kmem_cache_order_objects oo)
1389{
1390 struct page *page;
1391 int order = oo_order(oo);
1392
1393 flags |= __GFP_NOTRACK;
1394
1395 if (node == NUMA_NO_NODE)
1396 page = alloc_pages(flags, order);
1397 else
1398 page = __alloc_pages_node(node, flags, order);
1399
1400 if (page && memcg_charge_slab(page, flags, order, s)) {
1401 __free_pages(page, order);
1402 page = NULL;
1403 }
1404
1405 return page;
1406}
1407
1408static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1409{
1410 struct page *page;
1411 struct kmem_cache_order_objects oo = s->oo;
1412 gfp_t alloc_gfp;
1413 void *start, *p;
1414 int idx, order;
1415
1416 flags &= gfp_allowed_mask;
1417
1418 if (gfpflags_allow_blocking(flags))
1419 local_irq_enable();
1420
1421 flags |= s->allocflags;
1422
1423
1424
1425
1426
1427 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
1428 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
1429 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
1430
1431 page = alloc_slab_page(s, alloc_gfp, node, oo);
1432 if (unlikely(!page)) {
1433 oo = s->min;
1434 alloc_gfp = flags;
1435
1436
1437
1438
1439 page = alloc_slab_page(s, alloc_gfp, node, oo);
1440 if (unlikely(!page))
1441 goto out;
1442 stat(s, ORDER_FALLBACK);
1443 }
1444
1445 if (kmemcheck_enabled &&
1446 !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
1447 int pages = 1 << oo_order(oo);
1448
1449 kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
1450
1451
1452
1453
1454
1455 if (s->ctor)
1456 kmemcheck_mark_uninitialized_pages(page, pages);
1457 else
1458 kmemcheck_mark_unallocated_pages(page, pages);
1459 }
1460
1461 page->objects = oo_objects(oo);
1462
1463 order = compound_order(page);
1464 page->slab_cache = s;
1465 __SetPageSlab(page);
1466 if (page_is_pfmemalloc(page))
1467 SetPageSlabPfmemalloc(page);
1468
1469 start = page_address(page);
1470
1471 if (unlikely(s->flags & SLAB_POISON))
1472 memset(start, POISON_INUSE, PAGE_SIZE << order);
1473
1474 kasan_poison_slab(page);
1475
1476 for_each_object_idx(p, idx, s, start, page->objects) {
1477 setup_object(s, page, p);
1478 if (likely(idx < page->objects))
1479 set_freepointer(s, p, p + s->size);
1480 else
1481 set_freepointer(s, p, NULL);
1482 }
1483
1484 page->freelist = fixup_red_left(s, start);
1485 page->inuse = page->objects;
1486 page->frozen = 1;
1487
1488out:
1489 if (gfpflags_allow_blocking(flags))
1490 local_irq_disable();
1491 if (!page)
1492 return NULL;
1493
1494 mod_zone_page_state(page_zone(page),
1495 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1496 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1497 1 << oo_order(oo));
1498
1499 inc_slabs_node(s, page_to_nid(page), page->objects);
1500
1501 return page;
1502}
1503
1504static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1505{
1506 if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
1507 pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
1508 BUG();
1509 }
1510
1511 return allocate_slab(s,
1512 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1513}
1514
1515static void __free_slab(struct kmem_cache *s, struct page *page)
1516{
1517 int order = compound_order(page);
1518 int pages = 1 << order;
1519
1520 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1521 void *p;
1522
1523 slab_pad_check(s, page);
1524 for_each_object(p, s, page_address(page),
1525 page->objects)
1526 check_object(s, page, p, SLUB_RED_INACTIVE);
1527 }
1528
1529 kmemcheck_free_shadow(page, compound_order(page));
1530
1531 mod_zone_page_state(page_zone(page),
1532 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1533 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1534 -pages);
1535
1536 __ClearPageSlabPfmemalloc(page);
1537 __ClearPageSlab(page);
1538
1539 page_mapcount_reset(page);
1540 if (current->reclaim_state)
1541 current->reclaim_state->reclaimed_slab += pages;
1542 memcg_uncharge_slab(page, order, s);
1543 __free_pages(page, order);
1544}
1545
1546#define need_reserve_slab_rcu \
1547 (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1548
1549static void rcu_free_slab(struct rcu_head *h)
1550{
1551 struct page *page;
1552
1553 if (need_reserve_slab_rcu)
1554 page = virt_to_head_page(h);
1555 else
1556 page = container_of((struct list_head *)h, struct page, lru);
1557
1558 __free_slab(page->slab_cache, page);
1559}
1560
1561static void free_slab(struct kmem_cache *s, struct page *page)
1562{
1563 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1564 struct rcu_head *head;
1565
1566 if (need_reserve_slab_rcu) {
1567 int order = compound_order(page);
1568 int offset = (PAGE_SIZE << order) - s->reserved;
1569
1570 VM_BUG_ON(s->reserved != sizeof(*head));
1571 head = page_address(page) + offset;
1572 } else {
1573 head = &page->rcu_head;
1574 }
1575
1576 call_rcu(head, rcu_free_slab);
1577 } else
1578 __free_slab(s, page);
1579}
1580
1581static void discard_slab(struct kmem_cache *s, struct page *page)
1582{
1583 dec_slabs_node(s, page_to_nid(page), page->objects);
1584 free_slab(s, page);
1585}
1586
1587
1588
1589
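/*
 * Management of partially allocated slabs.
 */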
1590static inline void
1591__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1592{
1593 n->nr_partial++;
1594 if (tail == DEACTIVATE_TO_TAIL)
1595 list_add_tail(&page->lru, &n->partial);
1596 else
1597 list_add(&page->lru, &n->partial);
1598}
1599
1600static inline void add_partial(struct kmem_cache_node *n,
1601 struct page *page, int tail)
1602{
1603 lockdep_assert_held(&n->list_lock);
1604 __add_partial(n, page, tail);
1605}
1606
1607static inline void remove_partial(struct kmem_cache_node *n,
1608 struct page *page)
1609{
1610 lockdep_assert_held(&n->list_lock);
1611 list_del(&page->lru);
1612 n->nr_partial--;
1613}
1614
1615
1616
1617
1618
1619
1620
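/*
 * Remove slab from the partial list, freeze it and
 * return the pointer to the freelist.
 *
 * Returns a list of objects or NULL if it fails.
 *
 * Must hold list_lock since we modify the partial list.
 */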
1621static inline void *acquire_slab(struct kmem_cache *s,
1622 struct kmem_cache_node *n, struct page *page,
1623 int mode, int *objects)
1624{
1625 void *freelist;
1626 unsigned long counters;
1627 struct page new;
1628
1629 lockdep_assert_held(&n->list_lock);
1630
1631
1632
1633
1634
1635
1636 freelist = page->freelist;
1637 counters = page->counters;
1638 new.counters = counters;
1639 *objects = new.objects - new.inuse;
1640 if (mode) {
1641 new.inuse = page->objects;
1642 new.freelist = NULL;
1643 } else {
1644 new.freelist = freelist;
1645 }
1646
1647 VM_BUG_ON(new.frozen);
1648 new.frozen = 1;
1649
1650 if (!__cmpxchg_double_slab(s, page,
1651 freelist, counters,
1652 new.freelist, new.counters,
1653 "acquire_slab"))
1654 return NULL;
1655
1656 remove_partial(n, page);
1657 WARN_ON(!freelist);
1658 return freelist;
1659}
1660
1661static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1662static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1663
1664
1665
1666
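/*
 * Try to allocate a partial slab from a specific node.
 */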
1667static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1668 struct kmem_cache_cpu *c, gfp_t flags)
1669{
1670 struct page *page, *page2;
1671 void *object = NULL;
1672 int available = 0;
1673 int objects;
1674
1675
1676
1677
1678
1679
1680
1681 if (!n || !n->nr_partial)
1682 return NULL;
1683
1684 spin_lock(&n->list_lock);
1685 list_for_each_entry_safe(page, page2, &n->partial, lru) {
1686 void *t;
1687
1688 if (!pfmemalloc_match(page, flags))
1689 continue;
1690
1691 t = acquire_slab(s, n, page, object == NULL, &objects);
1692 if (!t)
1693 break;
1694
1695 available += objects;
1696 if (!object) {
1697 c->page = page;
1698 stat(s, ALLOC_FROM_PARTIAL);
1699 object = t;
1700 } else {
1701 put_cpu_partial(s, page, 0);
1702 stat(s, CPU_PARTIAL_NODE);
1703 }
1704 if (!kmem_cache_has_cpu_partial(s)
1705 || available > s->cpu_partial / 2)
1706 break;
1707
1708 }
1709 spin_unlock(&n->list_lock);
1710 return object;
1711}
1712
1713
1714
1715
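/*
 * Get a page from somewhere. Search in increasing NUMA distances.
 */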
1716static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1717 struct kmem_cache_cpu *c)
1718{
1719#ifdef CONFIG_NUMA
1720 struct zonelist *zonelist;
1721 struct zoneref *z;
1722 struct zone *zone;
1723 enum zone_type high_zoneidx = gfp_zone(flags);
1724 void *object;
1725 unsigned int cpuset_mems_cookie;
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745 if (!s->remote_node_defrag_ratio ||
1746 get_cycles() % 1024 > s->remote_node_defrag_ratio)
1747 return NULL;
1748
1749 do {
1750 cpuset_mems_cookie = read_mems_allowed_begin();
1751 zonelist = node_zonelist(mempolicy_slab_node(), flags);
1752 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1753 struct kmem_cache_node *n;
1754
1755 n = get_node(s, zone_to_nid(zone));
1756
1757 if (n && cpuset_zone_allowed(zone, flags) &&
1758 n->nr_partial > s->min_partial) {
1759 object = get_partial_node(s, n, c, flags);
1760 if (object) {
1761
1762
1763
1764
1765
1766
1767
1768 return object;
1769 }
1770 }
1771 }
1772 } while (read_mems_allowed_retry(cpuset_mems_cookie));
1773#endif
1774 return NULL;
1775}
1776
1777
1778
1779
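/*
 * Get a partial page, lock it and return it.
 */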
1780static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1781 struct kmem_cache_cpu *c)
1782{
1783 void *object;
1784 int searchnode = node;
1785
1786 if (node == NUMA_NO_NODE)
1787 searchnode = numa_mem_id();
1788 else if (!node_present_pages(node))
1789 searchnode = node_to_mem_node(node);
1790
1791 object = get_partial_node(s, get_node(s, searchnode), c, flags);
1792 if (object || node != NUMA_NO_NODE)
1793 return object;
1794
1795 return get_any_partial(s, flags, c);
1796}
1797
1798#ifdef CONFIG_PREEMPT
1799
1800
1801
1802
1803
1804#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1805#else
1806
1807
1808
1809
1810#define TID_STEP 1
1811#endif
1812
1813static inline unsigned long next_tid(unsigned long tid)
1814{
1815 return tid + TID_STEP;
1816}
1817
1818static inline unsigned int tid_to_cpu(unsigned long tid)
1819{
1820 return tid % TID_STEP;
1821}
1822
1823static inline unsigned long tid_to_event(unsigned long tid)
1824{
1825 return tid / TID_STEP;
1826}
1827
1828static inline unsigned int init_tid(int cpu)
1829{
1830 return cpu;
1831}
1832
1833static inline void note_cmpxchg_failure(const char *n,
1834 const struct kmem_cache *s, unsigned long tid)
1835{
1836#ifdef SLUB_DEBUG_CMPXCHG
1837 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1838
1839 pr_info("%s %s: cmpxchg redo ", n, s->name);
1840
1841#ifdef CONFIG_PREEMPT
1842 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1843 pr_warn("due to cpu change %d -> %d\n",
1844 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1845 else
1846#endif
1847 if (tid_to_event(tid) != tid_to_event(actual_tid))
1848 pr_warn("due to cpu running other code. Event %ld->%ld\n",
1849 tid_to_event(tid), tid_to_event(actual_tid));
1850 else
1851 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
1852 actual_tid, tid, next_tid(tid));
1853#endif
1854 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
1855}
1856
1857static void init_kmem_cache_cpus(struct kmem_cache *s)
1858{
1859 int cpu;
1860
1861 for_each_possible_cpu(cpu)
1862 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
1863}
1864
1865
1866
1867
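/*
 * Remove the cpu slab.
 */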
1868static void deactivate_slab(struct kmem_cache *s, struct page *page,
1869 void *freelist)
1870{
1871 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
1872 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1873 int lock = 0;
1874 enum slab_modes l = M_NONE, m = M_NONE;
1875 void *nextfree;
1876 int tail = DEACTIVATE_TO_HEAD;
1877 struct page new;
1878 struct page old;
1879
1880 if (page->freelist) {
1881 stat(s, DEACTIVATE_REMOTE_FREES);
1882 tail = DEACTIVATE_TO_TAIL;
1883 }
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893 while (freelist && (nextfree = get_freepointer(s, freelist))) {
1894 void *prior;
1895 unsigned long counters;
1896
1897 do {
1898 prior = page->freelist;
1899 counters = page->counters;
1900 set_freepointer(s, freelist, prior);
1901 new.counters = counters;
1902 new.inuse--;
1903 VM_BUG_ON(!new.frozen);
1904
1905 } while (!__cmpxchg_double_slab(s, page,
1906 prior, counters,
1907 freelist, new.counters,
1908 "drain percpu freelist"));
1909
1910 freelist = nextfree;
1911 }
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927redo:
1928
1929 old.freelist = page->freelist;
1930 old.counters = page->counters;
1931 VM_BUG_ON(!old.frozen);
1932
1933
1934 new.counters = old.counters;
1935 if (freelist) {
1936 new.inuse--;
1937 set_freepointer(s, freelist, old.freelist);
1938 new.freelist = freelist;
1939 } else
1940 new.freelist = old.freelist;
1941
1942 new.frozen = 0;
1943
1944 if (!new.inuse && n->nr_partial >= s->min_partial)
1945 m = M_FREE;
1946 else if (new.freelist) {
1947 m = M_PARTIAL;
1948 if (!lock) {
1949 lock = 1;
1950
1951
1952
1953
1954
1955 spin_lock(&n->list_lock);
1956 }
1957 } else {
1958 m = M_FULL;
1959 if (kmem_cache_debug(s) && !lock) {
1960 lock = 1;
1961
1962
1963
1964
1965
1966 spin_lock(&n->list_lock);
1967 }
1968 }
1969
1970 if (l != m) {
1971
1972 if (l == M_PARTIAL)
1973
1974 remove_partial(n, page);
1975
1976 else if (l == M_FULL)
1977
1978 remove_full(s, n, page);
1979
1980 if (m == M_PARTIAL) {
1981
1982 add_partial(n, page, tail);
1983 stat(s, tail);
1984
1985 } else if (m == M_FULL) {
1986
1987 stat(s, DEACTIVATE_FULL);
1988 add_full(s, n, page);
1989
1990 }
1991 }
1992
1993 l = m;
1994 if (!__cmpxchg_double_slab(s, page,
1995 old.freelist, old.counters,
1996 new.freelist, new.counters,
1997 "unfreezing slab"))
1998 goto redo;
1999
2000 if (lock)
2001 spin_unlock(&n->list_lock);
2002
2003 if (m == M_FREE) {
2004 stat(s, DEACTIVATE_EMPTY);
2005 discard_slab(s, page);
2006 stat(s, FREE_SLAB);
2007 }
2008}
2009
2010
2011
2012
2013
2014
2015
2016
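/*
 * Unfreeze all the cpu partial slabs.
 *
 * This function must be called with interrupts disabled
 * for the cpu using c (or some other guarantee must be there
 * to guarantee no concurrent accesses).
 */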
2017static void unfreeze_partials(struct kmem_cache *s,
2018 struct kmem_cache_cpu *c)
2019{
2020#ifdef CONFIG_SLUB_CPU_PARTIAL
2021 struct kmem_cache_node *n = NULL, *n2 = NULL;
2022 struct page *page, *discard_page = NULL;
2023
2024 while ((page = c->partial)) {
2025 struct page new;
2026 struct page old;
2027
2028 c->partial = page->next;
2029
2030 n2 = get_node(s, page_to_nid(page));
2031 if (n != n2) {
2032 if (n)
2033 spin_unlock(&n->list_lock);
2034
2035 n = n2;
2036 spin_lock(&n->list_lock);
2037 }
2038
2039 do {
2040
2041 old.freelist = page->freelist;
2042 old.counters = page->counters;
2043 VM_BUG_ON(!old.frozen);
2044
2045 new.counters = old.counters;
2046 new.freelist = old.freelist;
2047
2048 new.frozen = 0;
2049
2050 } while (!__cmpxchg_double_slab(s, page,
2051 old.freelist, old.counters,
2052 new.freelist, new.counters,
2053 "unfreezing slab"));
2054
2055 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2056 page->next = discard_page;
2057 discard_page = page;
2058 } else {
2059 add_partial(n, page, DEACTIVATE_TO_TAIL);
2060 stat(s, FREE_ADD_PARTIAL);
2061 }
2062 }
2063
2064 if (n)
2065 spin_unlock(&n->list_lock);
2066
2067 while (discard_page) {
2068 page = discard_page;
2069 discard_page = discard_page->next;
2070
2071 stat(s, DEACTIVATE_EMPTY);
2072 discard_slab(s, page);
2073 stat(s, FREE_SLAB);
2074 }
2075#endif
2076}
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
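/*
 * Put a page that was just frozen (in __slab_free or get_partial_node) into
 * a partial page slot if available.
 *
 * If we did not find a slot then simply move all the partials to the
 * per node partial list.
 */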
2087static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2088{
2089#ifdef CONFIG_SLUB_CPU_PARTIAL
2090 struct page *oldpage;
2091 int pages;
2092 int pobjects;
2093
2094 preempt_disable();
2095 do {
2096 pages = 0;
2097 pobjects = 0;
2098 oldpage = this_cpu_read(s->cpu_slab->partial);
2099
2100 if (oldpage) {
2101 pobjects = oldpage->pobjects;
2102 pages = oldpage->pages;
2103 if (drain && pobjects > s->cpu_partial) {
2104 unsigned long flags;
2105
2106
2107
2108
2109 local_irq_save(flags);
2110 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2111 local_irq_restore(flags);
2112 oldpage = NULL;
2113 pobjects = 0;
2114 pages = 0;
2115 stat(s, CPU_PARTIAL_DRAIN);
2116 }
2117 }
2118
2119 pages++;
2120 pobjects += page->objects - page->inuse;
2121
2122 page->pages = pages;
2123 page->pobjects = pobjects;
2124 page->next = oldpage;
2125
2126 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2127 != oldpage);
2128 if (unlikely(!s->cpu_partial)) {
2129 unsigned long flags;
2130
2131 local_irq_save(flags);
2132 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2133 local_irq_restore(flags);
2134 }
2135 preempt_enable();
2136#endif
2137}
2138
2139static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2140{
2141 stat(s, CPUSLAB_FLUSH);
2142 deactivate_slab(s, c->page, c->freelist);
2143
2144 c->tid = next_tid(c->tid);
2145 c->page = NULL;
2146 c->freelist = NULL;
2147}
2148
2149
2150
2151
2152
2153
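/*
 * Flush cpu slab.
 *
 * Called from IPI handler with interrupts disabled.
 */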
2154static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2155{
2156 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2157
2158 if (likely(c)) {
2159 if (c->page)
2160 flush_slab(s, c);
2161
2162 unfreeze_partials(s, c);
2163 }
2164}
2165
2166static void flush_cpu_slab(void *d)
2167{
2168 struct kmem_cache *s = d;
2169
2170 __flush_cpu_slab(s, smp_processor_id());
2171}
2172
2173static bool has_cpu_slab(int cpu, void *info)
2174{
2175 struct kmem_cache *s = info;
2176 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2177
2178 return c->page || c->partial;
2179}
2180
2181static void flush_all(struct kmem_cache *s)
2182{
2183 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2184}
2185
2186
2187
2188
2189
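/*
 * Check if the objects in a per cpu structure fit numa
 * locality expectations.
 */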
2190static inline int node_match(struct page *page, int node)
2191{
2192#ifdef CONFIG_NUMA
2193 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
2194 return 0;
2195#endif
2196 return 1;
2197}
2198
2199#ifdef CONFIG_SLUB_DEBUG
2200static int count_free(struct page *page)
2201{
2202 return page->objects - page->inuse;
2203}
2204
2205static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2206{
2207 return atomic_long_read(&n->total_objects);
2208}
2209#endif
2210
2211#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2212static unsigned long count_partial(struct kmem_cache_node *n,
2213 int (*get_count)(struct page *))
2214{
2215 unsigned long flags;
2216 unsigned long x = 0;
2217 struct page *page;
2218
2219 spin_lock_irqsave(&n->list_lock, flags);
2220 list_for_each_entry(page, &n->partial, lru)
2221 x += get_count(page);
2222 spin_unlock_irqrestore(&n->list_lock, flags);
2223 return x;
2224}
2225#endif
2226
2227static noinline void
2228slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2229{
2230#ifdef CONFIG_SLUB_DEBUG
2231 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2232 DEFAULT_RATELIMIT_BURST);
2233 int node;
2234 struct kmem_cache_node *n;
2235
2236 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2237 return;
2238
2239 pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2240 nid, gfpflags, &gfpflags);
2241 pr_warn(" cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
2242 s->name, s->object_size, s->size, oo_order(s->oo),
2243 oo_order(s->min));
2244
2245 if (oo_order(s->min) > get_order(s->object_size))
2246 pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
2247 s->name);
2248
2249 for_each_kmem_cache_node(s, node, n) {
2250 unsigned long nr_slabs;
2251 unsigned long nr_objs;
2252 unsigned long nr_free;
2253
2254 nr_free = count_partial(n, count_free);
2255 nr_slabs = node_nr_slabs(n);
2256 nr_objs = node_nr_objs(n);
2257
2258 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
2259 node, nr_slabs, nr_objs, nr_free);
2260 }
2261#endif
2262}
2263
2264static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2265 int node, struct kmem_cache_cpu **pc)
2266{
2267 void *freelist;
2268 struct kmem_cache_cpu *c = *pc;
2269 struct page *page;
2270
2271 freelist = get_partial(s, flags, node, c);
2272
2273 if (freelist)
2274 return freelist;
2275
2276 page = new_slab(s, flags, node);
2277 if (page) {
2278 c = raw_cpu_ptr(s->cpu_slab);
2279 if (c->page)
2280 flush_slab(s, c);
2281
2282
2283
2284
2285
2286 freelist = page->freelist;
2287 page->freelist = NULL;
2288
2289 stat(s, ALLOC_SLAB);
2290 c->page = page;
2291 *pc = c;
2292 } else
2293 freelist = NULL;
2294
2295 return freelist;
2296}
2297
2298static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2299{
2300 if (unlikely(PageSlabPfmemalloc(page)))
2301 return gfp_pfmemalloc_allowed(gfpflags);
2302
2303 return true;
2304}
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
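/*
 * Check page->freelist and either transfer the freelist to the
 * per cpu freelist or deactivate the page.
 *
 * The page is still frozen if the return value is not NULL.
 *
 * If this function returns NULL then the page has been unfrozen.
 *
 * This function must be called with interrupts disabled.
 */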
2316static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2317{
2318 struct page new;
2319 unsigned long counters;
2320 void *freelist;
2321
2322 do {
2323 freelist = page->freelist;
2324 counters = page->counters;
2325
2326 new.counters = counters;
2327 VM_BUG_ON(!new.frozen);
2328
2329 new.inuse = page->objects;
2330 new.frozen = freelist != NULL;
2331
2332 } while (!__cmpxchg_double_slab(s, page,
2333 freelist, counters,
2334 NULL, new.counters,
2335 "get_freelist"));
2336
2337 return freelist;
2338}
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
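/*
 * Slow path. The lockless freelist is empty or we need to perform
 * debugging duties.
 *
 * Processing is still very fast if new objects have been freed to the
 * regular freelist. In that case we simply take over the regular freelist
 * as the lockless freelist and zap the regular freelist.
 *
 * If that is not working then we fall back to the partial lists. We take the
 * first element of the freelist as the object to allocate now and move the
 * rest of the freelist to the lockless freelist.
 *
 * And if we were unable to get a new slab from the partial slab lists then
 * we need to allocate a new slab. This is the slowest path since it involves
 * a call to the page allocator and the setup of a new slab.
 *
 * Version of __slab_alloc to use when we know that interrupts are
 * already disabled (which is the case for bulk allocation).
 */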
2359static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2360 unsigned long addr, struct kmem_cache_cpu *c)
2361{
2362 void *freelist;
2363 struct page *page;
2364
2365 page = c->page;
2366 if (!page)
2367 goto new_slab;
2368redo:
2369
2370 if (unlikely(!node_match(page, node))) {
2371 int searchnode = node;
2372
2373 if (node != NUMA_NO_NODE && !node_present_pages(node))
2374 searchnode = node_to_mem_node(node);
2375
2376 if (unlikely(!node_match(page, searchnode))) {
2377 stat(s, ALLOC_NODE_MISMATCH);
2378 deactivate_slab(s, page, c->freelist);
2379 c->page = NULL;
2380 c->freelist = NULL;
2381 goto new_slab;
2382 }
2383 }
2384
2385
2386
2387
2388
2389
2390 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2391 deactivate_slab(s, page, c->freelist);
2392 c->page = NULL;
2393 c->freelist = NULL;
2394 goto new_slab;
2395 }
2396
2397
2398 freelist = c->freelist;
2399 if (freelist)
2400 goto load_freelist;
2401
2402 freelist = get_freelist(s, page);
2403
2404 if (!freelist) {
2405 c->page = NULL;
2406 stat(s, DEACTIVATE_BYPASS);
2407 goto new_slab;
2408 }
2409
2410 stat(s, ALLOC_REFILL);
2411
2412load_freelist:
2413
2414
2415
2416
2417
2418 VM_BUG_ON(!c->page->frozen);
2419 c->freelist = get_freepointer(s, freelist);
2420 c->tid = next_tid(c->tid);
2421 return freelist;
2422
2423new_slab:
2424
2425 if (c->partial) {
2426 page = c->page = c->partial;
2427 c->partial = page->next;
2428 stat(s, CPU_PARTIAL_ALLOC);
2429 c->freelist = NULL;
2430 goto redo;
2431 }
2432
2433 freelist = new_slab_objects(s, gfpflags, node, &c);
2434
2435 if (unlikely(!freelist)) {
2436 slab_out_of_memory(s, gfpflags, node);
2437 return NULL;
2438 }
2439
2440 page = c->page;
2441 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2442 goto load_freelist;
2443
2444
2445 if (kmem_cache_debug(s) &&
2446 !alloc_debug_processing(s, page, freelist, addr))
2447 goto new_slab;
2448
2449 deactivate_slab(s, page, get_freepointer(s, freelist));
2450 c->page = NULL;
2451 c->freelist = NULL;
2452 return freelist;
2453}
2454
2455
2456
2457
2458
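/*
 * Another one that disables interrupts and compensates for possible
 * cpu changes by refetching the per cpu area pointer.
 */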
2459static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2460 unsigned long addr, struct kmem_cache_cpu *c)
2461{
2462 void *p;
2463 unsigned long flags;
2464
2465 local_irq_save(flags);
2466#ifdef CONFIG_PREEMPT
2467
2468
2469
2470
2471
2472 c = this_cpu_ptr(s->cpu_slab);
2473#endif
2474
2475 p = ___slab_alloc(s, gfpflags, node, addr, c);
2476 local_irq_restore(flags);
2477 return p;
2478}
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
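/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */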
2490static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2491 gfp_t gfpflags, int node, unsigned long addr)
2492{
2493 void *object;
2494 struct kmem_cache_cpu *c;
2495 struct page *page;
2496 unsigned long tid;
2497
2498 s = slab_pre_alloc_hook(s, gfpflags);
2499 if (!s)
2500 return NULL;
2501redo:
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512 do {
2513 tid = this_cpu_read(s->cpu_slab->tid);
2514 c = raw_cpu_ptr(s->cpu_slab);
2515 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2516 unlikely(tid != READ_ONCE(c->tid)));
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526 barrier();
2527
2528
2529
2530
2531
2532
2533
2534
2535 object = c->freelist;
2536 page = c->page;
2537 if (unlikely(!object || !node_match(page, node))) {
2538 object = __slab_alloc(s, gfpflags, node, addr, c);
2539 stat(s, ALLOC_SLOWPATH);
2540 } else {
2541 void *next_object = get_freepointer_safe(s, object);
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557 if (unlikely(!this_cpu_cmpxchg_double(
2558 s->cpu_slab->freelist, s->cpu_slab->tid,
2559 object, tid,
2560 next_object, next_tid(tid)))) {
2561
2562 note_cmpxchg_failure("slab_alloc", s, tid);
2563 goto redo;
2564 }
2565 prefetch_freepointer(s, next_object);
2566 stat(s, ALLOC_FASTPATH);
2567 }
2568
2569 if (unlikely(gfpflags & __GFP_ZERO) && object)
2570 memset(object, 0, s->object_size);
2571
2572 slab_post_alloc_hook(s, gfpflags, 1, &object);
2573
2574 return object;
2575}
2576
2577static __always_inline void *slab_alloc(struct kmem_cache *s,
2578 gfp_t gfpflags, unsigned long addr)
2579{
2580 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2581}
2582
2583void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2584{
2585 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2586
2587 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2588 s->size, gfpflags);
2589
2590 return ret;
2591}
2592EXPORT_SYMBOL(kmem_cache_alloc);
2593
2594#ifdef CONFIG_TRACING
2595void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2596{
2597 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2598 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2599 kasan_kmalloc(s, ret, size, gfpflags);
2600 return ret;
2601}
2602EXPORT_SYMBOL(kmem_cache_alloc_trace);
2603#endif
2604
2605#ifdef CONFIG_NUMA
2606void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2607{
2608 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2609
2610 trace_kmem_cache_alloc_node(_RET_IP_, ret,
2611 s->object_size, s->size, gfpflags, node);
2612
2613 return ret;
2614}
2615EXPORT_SYMBOL(kmem_cache_alloc_node);
2616
2617#ifdef CONFIG_TRACING
2618void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2619 gfp_t gfpflags,
2620 int node, size_t size)
2621{
2622 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2623
2624 trace_kmalloc_node(_RET_IP_, ret,
2625 size, s->size, gfpflags, node);
2626
2627 kasan_kmalloc(s, ret, size, gfpflags);
2628 return ret;
2629}
2630EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2631#endif
2632#endif
2633
2634
2635
2636
2637
2638
2639
2640
2641
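/*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just take the slab
 * lock and free the item. If there is no additional partial page
 * handling required then we can return immediately.
 */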
2642static void __slab_free(struct kmem_cache *s, struct page *page,
2643 void *head, void *tail, int cnt,
2644 unsigned long addr)
2645
2646{
2647 void *prior;
2648 int was_frozen;
2649 struct page new;
2650 unsigned long counters;
2651 struct kmem_cache_node *n = NULL;
2652 unsigned long uninitialized_var(flags);
2653
2654 stat(s, FREE_SLOWPATH);
2655
2656 if (kmem_cache_debug(s) &&
2657 !free_debug_processing(s, page, head, tail, cnt, addr))
2658 return;
2659
2660 do {
2661 if (unlikely(n)) {
2662 spin_unlock_irqrestore(&n->list_lock, flags);
2663 n = NULL;
2664 }
2665 prior = page->freelist;
2666 counters = page->counters;
2667 set_freepointer(s, tail, prior);
2668 new.counters = counters;
2669 was_frozen = new.frozen;
2670 new.inuse -= cnt;
2671 if ((!new.inuse || !prior) && !was_frozen) {
2672
2673 if (kmem_cache_has_cpu_partial(s) && !prior) {
				/*
				 * The slab was on no list before and will be
				 * partially empty after this free. We can
				 * defer the list move and instead freeze it.
				 */
2681 new.frozen = 1;
2682
2683 } else {
2684
2685 n = get_node(s, page_to_nid(page));
				/*
				 * Speculatively acquire the list_lock.
				 * If the cmpxchg does not succeed then we may
				 * drop the list_lock without any processing.
				 *
				 * Otherwise the list_lock will synchronize
				 * with other processors updating the list of
				 * slabs.
				 */
2694 spin_lock_irqsave(&n->list_lock, flags);
2695
2696 }
2697 }
2698
2699 } while (!cmpxchg_double_slab(s, page,
2700 prior, counters,
2701 head, new.counters,
2702 "__slab_free"));
2703
2704 if (likely(!n)) {
		/*
		 * If we just froze the page then put it onto the per cpu
		 * partial list.
		 */
2710 if (new.frozen && !was_frozen) {
2711 put_cpu_partial(s, page, 1);
2712 stat(s, CPU_PARTIAL_FREE);
2713 }
2714
2715
2716
2717
2718 if (was_frozen)
2719 stat(s, FREE_FROZEN);
2720 return;
2721 }
2722
2723 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
2724 goto slab_empty;
2725
	/*
	 * Objects are left in the slab. If it was not on the partial list
	 * before then add it.
	 */
2730 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2731 if (kmem_cache_debug(s))
2732 remove_full(s, n, page);
2733 add_partial(n, page, DEACTIVATE_TO_TAIL);
2734 stat(s, FREE_ADD_PARTIAL);
2735 }
2736 spin_unlock_irqrestore(&n->list_lock, flags);
2737 return;
2738
2739slab_empty:
2740 if (prior) {
2741
2742
2743
2744 remove_partial(n, page);
2745 stat(s, FREE_REMOVE_PARTIAL);
2746 } else {
2747
2748 remove_full(s, n, page);
2749 }
2750
2751 spin_unlock_irqrestore(&n->list_lock, flags);
2752 stat(s, FREE_SLAB);
2753 discard_slab(s, page);
2754}
2755
/*
 * Fastpath with forced inlining so that kfree and kmem_cache_free can free
 * without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab of
 * this processor, which is typically the case if the item was just
 * allocated. Otherwise we fall back to __slab_free() for the special cases.
 *
 * Bulk free of a freelist with several objects (all pointing to the same
 * page) is possible by specifying head and tail pointers plus an object
 * count (cnt); a bulk free is indicated by the tail pointer being set.
 */
2771static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2772 void *head, void *tail, int cnt,
2773 unsigned long addr)
2774{
2775 void *tail_obj = tail ? : head;
2776 struct kmem_cache_cpu *c;
2777 unsigned long tid;
2778
2779 slab_free_freelist_hook(s, head, tail);
2780
2781redo:
	/*
	 * Determine the current cpu's per cpu slab. The cpu may change
	 * afterwards, but that does not matter since data is retrieved via
	 * this pointer: if we are still on the same cpu during the cmpxchg
	 * then the free will succeed, otherwise we retry.
	 */
2788 do {
2789 tid = this_cpu_read(s->cpu_slab->tid);
2790 c = raw_cpu_ptr(s->cpu_slab);
2791 } while (IS_ENABLED(CONFIG_PREEMPT) &&
2792 unlikely(tid != READ_ONCE(c->tid)));
2793
2794
2795 barrier();
2796
2797 if (likely(page == c->page)) {
2798 set_freepointer(s, tail_obj, c->freelist);
2799
2800 if (unlikely(!this_cpu_cmpxchg_double(
2801 s->cpu_slab->freelist, s->cpu_slab->tid,
2802 c->freelist, tid,
2803 head, next_tid(tid)))) {
2804
2805 note_cmpxchg_failure("slab_free", s, tid);
2806 goto redo;
2807 }
2808 stat(s, FREE_FASTPATH);
2809 } else
2810 __slab_free(s, page, head, tail_obj, cnt, addr);
2811
2812}
2813
2814void kmem_cache_free(struct kmem_cache *s, void *x)
2815{
2816 s = cache_from_obj(s, x);
2817 if (!s)
2818 return;
2819 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
2820 trace_kmem_cache_free(_RET_IP_, x);
2821}
2822EXPORT_SYMBOL(kmem_cache_free);
2823
2824struct detached_freelist {
2825 struct page *page;
2826 void *tail;
2827 void *freelist;
2828 int cnt;
2829 struct kmem_cache *s;
2830};
2831
/*
 * This function progressively scans the array of objects to free (with a
 * limited look ahead) and extracts objects belonging to the same page. It
 * builds a detached freelist directly within the given page/objects. This
 * can happen without any synchronization because the objects are still owned
 * by the running process.
 *
 * The freelist is built up as a singly linked list in the objects
 * themselves, so that it can later be transferred to the real freelist(s)
 * with a single synchronization primitive. Look ahead in the array is
 * limited for performance reasons.
 */
2844static inline
2845int build_detached_freelist(struct kmem_cache *s, size_t size,
2846 void **p, struct detached_freelist *df)
2847{
2848 size_t first_skipped_index = 0;
2849 int lookahead = 3;
2850 void *object;
2851 struct page *page;
2852
2853
2854 df->page = NULL;
2855
2856 do {
2857 object = p[--size];
2858
2859 } while (!object && size);
2860
2861 if (!object)
2862 return 0;
2863
2864 page = virt_to_head_page(object);
2865 if (!s) {
2866
2867 if (unlikely(!PageSlab(page))) {
2868 BUG_ON(!PageCompound(page));
2869 kfree_hook(object);
2870 __free_kmem_pages(page, compound_order(page));
2871 p[size] = NULL;
2872 return size;
2873 }
2874
2875 df->s = page->slab_cache;
2876 } else {
2877 df->s = cache_from_obj(s, object);
2878 }
2879
2880
2881 df->page = page;
2882 set_freepointer(df->s, object, NULL);
2883 df->tail = object;
2884 df->freelist = object;
2885 p[size] = NULL;
2886 df->cnt = 1;
2887
2888 while (size) {
2889 object = p[--size];
2890 if (!object)
2891 continue;
2892
2893
2894 if (df->page == virt_to_head_page(object)) {
2895
2896 set_freepointer(df->s, object, df->freelist);
2897 df->freelist = object;
2898 df->cnt++;
2899 p[size] = NULL;
2900
2901 continue;
2902 }
2903
2904
2905 if (!--lookahead)
2906 break;
2907
2908 if (!first_skipped_index)
2909 first_skipped_index = size + 1;
2910 }
2911
2912 return first_skipped_index;
2913}
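/*
 * Worked example of the look ahead behaviour above (hypothetical objects):
 * with p[] = { A, B, C } where A and C live on the same page but B does not,
 * the first call starts at C, links A onto it (cnt == 2), NULLs both slots
 * and returns first_skipped_index == 2. kmem_cache_free_bulk() then calls
 * again with size == 2, picks up B (the NULLed slot is skipped) as a
 * one-object freelist and returns 0, which ends the loop.
 */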
2914
2915
2916void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
2917{
2918 if (WARN_ON(!size))
2919 return;
2920
2921 do {
2922 struct detached_freelist df;
2923
2924 size = build_detached_freelist(s, size, p, &df);
2925 if (unlikely(!df.page))
2926 continue;
2927
2928 slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
2929 } while (likely(size));
2930}
2931EXPORT_SYMBOL(kmem_cache_free_bulk);
2932
2933
2934int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2935 void **p)
2936{
2937 struct kmem_cache_cpu *c;
2938 int i;
2939
2940
2941 s = slab_pre_alloc_hook(s, flags);
2942 if (unlikely(!s))
2943 return false;
2944
	/*
	 * Drain objects from the per cpu slab while local IRQs are disabled,
	 * which protects against preemption and against interrupt handlers
	 * invoking the normal fastpath underneath us.
	 */
2949 local_irq_disable();
2950 c = this_cpu_ptr(s->cpu_slab);
2951
2952 for (i = 0; i < size; i++) {
2953 void *object = c->freelist;
2954
2955 if (unlikely(!object)) {
			/*
			 * Invoking the slow path likely has the side effect
			 * of re-populating the per CPU freelist, so the per
			 * cpu pointer is re-read below.
			 */
2960 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
2961 _RET_IP_, c);
2962 if (unlikely(!p[i]))
2963 goto error;
2964
2965 c = this_cpu_ptr(s->cpu_slab);
2966 continue;
2967 }
2968 c->freelist = get_freepointer(s, object);
2969 p[i] = object;
2970 }
2971 c->tid = next_tid(c->tid);
2972 local_irq_enable();
2973
2974
2975 if (unlikely(flags & __GFP_ZERO)) {
2976 int j;
2977
2978 for (j = 0; j < i; j++)
2979 memset(p[j], 0, s->object_size);
2980 }
2981
2982
2983 slab_post_alloc_hook(s, flags, size, p);
2984 return i;
2985error:
2986 local_irq_enable();
2987 slab_post_alloc_hook(s, flags, i, p);
2988 __kmem_cache_free_bulk(s, i, p);
2989 return 0;
2990}
2991EXPORT_SYMBOL(kmem_cache_alloc_bulk);
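/*
 * Illustrative bulk usage sketch (cache name and batch size are made up):
 * callers that need many objects at once can amortize the per-object
 * fastpath cost. kmem_cache_alloc_bulk() returns the number of objects
 * allocated (== size) or 0 on failure, in which case nothing is left
 * allocated. It must be called with interrupts enabled because it uses
 * local_irq_disable()/local_irq_enable() internally.
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL,
 *				   ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cachep, ARRAY_SIZE(objs), objs);
 */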
2992
/*
 * Minimum / maximum order of slab pages and the minimum number of objects
 * per slab. These influence locking overhead and slab fragmentation: a
 * higher order reduces the number of partial slabs and the frequency of
 * list operations, at the cost of larger contiguous allocations. All three
 * can be overridden on the kernel command line (see the __setup() handlers
 * further down).
 */
3013static int slub_min_order;
3014static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3015static int slub_min_objects;
3016
/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on other system components.
 * Generally order 0 allocations should be preferred since order 0 does not
 * cause fragmentation in the page allocator. Larger objects can be
 * problematic in order 0 slabs because too much space may be left unused; we
 * go to a higher order if more than 1/16th of the slab would be wasted.
 *
 * To reach satisfactory performance a minimum number of objects must fit in
 * one slab, otherwise too much activity is generated on the partial lists,
 * which requires taking the list_lock. This is less of a concern for large
 * slabs, which are rarely used.
 *
 * slub_max_order is the order at which we stop considering the number of
 * objects in a slab as critical: beyond it we keep the page order as low as
 * possible and accept more wasted space. If the user requested a higher
 * minimum order we start from that instead of the smallest order that fits
 * the object.
 */
3042static inline int slab_order(int size, int min_objects,
3043 int max_order, int fract_leftover, int reserved)
3044{
3045 int order;
3046 int rem;
3047 int min_order = slub_min_order;
3048
3049 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
3050 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3051
3052 for (order = max(min_order, get_order(min_objects * size + reserved));
3053 order <= max_order; order++) {
3054
3055 unsigned long slab_size = PAGE_SIZE << order;
3056
3057 rem = (slab_size - reserved) % size;
3058
3059 if (rem <= slab_size / fract_leftover)
3060 break;
3061 }
3062
3063 return order;
3064}
3065
3066static inline int calculate_order(int size, int reserved)
3067{
3068 int order;
3069 int min_objects;
3070 int fraction;
3071 int max_objects;
3072
	/*
	 * Attempt to find the best configuration for a slab. This works by
	 * first attempting to generate a layout with the best possible
	 * configuration and backing off gradually: first we increase the
	 * acceptable waste in a slab, then we reduce the minimum number of
	 * objects required per slab.
	 */
3081 min_objects = slub_min_objects;
3082 if (!min_objects)
3083 min_objects = 4 * (fls(nr_cpu_ids) + 1);
3084 max_objects = order_objects(slub_max_order, size, reserved);
3085 min_objects = min(min_objects, max_objects);
3086
3087 while (min_objects > 1) {
3088 fraction = 16;
3089 while (fraction >= 4) {
3090 order = slab_order(size, min_objects,
3091 slub_max_order, fraction, reserved);
3092 if (order <= slub_max_order)
3093 return order;
3094 fraction /= 2;
3095 }
3096 min_objects--;
3097 }
3098
3099
3100
3101
3102
3103 order = slab_order(size, 1, slub_max_order, 1, reserved);
3104 if (order <= slub_max_order)
3105 return order;
3106
3107
3108
3109
3110 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
3111 if (order < MAX_ORDER)
3112 return order;
3113 return -ENOSYS;
3114}
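/*
 * Worked example of the search above, assuming 4KiB pages, the default
 * slub_max_order of PAGE_ALLOC_COSTLY_ORDER (3), reserved == 0 and a 4-cpu
 * machine (so min_objects starts at 4 * (fls(4) + 1) == 16):
 *
 * For size == 704, slab_order() starts at get_order(16 * 704) == 2. An
 * order-2 slab is 16384 bytes, holds 23 objects and leaves
 * 16384 % 704 == 192 bytes unused, which is below the 16384/16 waste limit
 * on the first pass, so calculate_order() returns 2.
 */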
3115
3116static void
3117init_kmem_cache_node(struct kmem_cache_node *n)
3118{
3119 n->nr_partial = 0;
3120 spin_lock_init(&n->list_lock);
3121 INIT_LIST_HEAD(&n->partial);
3122#ifdef CONFIG_SLUB_DEBUG
3123 atomic_long_set(&n->nr_slabs, 0);
3124 atomic_long_set(&n->total_objects, 0);
3125 INIT_LIST_HEAD(&n->full);
3126#endif
3127}
3128
3129static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3130{
3131 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3132 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3133
	/*
	 * Must align to a double word boundary for the double cmpxchg
	 * instructions to work; see the this_cpu_cmpxchg_double() calls on
	 * s->cpu_slab above.
	 */
3138 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3139 2 * sizeof(void *));
3140
3141 if (!s->cpu_slab)
3142 return 0;
3143
3144 init_kmem_cache_cpus(s);
3145
3146 return 1;
3147}
3148
3149static struct kmem_cache *kmem_cache_node;
3150
/*
 * No kmalloc_node yet, so do it by hand. We know that this is the first slab
 * on the node for this slabcache, so there are no concurrent accesses
 * possible.
 *
 * Note that this function only works on the kmem_cache_node cache when
 * allocating for kmem_cache_node itself. It is used for bootstrapping memory
 * on a fresh node that has no slab structures yet.
 */
3160static void early_kmem_cache_node_alloc(int node)
3161{
3162 struct page *page;
3163 struct kmem_cache_node *n;
3164
3165 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3166
3167 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3168
3169 BUG_ON(!page);
3170 if (page_to_nid(page) != node) {
3171 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3172 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3173 }
3174
3175 n = page->freelist;
3176 BUG_ON(!n);
3177 page->freelist = get_freepointer(kmem_cache_node, n);
3178 page->inuse = 1;
3179 page->frozen = 0;
3180 kmem_cache_node->node[node] = n;
3181#ifdef CONFIG_SLUB_DEBUG
3182 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3183 init_tracking(kmem_cache_node, n);
3184#endif
3185 kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3186 GFP_KERNEL);
3187 init_kmem_cache_node(n);
3188 inc_slabs_node(kmem_cache_node, node, page->objects);
3189
	/*
	 * No locks need to be taken here: the node structure has just been
	 * initialized and there is no concurrent access.
	 */
3194 __add_partial(n, page, DEACTIVATE_TO_HEAD);
3195}
3196
3197static void free_kmem_cache_nodes(struct kmem_cache *s)
3198{
3199 int node;
3200 struct kmem_cache_node *n;
3201
3202 for_each_kmem_cache_node(s, node, n) {
3203 kmem_cache_free(kmem_cache_node, n);
3204 s->node[node] = NULL;
3205 }
3206}
3207
3208void __kmem_cache_release(struct kmem_cache *s)
3209{
3210 free_percpu(s->cpu_slab);
3211 free_kmem_cache_nodes(s);
3212}
3213
3214static int init_kmem_cache_nodes(struct kmem_cache *s)
3215{
3216 int node;
3217
3218 for_each_node_state(node, N_NORMAL_MEMORY) {
3219 struct kmem_cache_node *n;
3220
3221 if (slab_state == DOWN) {
3222 early_kmem_cache_node_alloc(node);
3223 continue;
3224 }
3225 n = kmem_cache_alloc_node(kmem_cache_node,
3226 GFP_KERNEL, node);
3227
3228 if (!n) {
3229 free_kmem_cache_nodes(s);
3230 return 0;
3231 }
3232
3233 s->node[node] = n;
3234 init_kmem_cache_node(n);
3235 }
3236 return 1;
3237}
3238
3239static void set_min_partial(struct kmem_cache *s, unsigned long min)
3240{
3241 if (min < MIN_PARTIAL)
3242 min = MIN_PARTIAL;
3243 else if (min > MAX_PARTIAL)
3244 min = MAX_PARTIAL;
3245 s->min_partial = min;
3246}
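/*
 * Example of the clamping above: kmem_cache_open() below passes
 * ilog2(s->size) / 2, so a cache with 4096-byte objects asks for
 * 12 / 2 == 6 partial slabs per node, which lands inside the
 * MIN_PARTIAL..MAX_PARTIAL window with the defaults in this file, while a
 * 64-byte cache asks for 6 / 2 == 3 and is raised to MIN_PARTIAL.
 */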
3247
/*
 * calculate_sizes() determines the order and the distribution of data within
 * a slab object.
 */
3252static int calculate_sizes(struct kmem_cache *s, int forced_order)
3253{
3254 unsigned long flags = s->flags;
3255 unsigned long size = s->object_size;
3256 int order;
3257
	/*
	 * Round up the object size to the next word boundary. We can only
	 * place the free pointer at word boundaries, and this determines the
	 * possible locations of the free pointer.
	 */
3263 size = ALIGN(size, sizeof(void *));
3264
3265#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Determine if we can poison the object itself. If the user of the
	 * slab may touch the object after free or before allocation then we
	 * should not poison the object itself.
	 */
3271 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
3272 !s->ctor)
3273 s->flags |= __OBJECT_POISON;
3274 else
3275 s->flags &= ~__OBJECT_POISON;
3276
	/*
	 * If we are redzoning and there is no slack between the end of the
	 * object and the word-aligned size, add an additional word so there
	 * is room for redzone information.
	 */
3283 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3284 size += sizeof(void *);
3285#endif
3286
3287
3288
3289
3290
3291 s->inuse = size;
3292
3293 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
3294 s->ctor)) {
		/*
		 * Relocate the free pointer after the object if it is not
		 * permitted to overwrite the first word of the object on
		 * kmem_cache_free.
		 *
		 * This is the case if we do RCU, have a constructor or are
		 * poisoning the objects.
		 */
3303 s->offset = size;
3304 size += sizeof(void *);
3305 }
3306
3307#ifdef CONFIG_SLUB_DEBUG
3308 if (flags & SLAB_STORE_USER)
		/*
		 * Need to store information about allocs and frees after the
		 * object.
		 */
3313 size += 2 * sizeof(struct track);
3314
3315 if (flags & SLAB_RED_ZONE) {
		/*
		 * Add some empty padding so that we can catch overwrites from
		 * earlier objects rather than let tracking information or the
		 * free pointer be corrupted if a user writes before the start
		 * of the object.
		 */
3323 size += sizeof(void *);
3324
3325 s->red_left_pad = sizeof(void *);
3326 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3327 size += s->red_left_pad;
3328 }
3329#endif
3330
	/*
	 * SLUB stores one object immediately after another beginning from
	 * offset 0. In order to align the objects we simply size each object
	 * to conform to the alignment.
	 */
3336 size = ALIGN(size, s->align);
3337 s->size = size;
3338 if (forced_order >= 0)
3339 order = forced_order;
3340 else
3341 order = calculate_order(size, s->reserved);
3342
3343 if (order < 0)
3344 return 0;
3345
3346 s->allocflags = 0;
3347 if (order)
3348 s->allocflags |= __GFP_COMP;
3349
3350 if (s->flags & SLAB_CACHE_DMA)
3351 s->allocflags |= GFP_DMA;
3352
3353 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3354 s->allocflags |= __GFP_RECLAIMABLE;
3355
3356
3357
3358
3359 s->oo = oo_make(order, size, s->reserved);
3360 s->min = oo_make(get_order(size), size, s->reserved);
3361 if (oo_objects(s->oo) > oo_objects(s->max))
3362 s->max = s->oo;
3363
3364 return !!oo_objects(s->oo);
3365}
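/*
 * Layout example for the logic above (a hypothetical cache, no debugging):
 * object_size == 40, align == 8, with a constructor. ALIGN(40, 8) == 40, so
 * s->inuse == 40; because of the constructor the free pointer cannot overlay
 * the object, so s->offset == 40 and one extra word is appended, giving
 * s->size == 48 after alignment. Without the constructor (and without RCU or
 * poisoning) the free pointer would share offset 0 with the object and
 * s->size would stay at 40.
 */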
3366
3367static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
3368{
3369 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3370 s->reserved = 0;
3371
3372 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
3373 s->reserved = sizeof(struct rcu_head);
3374
3375 if (!calculate_sizes(s, -1))
3376 goto error;
3377 if (disable_higher_order_debug) {
		/*
		 * Disable the debugging flags that require metadata to be
		 * stored in the slab if that metadata pushed the cache to a
		 * higher page order.
		 */
3382 if (get_order(s->size) > get_order(s->object_size)) {
3383 s->flags &= ~DEBUG_METADATA_FLAGS;
3384 s->offset = 0;
3385 if (!calculate_sizes(s, -1))
3386 goto error;
3387 }
3388 }
3389
3390#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3391 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3392 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3393
3394 s->flags |= __CMPXCHG_DOUBLE;
3395#endif
3396
	/*
	 * The larger the object size is, the more slabs we want on the per
	 * node partial list to avoid pounding the page allocator excessively.
	 */
3401 set_min_partial(s, ilog2(s->size) / 2);
3402
	/*
	 * cpu_partial determines the maximum number of objects kept in the
	 * per cpu partial lists of a processor.
	 *
	 * Per cpu partial lists mainly contain slabs that just had one object
	 * freed. If they are used for allocation they can be refilled with
	 * minimal effort, and the slab never hits the per node partial lists,
	 * so no locking is required.
	 *
	 * This setting also determines
	 *
	 * A) how many objects from per cpu partial slabs are dumped to the
	 *    per node list when the limit is reached, and
	 * B) how many objects are extracted from the per node list when the
	 *    per cpu objects run out.
	 */
3420 if (!kmem_cache_has_cpu_partial(s))
3421 s->cpu_partial = 0;
3422 else if (s->size >= PAGE_SIZE)
3423 s->cpu_partial = 2;
3424 else if (s->size >= 1024)
3425 s->cpu_partial = 6;
3426 else if (s->size >= 256)
3427 s->cpu_partial = 13;
3428 else
3429 s->cpu_partial = 30;
3430
3431#ifdef CONFIG_NUMA
3432 s->remote_node_defrag_ratio = 1000;
3433#endif
3434 if (!init_kmem_cache_nodes(s))
3435 goto error;
3436
3437 if (alloc_kmem_cache_cpus(s))
3438 return 0;
3439
3440 free_kmem_cache_nodes(s);
3441error:
3442 if (flags & SLAB_PANIC)
3443 panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n",
3444 s->name, (unsigned long)s->size, s->size,
3445 oo_order(s->oo), s->offset, flags);
3446 return -EINVAL;
3447}
3448
3449static void list_slab_objects(struct kmem_cache *s, struct page *page,
3450 const char *text)
3451{
3452#ifdef CONFIG_SLUB_DEBUG
3453 void *addr = page_address(page);
3454 void *p;
3455 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3456 sizeof(long), GFP_ATOMIC);
3457 if (!map)
3458 return;
3459 slab_err(s, page, text, s->name);
3460 slab_lock(page);
3461
3462 get_map(s, page, map);
3463 for_each_object(p, s, addr, page->objects) {
3464
3465 if (!test_bit(slab_index(p, s, addr), map)) {
3466 pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
3467 print_tracking(s, p);
3468 }
3469 }
3470 slab_unlock(page);
3471 kfree(map);
3472#endif
3473}
3474
/*
 * Attempt to free all partial slabs on a node.
 * This is called from __kmem_cache_shutdown(). The list_lock must be taken
 * because concurrent access to the partial list is still possible at this
 * point.
 */
3480static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3481{
3482 struct page *page, *h;
3483
3484 BUG_ON(irqs_disabled());
3485 spin_lock_irq(&n->list_lock);
3486 list_for_each_entry_safe(page, h, &n->partial, lru) {
3487 if (!page->inuse) {
3488 remove_partial(n, page);
3489 discard_slab(s, page);
3490 } else {
3491 list_slab_objects(s, page,
3492 "Objects remaining in %s on __kmem_cache_shutdown()");
3493 }
3494 }
3495 spin_unlock_irq(&n->list_lock);
3496}
3497
/*
 * Release all resources used by a slab cache.
 */
3501int __kmem_cache_shutdown(struct kmem_cache *s)
3502{
3503 int node;
3504 struct kmem_cache_node *n;
3505
3506 flush_all(s);
3507
3508 for_each_kmem_cache_node(s, node, n) {
3509 free_partial(s, n);
3510 if (n->nr_partial || slabs_node(s, node))
3511 return 1;
3512 }
3513 return 0;
3514}
3515
/*
 * Kernel command line handling for the slub_min_order, slub_max_order and
 * slub_min_objects tunables declared above, followed by the kmalloc entry
 * points.
 */
3520static int __init setup_slub_min_order(char *str)
3521{
3522 get_option(&str, &slub_min_order);
3523
3524 return 1;
3525}
3526
3527__setup("slub_min_order=", setup_slub_min_order);
3528
3529static int __init setup_slub_max_order(char *str)
3530{
3531 get_option(&str, &slub_max_order);
3532 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
3533
3534 return 1;
3535}
3536
3537__setup("slub_max_order=", setup_slub_max_order);
3538
3539static int __init setup_slub_min_objects(char *str)
3540{
3541 get_option(&str, &slub_min_objects);
3542
3543 return 1;
3544}
3545
3546__setup("slub_min_objects=", setup_slub_min_objects);
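/*
 * The three __setup() handlers above make the tunables available on the
 * kernel command line, e.g. (values chosen purely as an example):
 *
 *	slub_min_order=1 slub_max_order=3 slub_min_objects=16
 *
 * slub_max_order is additionally clamped to MAX_ORDER - 1 when it is parsed.
 */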
3547
3548void *__kmalloc(size_t size, gfp_t flags)
3549{
3550 struct kmem_cache *s;
3551 void *ret;
3552
3553 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3554 return kmalloc_large(size, flags);
3555
3556 s = kmalloc_slab(size, flags);
3557
3558 if (unlikely(ZERO_OR_NULL_PTR(s)))
3559 return s;
3560
3561 ret = slab_alloc(s, flags, _RET_IP_);
3562
3563 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3564
3565 kasan_kmalloc(s, ret, size, flags);
3566
3567 return ret;
3568}
3569EXPORT_SYMBOL(__kmalloc);
3570
3571#ifdef CONFIG_NUMA
3572static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3573{
3574 struct page *page;
3575 void *ptr = NULL;
3576
3577 flags |= __GFP_COMP | __GFP_NOTRACK;
3578 page = alloc_kmem_pages_node(node, flags, get_order(size));
3579 if (page)
3580 ptr = page_address(page);
3581
3582 kmalloc_large_node_hook(ptr, size, flags);
3583 return ptr;
3584}
3585
3586void *__kmalloc_node(size_t size, gfp_t flags, int node)
3587{
3588 struct kmem_cache *s;
3589 void *ret;
3590
3591 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3592 ret = kmalloc_large_node(size, flags, node);
3593
3594 trace_kmalloc_node(_RET_IP_, ret,
3595 size, PAGE_SIZE << get_order(size),
3596 flags, node);
3597
3598 return ret;
3599 }
3600
3601 s = kmalloc_slab(size, flags);
3602
3603 if (unlikely(ZERO_OR_NULL_PTR(s)))
3604 return s;
3605
3606 ret = slab_alloc_node(s, flags, node, _RET_IP_);
3607
3608 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3609
3610 kasan_kmalloc(s, ret, size, flags);
3611
3612 return ret;
3613}
3614EXPORT_SYMBOL(__kmalloc_node);
3615#endif
3616
3617static size_t __ksize(const void *object)
3618{
3619 struct page *page;
3620
3621 if (unlikely(object == ZERO_SIZE_PTR))
3622 return 0;
3623
3624 page = virt_to_head_page(object);
3625
3626 if (unlikely(!PageSlab(page))) {
3627 WARN_ON(!PageCompound(page));
3628 return PAGE_SIZE << compound_order(page);
3629 }
3630
3631 return slab_ksize(page->slab_cache);
3632}
3633
3634size_t ksize(const void *object)
3635{
3636 size_t size = __ksize(object);
3637
3638
3639 kasan_krealloc(object, size, GFP_NOWAIT);
3640 return size;
3641}
3642EXPORT_SYMBOL(ksize);
3643
3644void kfree(const void *x)
3645{
3646 struct page *page;
3647 void *object = (void *)x;
3648
3649 trace_kfree(_RET_IP_, x);
3650
3651 if (unlikely(ZERO_OR_NULL_PTR(x)))
3652 return;
3653
3654 page = virt_to_head_page(x);
3655 if (unlikely(!PageSlab(page))) {
3656 BUG_ON(!PageCompound(page));
3657 kfree_hook(x);
3658 __free_kmem_pages(page, compound_order(page));
3659 return;
3660 }
3661 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
3662}
3663EXPORT_SYMBOL(kfree);
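/*
 * Caller-side sketch for the kmalloc entry points above (buffer name and
 * length are placeholders). kmalloc() in <linux/slab.h> typically ends up in
 * __kmalloc()/kfree() here; allocations above KMALLOC_MAX_CACHE_SIZE go
 * straight to the page allocator, as seen above.
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 *
 * ksize(buf) may report more usable bytes than were requested, because the
 * object comes from the smallest kmalloc cache (or page order) that fits.
 */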
3664
3665#define SHRINK_PROMOTE_MAX 32
3666
/*
 * kmem_cache_shrink discards empty slabs and promotes the slabs filled up
 * most to the head of the partial lists. New allocations will then fill
 * those up and thus they can be removed from the partial lists.
 *
 * The slabs with the fewest objects in use are placed last, so they are
 * allocated from last, increasing the chance that their remaining objects
 * are freed and the slabs become empty.
 */
3676int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
3677{
3678 int node;
3679 int i;
3680 struct kmem_cache_node *n;
3681 struct page *page;
3682 struct page *t;
3683 struct list_head discard;
3684 struct list_head promote[SHRINK_PROMOTE_MAX];
3685 unsigned long flags;
3686 int ret = 0;
3687
3688 if (deactivate) {
		/*
		 * Disable caching of empty and partial slabs. Used to avoid
		 * pinning offline memory cgroups by kmem pages that could
		 * otherwise be freed.
		 */
3693 s->cpu_partial = 0;
3694 s->min_partial = 0;
3695
		/*
		 * s->cpu_partial is checked locklessly elsewhere in this
		 * file, so make sure the change is visible on all cpus before
		 * flushing.
		 */
3700 kick_all_cpus_sync();
3701 }
3702
3703 flush_all(s);
3704 for_each_kmem_cache_node(s, node, n) {
3705 INIT_LIST_HEAD(&discard);
3706 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3707 INIT_LIST_HEAD(promote + i);
3708
3709 spin_lock_irqsave(&n->list_lock, flags);
3710
		/*
		 * Build lists of slabs to discard or promote.
		 *
		 * Note that concurrent frees may occur while we hold the
		 * list_lock; page->inuse here is only an upper limit.
		 */
3717 list_for_each_entry_safe(page, t, &n->partial, lru) {
3718 int free = page->objects - page->inuse;
3719
3720
3721 barrier();
3722
3723
3724 BUG_ON(free <= 0);
3725
3726 if (free == page->objects) {
3727 list_move(&page->lru, &discard);
3728 n->nr_partial--;
3729 } else if (free <= SHRINK_PROMOTE_MAX)
3730 list_move(&page->lru, promote + free - 1);
3731 }
3732
		/*
		 * Promote the slabs filled up most to the head of the partial
		 * list.
		 */
3737 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
3738 list_splice(promote + i, &n->partial);
3739
3740 spin_unlock_irqrestore(&n->list_lock, flags);
3741
3742
3743 list_for_each_entry_safe(page, t, &discard, lru)
3744 discard_slab(s, page);
3745
3746 if (slabs_node(s, node))
3747 ret = 1;
3748 }
3749
3750 return ret;
3751}
3752
3753static int slab_mem_going_offline_callback(void *arg)
3754{
3755 struct kmem_cache *s;
3756
3757 mutex_lock(&slab_mutex);
3758 list_for_each_entry(s, &slab_caches, list)
3759 __kmem_cache_shrink(s, false);
3760 mutex_unlock(&slab_mutex);
3761
3762 return 0;
3763}
3764
3765static void slab_mem_offline_callback(void *arg)
3766{
3767 struct kmem_cache_node *n;
3768 struct kmem_cache *s;
3769 struct memory_notify *marg = arg;
3770 int offline_node;
3771
3772 offline_node = marg->status_change_nid_normal;
3773
	/*
	 * If the node still has available normal memory, then no
	 * kmem_cache_node structures need to be torn down here.
	 */
3778 if (offline_node < 0)
3779 return;
3780
3781 mutex_lock(&slab_mutex);
3782 list_for_each_entry(s, &slab_caches, list) {
3783 n = get_node(s, offline_node);
3784 if (n) {
			/*
			 * If n->nr_slabs > 0, slabs still exist on the node
			 * that is going down. We were unable to free them and
			 * the offlining path should not have invoked this
			 * callback, so report it.
			 */
3791 BUG_ON(slabs_node(s, offline_node));
3792
3793 s->node[offline_node] = NULL;
3794 kmem_cache_free(kmem_cache_node, n);
3795 }
3796 }
3797 mutex_unlock(&slab_mutex);
3798}
3799
3800static int slab_mem_going_online_callback(void *arg)
3801{
3802 struct kmem_cache_node *n;
3803 struct kmem_cache *s;
3804 struct memory_notify *marg = arg;
3805 int nid = marg->status_change_nid_normal;
3806 int ret = 0;
3807
3808
3809
3810
3811
3812 if (nid < 0)
3813 return 0;
3814
	/*
	 * We are bringing a node online and no memory is available there yet.
	 * A kmem_cache_node structure must be allocated for every cache in
	 * order to bring the node online.
	 */
3820 mutex_lock(&slab_mutex);
3821 list_for_each_entry(s, &slab_caches, list) {
3822
3823
3824
3825
3826
3827 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
3828 if (!n) {
3829 ret = -ENOMEM;
3830 goto out;
3831 }
3832 init_kmem_cache_node(n);
3833 s->node[nid] = n;
3834 }
3835out:
3836 mutex_unlock(&slab_mutex);
3837 return ret;
3838}
3839
3840static int slab_memory_callback(struct notifier_block *self,
3841 unsigned long action, void *arg)
3842{
3843 int ret = 0;
3844
3845 switch (action) {
3846 case MEM_GOING_ONLINE:
3847 ret = slab_mem_going_online_callback(arg);
3848 break;
3849 case MEM_GOING_OFFLINE:
3850 ret = slab_mem_going_offline_callback(arg);
3851 break;
3852 case MEM_OFFLINE:
3853 case MEM_CANCEL_ONLINE:
3854 slab_mem_offline_callback(arg);
3855 break;
3856 case MEM_ONLINE:
3857 case MEM_CANCEL_OFFLINE:
3858 break;
3859 }
3860 if (ret)
3861 ret = notifier_from_errno(ret);
3862 else
3863 ret = NOTIFY_OK;
3864 return ret;
3865}
3866
3867static struct notifier_block slab_memory_callback_nb = {
3868 .notifier_call = slab_memory_callback,
3869 .priority = SLAB_CALLBACK_PRI,
3870};
3871
/*
 * Used for the early kmem_cache structures that were allocated statically
 * during boot. Allocate them properly from the now-working allocator, then
 * fix up any page->slab_cache pointers that still reference the static copy.
 */
3882static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
3883{
3884 int node;
3885 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
3886 struct kmem_cache_node *n;
3887
3888 memcpy(s, static_cache, kmem_cache->object_size);
3889
	/*
	 * This runs very early, and only the boot processor is supposed to be
	 * up. Even if that were not true, IRQs are not up yet so we could not
	 * fire any IPIs anyway; flushing the local cpu slab is sufficient.
	 */
3895 __flush_cpu_slab(s, smp_processor_id());
3896 for_each_kmem_cache_node(s, node, n) {
3897 struct page *p;
3898
3899 list_for_each_entry(p, &n->partial, lru)
3900 p->slab_cache = s;
3901
3902#ifdef CONFIG_SLUB_DEBUG
3903 list_for_each_entry(p, &n->full, lru)
3904 p->slab_cache = s;
3905#endif
3906 }
3907 slab_init_memcg_params(s);
3908 list_add(&s->list, &slab_caches);
3909 return s;
3910}
3911
3912void __init kmem_cache_init(void)
3913{
3914 static __initdata struct kmem_cache boot_kmem_cache,
3915 boot_kmem_cache_node;
3916
3917 if (debug_guardpage_minorder())
3918 slub_max_order = 0;
3919
3920 kmem_cache_node = &boot_kmem_cache_node;
3921 kmem_cache = &boot_kmem_cache;
3922
3923 create_boot_cache(kmem_cache_node, "kmem_cache_node",
3924 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
3925
3926 register_hotmemory_notifier(&slab_memory_callback_nb);
3927
3928
3929 slab_state = PARTIAL;
3930
3931 create_boot_cache(kmem_cache, "kmem_cache",
3932 offsetof(struct kmem_cache, node) +
3933 nr_node_ids * sizeof(struct kmem_cache_node *),
3934 SLAB_HWCACHE_ALIGN);
3935
3936 kmem_cache = bootstrap(&boot_kmem_cache);
3937
	/*
	 * Allocate kmem_cache_node properly from the kmem_cache slab.
	 * kmem_cache_node is separately allocated so no list pointers need
	 * updating.
	 */
3943 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
3944
3945
3946 setup_kmalloc_cache_index_table();
3947 create_kmalloc_caches(0);
3948
3949#ifdef CONFIG_SMP
3950 register_cpu_notifier(&slab_notifier);
3951#endif
3952
3953 pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
3954 cache_line_size(),
3955 slub_min_order, slub_max_order, slub_min_objects,
3956 nr_cpu_ids, nr_node_ids);
3957}
3958
3959void __init kmem_cache_init_late(void)
3960{
3961}
3962
3963struct kmem_cache *
3964__kmem_cache_alias(const char *name, size_t size, size_t align,
3965 unsigned long flags, void (*ctor)(void *))
3966{
3967 struct kmem_cache *s, *c;
3968
3969 s = find_mergeable(size, align, flags, name, ctor);
3970 if (s) {
3971 s->refcount++;
		/*
		 * Adjust the object sizes so that we clear the complete
		 * object on kzalloc.
		 */
3977 s->object_size = max(s->object_size, (int)size);
3978 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
3979
3980 for_each_memcg_cache(c, s) {
3981 c->object_size = s->object_size;
3982 c->inuse = max_t(int, c->inuse,
3983 ALIGN(size, sizeof(void *)));
3984 }
3985
3986 if (sysfs_slab_alias(s, name)) {
3987 s->refcount--;
3988 s = NULL;
3989 }
3990 }
3991
3992 return s;
3993}
3994
3995int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
3996{
3997 int err;
3998
3999 err = kmem_cache_open(s, flags);
4000 if (err)
4001 return err;
4002
4003
4004 if (slab_state <= UP)
4005 return 0;
4006
4007 memcg_propagate_slab_attrs(s);
4008 err = sysfs_slab_add(s);
4009 if (err)
4010 __kmem_cache_release(s);
4011
4012 return err;
4013}
4014
4015#ifdef CONFIG_SMP
/*
 * Use the cpu notifier to ensure that the cpu slabs are flushed when a cpu
 * goes away.
 */
4020static int slab_cpuup_callback(struct notifier_block *nfb,
4021 unsigned long action, void *hcpu)
4022{
4023 long cpu = (long)hcpu;
4024 struct kmem_cache *s;
4025 unsigned long flags;
4026
4027 switch (action) {
4028 case CPU_UP_CANCELED:
4029 case CPU_UP_CANCELED_FROZEN:
4030 case CPU_DEAD:
4031 case CPU_DEAD_FROZEN:
4032 mutex_lock(&slab_mutex);
4033 list_for_each_entry(s, &slab_caches, list) {
4034 local_irq_save(flags);
4035 __flush_cpu_slab(s, cpu);
4036 local_irq_restore(flags);
4037 }
4038 mutex_unlock(&slab_mutex);
4039 break;
4040 default:
4041 break;
4042 }
4043 return NOTIFY_OK;
4044}
4045
4046static struct notifier_block slab_notifier = {
4047 .notifier_call = slab_cpuup_callback
4048};
4049
4050#endif
4051
4052void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4053{
4054 struct kmem_cache *s;
4055 void *ret;
4056
4057 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4058 return kmalloc_large(size, gfpflags);
4059
4060 s = kmalloc_slab(size, gfpflags);
4061
4062 if (unlikely(ZERO_OR_NULL_PTR(s)))
4063 return s;
4064
4065 ret = slab_alloc(s, gfpflags, caller);
4066
4067
4068 trace_kmalloc(caller, ret, size, s->size, gfpflags);
4069
4070 return ret;
4071}
4072
4073#ifdef CONFIG_NUMA
4074void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4075 int node, unsigned long caller)
4076{
4077 struct kmem_cache *s;
4078 void *ret;
4079
4080 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4081 ret = kmalloc_large_node(size, gfpflags, node);
4082
4083 trace_kmalloc_node(caller, ret,
4084 size, PAGE_SIZE << get_order(size),
4085 gfpflags, node);
4086
4087 return ret;
4088 }
4089
4090 s = kmalloc_slab(size, gfpflags);
4091
4092 if (unlikely(ZERO_OR_NULL_PTR(s)))
4093 return s;
4094
4095 ret = slab_alloc_node(s, gfpflags, node, caller);
4096
4097
4098 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4099
4100 return ret;
4101}
4102#endif
4103
4104#ifdef CONFIG_SYSFS
4105static int count_inuse(struct page *page)
4106{
4107 return page->inuse;
4108}
4109
4110static int count_total(struct page *page)
4111{
4112 return page->objects;
4113}
4114#endif
4115
4116#ifdef CONFIG_SLUB_DEBUG
4117static int validate_slab(struct kmem_cache *s, struct page *page,
4118 unsigned long *map)
4119{
4120 void *p;
4121 void *addr = page_address(page);
4122
4123 if (!check_slab(s, page) ||
4124 !on_freelist(s, page, NULL))
4125 return 0;
4126
4127
4128 bitmap_zero(map, page->objects);
4129
4130 get_map(s, page, map);
4131 for_each_object(p, s, addr, page->objects) {
4132 if (test_bit(slab_index(p, s, addr), map))
4133 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4134 return 0;
4135 }
4136
4137 for_each_object(p, s, addr, page->objects)
4138 if (!test_bit(slab_index(p, s, addr), map))
4139 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4140 return 0;
4141 return 1;
4142}
4143
4144static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4145 unsigned long *map)
4146{
4147 slab_lock(page);
4148 validate_slab(s, page, map);
4149 slab_unlock(page);
4150}
4151
4152static int validate_slab_node(struct kmem_cache *s,
4153 struct kmem_cache_node *n, unsigned long *map)
4154{
4155 unsigned long count = 0;
4156 struct page *page;
4157 unsigned long flags;
4158
4159 spin_lock_irqsave(&n->list_lock, flags);
4160
4161 list_for_each_entry(page, &n->partial, lru) {
4162 validate_slab_slab(s, page, map);
4163 count++;
4164 }
4165 if (count != n->nr_partial)
4166 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4167 s->name, count, n->nr_partial);
4168
4169 if (!(s->flags & SLAB_STORE_USER))
4170 goto out;
4171
4172 list_for_each_entry(page, &n->full, lru) {
4173 validate_slab_slab(s, page, map);
4174 count++;
4175 }
4176 if (count != atomic_long_read(&n->nr_slabs))
4177 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4178 s->name, count, atomic_long_read(&n->nr_slabs));
4179
4180out:
4181 spin_unlock_irqrestore(&n->list_lock, flags);
4182 return count;
4183}
4184
4185static long validate_slab_cache(struct kmem_cache *s)
4186{
4187 int node;
4188 unsigned long count = 0;
4189 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4190 sizeof(unsigned long), GFP_KERNEL);
4191 struct kmem_cache_node *n;
4192
4193 if (!map)
4194 return -ENOMEM;
4195
4196 flush_all(s);
4197 for_each_kmem_cache_node(s, node, n)
4198 count += validate_slab_node(s, n, map);
4199 kfree(map);
4200 return count;
4201}
4202
/*
 * Generate lists of code addresses where slabcache objects are allocated and
 * freed.
 */
4207struct location {
4208 unsigned long count;
4209 unsigned long addr;
4210 long long sum_time;
4211 long min_time;
4212 long max_time;
4213 long min_pid;
4214 long max_pid;
4215 DECLARE_BITMAP(cpus, NR_CPUS);
4216 nodemask_t nodes;
4217};
4218
4219struct loc_track {
4220 unsigned long max;
4221 unsigned long count;
4222 struct location *loc;
4223};
4224
4225static void free_loc_track(struct loc_track *t)
4226{
4227 if (t->max)
4228 free_pages((unsigned long)t->loc,
4229 get_order(sizeof(struct location) * t->max));
4230}
4231
4232static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4233{
4234 struct location *l;
4235 int order;
4236
4237 order = get_order(sizeof(struct location) * max);
4238
4239 l = (void *)__get_free_pages(flags, order);
4240 if (!l)
4241 return 0;
4242
4243 if (t->count) {
4244 memcpy(l, t->loc, sizeof(struct location) * t->count);
4245 free_loc_track(t);
4246 }
4247 t->max = max;
4248 t->loc = l;
4249 return 1;
4250}
4251
4252static int add_location(struct loc_track *t, struct kmem_cache *s,
4253 const struct track *track)
4254{
4255 long start, end, pos;
4256 struct location *l;
4257 unsigned long caddr;
4258 unsigned long age = jiffies - track->when;
4259
4260 start = -1;
4261 end = t->count;
4262
4263 for ( ; ; ) {
4264 pos = start + (end - start + 1) / 2;
4265
4266
4267
4268
4269
4270 if (pos == end)
4271 break;
4272
4273 caddr = t->loc[pos].addr;
4274 if (track->addr == caddr) {
4275
4276 l = &t->loc[pos];
4277 l->count++;
4278 if (track->when) {
4279 l->sum_time += age;
4280 if (age < l->min_time)
4281 l->min_time = age;
4282 if (age > l->max_time)
4283 l->max_time = age;
4284
4285 if (track->pid < l->min_pid)
4286 l->min_pid = track->pid;
4287 if (track->pid > l->max_pid)
4288 l->max_pid = track->pid;
4289
4290 cpumask_set_cpu(track->cpu,
4291 to_cpumask(l->cpus));
4292 }
4293 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4294 return 1;
4295 }
4296
4297 if (track->addr < caddr)
4298 end = pos;
4299 else
4300 start = pos;
4301 }
4302
4303
4304
4305
4306 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4307 return 0;
4308
4309 l = t->loc + pos;
4310 if (pos < t->count)
4311 memmove(l + 1, l,
4312 (t->count - pos) * sizeof(struct location));
4313 t->count++;
4314 l->count = 1;
4315 l->addr = track->addr;
4316 l->sum_time = age;
4317 l->min_time = age;
4318 l->max_time = age;
4319 l->min_pid = track->pid;
4320 l->max_pid = track->pid;
4321 cpumask_clear(to_cpumask(l->cpus));
4322 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4323 nodes_clear(l->nodes);
4324 node_set(page_to_nid(virt_to_page(track)), l->nodes);
4325 return 1;
4326}
4327
4328static void process_slab(struct loc_track *t, struct kmem_cache *s,
4329 struct page *page, enum track_item alloc,
4330 unsigned long *map)
4331{
4332 void *addr = page_address(page);
4333 void *p;
4334
4335 bitmap_zero(map, page->objects);
4336 get_map(s, page, map);
4337
4338 for_each_object(p, s, addr, page->objects)
4339 if (!test_bit(slab_index(p, s, addr), map))
4340 add_location(t, s, get_track(s, p, alloc));
4341}
4342
4343static int list_locations(struct kmem_cache *s, char *buf,
4344 enum track_item alloc)
4345{
4346 int len = 0;
4347 unsigned long i;
4348 struct loc_track t = { 0, 0, NULL };
4349 int node;
4350 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4351 sizeof(unsigned long), GFP_KERNEL);
4352 struct kmem_cache_node *n;
4353
4354 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4355 GFP_TEMPORARY)) {
4356 kfree(map);
4357 return sprintf(buf, "Out of memory\n");
4358 }
4359
4360 flush_all(s);
4361
4362 for_each_kmem_cache_node(s, node, n) {
4363 unsigned long flags;
4364 struct page *page;
4365
4366 if (!atomic_long_read(&n->nr_slabs))
4367 continue;
4368
4369 spin_lock_irqsave(&n->list_lock, flags);
4370 list_for_each_entry(page, &n->partial, lru)
4371 process_slab(&t, s, page, alloc, map);
4372 list_for_each_entry(page, &n->full, lru)
4373 process_slab(&t, s, page, alloc, map);
4374 spin_unlock_irqrestore(&n->list_lock, flags);
4375 }
4376
4377 for (i = 0; i < t.count; i++) {
4378 struct location *l = &t.loc[i];
4379
4380 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4381 break;
4382 len += sprintf(buf + len, "%7ld ", l->count);
4383
4384 if (l->addr)
4385 len += sprintf(buf + len, "%pS", (void *)l->addr);
4386 else
4387 len += sprintf(buf + len, "<not-available>");
4388
4389 if (l->sum_time != l->min_time) {
4390 len += sprintf(buf + len, " age=%ld/%ld/%ld",
4391 l->min_time,
4392 (long)div_u64(l->sum_time, l->count),
4393 l->max_time);
4394 } else
4395 len += sprintf(buf + len, " age=%ld",
4396 l->min_time);
4397
4398 if (l->min_pid != l->max_pid)
4399 len += sprintf(buf + len, " pid=%ld-%ld",
4400 l->min_pid, l->max_pid);
4401 else
4402 len += sprintf(buf + len, " pid=%ld",
4403 l->min_pid);
4404
4405 if (num_online_cpus() > 1 &&
4406 !cpumask_empty(to_cpumask(l->cpus)) &&
4407 len < PAGE_SIZE - 60)
4408 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4409 " cpus=%*pbl",
4410 cpumask_pr_args(to_cpumask(l->cpus)));
4411
4412 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4413 len < PAGE_SIZE - 60)
4414 len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4415 " nodes=%*pbl",
4416 nodemask_pr_args(&l->nodes));
4417
4418 len += sprintf(buf + len, "\n");
4419 }
4420
4421 free_loc_track(&t);
4422 kfree(map);
4423 if (!t.count)
4424 len += sprintf(buf, "No data\n");
4425 return len;
4426}
4427#endif
4428
4429#ifdef SLUB_RESILIENCY_TEST
4430static void __init resiliency_test(void)
4431{
4432 u8 *p;
4433
4434 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4435
4436 pr_err("SLUB resiliency testing\n");
4437 pr_err("-----------------------\n");
4438 pr_err("A. Corruption after allocation\n");
4439
4440 p = kzalloc(16, GFP_KERNEL);
4441 p[16] = 0x12;
4442 pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4443 p + 16);
4444
4445 validate_slab_cache(kmalloc_caches[4]);
4446
4447
4448 p = kzalloc(32, GFP_KERNEL);
4449 p[32 + sizeof(void *)] = 0x34;
4450 pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
4451 p);
4452 pr_err("If allocated object is overwritten then not detectable\n\n");
4453
4454 validate_slab_cache(kmalloc_caches[5]);
4455 p = kzalloc(64, GFP_KERNEL);
4456 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4457 *p = 0x56;
4458 pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4459 p);
4460 pr_err("If allocated object is overwritten then not detectable\n\n");
4461 validate_slab_cache(kmalloc_caches[6]);
4462
4463 pr_err("\nB. Corruption after free\n");
4464 p = kzalloc(128, GFP_KERNEL);
4465 kfree(p);
4466 *p = 0x78;
4467 pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4468 validate_slab_cache(kmalloc_caches[7]);
4469
4470 p = kzalloc(256, GFP_KERNEL);
4471 kfree(p);
4472 p[50] = 0x9a;
4473 pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4474 validate_slab_cache(kmalloc_caches[8]);
4475
4476 p = kzalloc(512, GFP_KERNEL);
4477 kfree(p);
4478 p[512] = 0xab;
4479 pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4480 validate_slab_cache(kmalloc_caches[9]);
4481}
4482#else
4483#ifdef CONFIG_SYSFS
4484static void resiliency_test(void) {};
4485#endif
4486#endif
4487
4488#ifdef CONFIG_SYSFS
4489enum slab_stat_type {
4490 SL_ALL,
4491 SL_PARTIAL,
4492 SL_CPU,
4493 SL_OBJECTS,
4494 SL_TOTAL
4495};
4496
4497#define SO_ALL (1 << SL_ALL)
4498#define SO_PARTIAL (1 << SL_PARTIAL)
4499#define SO_CPU (1 << SL_CPU)
4500#define SO_OBJECTS (1 << SL_OBJECTS)
4501#define SO_TOTAL (1 << SL_TOTAL)
4502
4503static ssize_t show_slab_objects(struct kmem_cache *s,
4504 char *buf, unsigned long flags)
4505{
4506 unsigned long total = 0;
4507 int node;
4508 int x;
4509 unsigned long *nodes;
4510
4511 nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4512 if (!nodes)
4513 return -ENOMEM;
4514
4515 if (flags & SO_CPU) {
4516 int cpu;
4517
4518 for_each_possible_cpu(cpu) {
4519 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4520 cpu);
4521 int node;
4522 struct page *page;
4523
4524 page = READ_ONCE(c->page);
4525 if (!page)
4526 continue;
4527
4528 node = page_to_nid(page);
4529 if (flags & SO_TOTAL)
4530 x = page->objects;
4531 else if (flags & SO_OBJECTS)
4532 x = page->inuse;
4533 else
4534 x = 1;
4535
4536 total += x;
4537 nodes[node] += x;
4538
4539 page = READ_ONCE(c->partial);
4540 if (page) {
4541 node = page_to_nid(page);
4542 if (flags & SO_TOTAL)
4543 WARN_ON_ONCE(1);
4544 else if (flags & SO_OBJECTS)
4545 WARN_ON_ONCE(1);
4546 else
4547 x = page->pages;
4548 total += x;
4549 nodes[node] += x;
4550 }
4551 }
4552 }
4553
4554 get_online_mems();
4555#ifdef CONFIG_SLUB_DEBUG
4556 if (flags & SO_ALL) {
4557 struct kmem_cache_node *n;
4558
4559 for_each_kmem_cache_node(s, node, n) {
4560
4561 if (flags & SO_TOTAL)
4562 x = atomic_long_read(&n->total_objects);
4563 else if (flags & SO_OBJECTS)
4564 x = atomic_long_read(&n->total_objects) -
4565 count_partial(n, count_free);
4566 else
4567 x = atomic_long_read(&n->nr_slabs);
4568 total += x;
4569 nodes[node] += x;
4570 }
4571
4572 } else
4573#endif
4574 if (flags & SO_PARTIAL) {
4575 struct kmem_cache_node *n;
4576
4577 for_each_kmem_cache_node(s, node, n) {
4578 if (flags & SO_TOTAL)
4579 x = count_partial(n, count_total);
4580 else if (flags & SO_OBJECTS)
4581 x = count_partial(n, count_inuse);
4582 else
4583 x = n->nr_partial;
4584 total += x;
4585 nodes[node] += x;
4586 }
4587 }
4588 x = sprintf(buf, "%lu", total);
4589#ifdef CONFIG_NUMA
4590 for (node = 0; node < nr_node_ids; node++)
4591 if (nodes[node])
4592 x += sprintf(buf + x, " N%d=%lu",
4593 node, nodes[node]);
4594#endif
4595 put_online_mems();
4596 kfree(nodes);
4597 return x + sprintf(buf + x, "\n");
4598}
4599
4600#ifdef CONFIG_SLUB_DEBUG
4601static int any_slab_objects(struct kmem_cache *s)
4602{
4603 int node;
4604 struct kmem_cache_node *n;
4605
4606 for_each_kmem_cache_node(s, node, n)
4607 if (atomic_long_read(&n->total_objects))
4608 return 1;
4609
4610 return 0;
4611}
4612#endif
4613
4614#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4615#define to_slab(n) container_of(n, struct kmem_cache, kobj)
4616
4617struct slab_attribute {
4618 struct attribute attr;
4619 ssize_t (*show)(struct kmem_cache *s, char *buf);
4620 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4621};
4622
4623#define SLAB_ATTR_RO(_name) \
4624 static struct slab_attribute _name##_attr = \
4625 __ATTR(_name, 0400, _name##_show, NULL)
4626
4627#define SLAB_ATTR(_name) \
4628 static struct slab_attribute _name##_attr = \
4629 __ATTR(_name, 0600, _name##_show, _name##_store)
4630
4631static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4632{
4633 return sprintf(buf, "%d\n", s->size);
4634}
4635SLAB_ATTR_RO(slab_size);
4636
4637static ssize_t align_show(struct kmem_cache *s, char *buf)
4638{
4639 return sprintf(buf, "%d\n", s->align);
4640}
4641SLAB_ATTR_RO(align);
4642
4643static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4644{
4645 return sprintf(buf, "%d\n", s->object_size);
4646}
4647SLAB_ATTR_RO(object_size);
4648
4649static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4650{
4651 return sprintf(buf, "%d\n", oo_objects(s->oo));
4652}
4653SLAB_ATTR_RO(objs_per_slab);
4654
4655static ssize_t order_store(struct kmem_cache *s,
4656 const char *buf, size_t length)
4657{
4658 unsigned long order;
4659 int err;
4660
4661 err = kstrtoul(buf, 10, &order);
4662 if (err)
4663 return err;
4664
4665 if (order > slub_max_order || order < slub_min_order)
4666 return -EINVAL;
4667
4668 calculate_sizes(s, order);
4669 return length;
4670}
4671
4672static ssize_t order_show(struct kmem_cache *s, char *buf)
4673{
4674 return sprintf(buf, "%d\n", oo_order(s->oo));
4675}
4676SLAB_ATTR(order);
4677
4678static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4679{
4680 return sprintf(buf, "%lu\n", s->min_partial);
4681}
4682
4683static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4684 size_t length)
4685{
4686 unsigned long min;
4687 int err;
4688
4689 err = kstrtoul(buf, 10, &min);
4690 if (err)
4691 return err;
4692
4693 set_min_partial(s, min);
4694 return length;
4695}
4696SLAB_ATTR(min_partial);
4697
4698static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4699{
4700 return sprintf(buf, "%u\n", s->cpu_partial);
4701}
4702
4703static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4704 size_t length)
4705{
4706 unsigned long objects;
4707 int err;
4708
4709 err = kstrtoul(buf, 10, &objects);
4710 if (err)
4711 return err;
4712 if (objects && !kmem_cache_has_cpu_partial(s))
4713 return -EINVAL;
4714
4715 s->cpu_partial = objects;
4716 flush_all(s);
4717 return length;
4718}
4719SLAB_ATTR(cpu_partial);
4720
4721static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4722{
4723 if (!s->ctor)
4724 return 0;
4725 return sprintf(buf, "%pS\n", s->ctor);
4726}
4727SLAB_ATTR_RO(ctor);
4728
4729static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4730{
4731 return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
4732}
4733SLAB_ATTR_RO(aliases);
4734
4735static ssize_t partial_show(struct kmem_cache *s, char *buf)
4736{
4737 return show_slab_objects(s, buf, SO_PARTIAL);
4738}
4739SLAB_ATTR_RO(partial);
4740
4741static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4742{
4743 return show_slab_objects(s, buf, SO_CPU);
4744}
4745SLAB_ATTR_RO(cpu_slabs);
4746
4747static ssize_t objects_show(struct kmem_cache *s, char *buf)
4748{
4749 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
4750}
4751SLAB_ATTR_RO(objects);
4752
4753static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4754{
4755 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4756}
4757SLAB_ATTR_RO(objects_partial);
4758
4759static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4760{
4761 int objects = 0;
4762 int pages = 0;
4763 int cpu;
4764 int len;
4765
4766 for_each_online_cpu(cpu) {
4767 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4768
4769 if (page) {
4770 pages += page->pages;
4771 objects += page->pobjects;
4772 }
4773 }
4774
4775 len = sprintf(buf, "%d(%d)", objects, pages);
4776
4777#ifdef CONFIG_SMP
4778 for_each_online_cpu(cpu) {
4779 struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
4780
4781 if (page && len < PAGE_SIZE - 20)
4782 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4783 page->pobjects, page->pages);
4784 }
4785#endif
4786 return len + sprintf(buf + len, "\n");
4787}
4788SLAB_ATTR_RO(slabs_cpu_partial);
4789
4790static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4791{
4792 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4793}
4794
4795static ssize_t reclaim_account_store(struct kmem_cache *s,
4796 const char *buf, size_t length)
4797{
4798 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4799 if (buf[0] == '1')
4800 s->flags |= SLAB_RECLAIM_ACCOUNT;
4801 return length;
4802}
4803SLAB_ATTR(reclaim_account);
4804
4805static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4806{
4807 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4808}
4809SLAB_ATTR_RO(hwcache_align);
4810
4811#ifdef CONFIG_ZONE_DMA
4812static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4813{
4814 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4815}
4816SLAB_ATTR_RO(cache_dma);
4817#endif
4818
4819static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4820{
4821 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4822}
4823SLAB_ATTR_RO(destroy_by_rcu);
4824
4825static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4826{
4827 return sprintf(buf, "%d\n", s->reserved);
4828}
4829SLAB_ATTR_RO(reserved);
4830
4831#ifdef CONFIG_SLUB_DEBUG
4832static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4833{
4834 return show_slab_objects(s, buf, SO_ALL);
4835}
4836SLAB_ATTR_RO(slabs);
4837
4838static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4839{
4840 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4841}
4842SLAB_ATTR_RO(total_objects);
4843
4844static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4845{
4846 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
4847}
4848
4849static ssize_t sanity_checks_store(struct kmem_cache *s,
4850 const char *buf, size_t length)
4851{
4852 s->flags &= ~SLAB_CONSISTENCY_CHECKS;
4853 if (buf[0] == '1') {
4854 s->flags &= ~__CMPXCHG_DOUBLE;
4855 s->flags |= SLAB_CONSISTENCY_CHECKS;
4856 }
4857 return length;
4858}
4859SLAB_ATTR(sanity_checks);
4860
4861static ssize_t trace_show(struct kmem_cache *s, char *buf)
4862{
4863 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4864}
4865
4866static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4867 size_t length)
4868{
	/*
	 * Tracing a merged cache is going to give confusing results as well
	 * as cause other issues like turning a mergeable cache into an
	 * unmergeable one, so refuse when the cache has aliases.
	 */
4874 if (s->refcount > 1)
4875 return -EINVAL;
4876
4877 s->flags &= ~SLAB_TRACE;
4878 if (buf[0] == '1') {
4879 s->flags &= ~__CMPXCHG_DOUBLE;
4880 s->flags |= SLAB_TRACE;
4881 }
4882 return length;
4883}
4884SLAB_ATTR(trace);
4885
4886static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4887{
4888 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4889}
4890
4891static ssize_t red_zone_store(struct kmem_cache *s,
4892 const char *buf, size_t length)
4893{
4894 if (any_slab_objects(s))
4895 return -EBUSY;
4896
4897 s->flags &= ~SLAB_RED_ZONE;
4898 if (buf[0] == '1') {
4899 s->flags |= SLAB_RED_ZONE;
4900 }
4901 calculate_sizes(s, -1);
4902 return length;
4903}
4904SLAB_ATTR(red_zone);
4905
4906static ssize_t poison_show(struct kmem_cache *s, char *buf)
4907{
4908 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4909}
4910
4911static ssize_t poison_store(struct kmem_cache *s,
4912 const char *buf, size_t length)
4913{
4914 if (any_slab_objects(s))
4915 return -EBUSY;
4916
4917 s->flags &= ~SLAB_POISON;
4918 if (buf[0] == '1') {
4919 s->flags |= SLAB_POISON;
4920 }
4921 calculate_sizes(s, -1);
4922 return length;
4923}
4924SLAB_ATTR(poison);
4925
4926static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4927{
4928 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4929}
4930
4931static ssize_t store_user_store(struct kmem_cache *s,
4932 const char *buf, size_t length)
4933{
4934 if (any_slab_objects(s))
4935 return -EBUSY;
4936
4937 s->flags &= ~SLAB_STORE_USER;
4938 if (buf[0] == '1') {
4939 s->flags &= ~__CMPXCHG_DOUBLE;
4940 s->flags |= SLAB_STORE_USER;
4941 }
4942 calculate_sizes(s, -1);
4943 return length;
4944}
4945SLAB_ATTR(store_user);
4946
4947static ssize_t validate_show(struct kmem_cache *s, char *buf)
4948{
4949 return 0;
4950}
4951
4952static ssize_t validate_store(struct kmem_cache *s,
4953 const char *buf, size_t length)
4954{
4955 int ret = -EINVAL;
4956
4957 if (buf[0] == '1') {
4958 ret = validate_slab_cache(s);
4959 if (ret >= 0)
4960 ret = length;
4961 }
4962 return ret;
4963}
4964SLAB_ATTR(validate);
4965
4966static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4967{
4968 if (!(s->flags & SLAB_STORE_USER))
4969 return -ENOSYS;
4970 return list_locations(s, buf, TRACK_ALLOC);
4971}
4972SLAB_ATTR_RO(alloc_calls);
4973
4974static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4975{
4976 if (!(s->flags & SLAB_STORE_USER))
4977 return -ENOSYS;
4978 return list_locations(s, buf, TRACK_FREE);
4979}
4980SLAB_ATTR_RO(free_calls);
4981#endif
4982
4983#ifdef CONFIG_FAILSLAB
4984static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4985{
4986 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4987}
4988
4989static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4990 size_t length)
4991{
4992 if (s->refcount > 1)
4993 return -EINVAL;
4994
4995 s->flags &= ~SLAB_FAILSLAB;
4996 if (buf[0] == '1')
4997 s->flags |= SLAB_FAILSLAB;
4998 return length;
4999}
5000SLAB_ATTR(failslab);
5001#endif
5002
5003static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5004{
5005 return 0;
5006}
5007
5008static ssize_t shrink_store(struct kmem_cache *s,
5009 const char *buf, size_t length)
5010{
5011 if (buf[0] == '1')
5012 kmem_cache_shrink(s);
5013 else
5014 return -EINVAL;
5015 return length;
5016}
5017SLAB_ATTR(shrink);
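/*
 * All of the SLAB_ATTR()/SLAB_ATTR_RO() attributes defined in this file are
 * exported per cache through sysfs (conventionally under
 * /sys/kernel/slab/<cache>/; the kset declared below is registered during
 * init). For example, writing "1" to the "shrink" file above calls
 * kmem_cache_shrink(), and writing "1" to "validate" runs
 * validate_slab_cache(); writable files are mode 0600, read-only ones 0400.
 */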
5018
5019#ifdef CONFIG_NUMA
5020static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5021{
5022 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
5023}
5024
5025static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5026 const char *buf, size_t length)
5027{
5028 unsigned long ratio;
5029 int err;
5030
5031 err = kstrtoul(buf, 10, &ratio);
5032 if (err)
5033 return err;
5034
5035 if (ratio <= 100)
5036 s->remote_node_defrag_ratio = ratio * 10;
5037
5038 return length;
5039}
5040SLAB_ATTR(remote_node_defrag_ratio);
5041#endif
5042
5043#ifdef CONFIG_SLUB_STATS
5044static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5045{
5046 unsigned long sum = 0;
5047 int cpu;
5048 int len;
5049 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
5050
5051 if (!data)
5052 return -ENOMEM;
5053
5054 for_each_online_cpu(cpu) {
5055 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5056
5057 data[cpu] = x;
5058 sum += x;
5059 }
5060
5061 len = sprintf(buf, "%lu", sum);
5062
5063#ifdef CONFIG_SMP
5064 for_each_online_cpu(cpu) {
5065 if (data[cpu] && len < PAGE_SIZE - 20)
5066 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
5067 }
5068#endif
5069 kfree(data);
5070 return len + sprintf(buf + len, "\n");
5071}
5072
5073static void clear_stat(struct kmem_cache *s, enum stat_item si)
5074{
5075 int cpu;
5076
5077 for_each_online_cpu(cpu)
5078 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5079}
5080
5081#define STAT_ATTR(si, text) \
5082static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5083{ \
5084 return show_stat(s, buf, si); \
5085} \
5086static ssize_t text##_store(struct kmem_cache *s, \
5087 const char *buf, size_t length) \
5088{ \
5089 if (buf[0] != '0') \
5090 return -EINVAL; \
5091 clear_stat(s, si); \
5092 return length; \
5093} \
5094SLAB_ATTR(text); \
5095
5096STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5097STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5098STAT_ATTR(FREE_FASTPATH, free_fastpath);
5099STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5100STAT_ATTR(FREE_FROZEN, free_frozen);
5101STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5102STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5103STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5104STAT_ATTR(ALLOC_SLAB, alloc_slab);
5105STAT_ATTR(ALLOC_REFILL, alloc_refill);
5106STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5107STAT_ATTR(FREE_SLAB, free_slab);
5108STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5109STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5110STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5111STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5112STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5113STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5114STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5115STAT_ATTR(ORDER_FALLBACK, order_fallback);
5116STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5117STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5118STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5119STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5120STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5121STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5122#endif
5123
static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&min_partial_attr.attr,
	&cpu_partial_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&shrink_attr.attr,
	&reserved_attr.attr,
	&slabs_cpu_partial_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&alloc_node_mismatch_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&deactivate_bypass_attr.attr,
	&order_fallback_attr.attr,
	&cmpxchg_double_fail_attr.attr,
	&cmpxchg_double_cpu_fail_attr.attr,
	&cpu_partial_alloc_attr.attr,
	&cpu_partial_free_attr.attr,
	&cpu_partial_node_attr.attr,
	&cpu_partial_drain_attr.attr,
#endif
#ifdef CONFIG_FAILSLAB
	&failslab_attr.attr,
#endif

	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

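/*
 * sysfs_ops dispatchers: recover the slab_attribute and the kmem_cache
 * from the embedded kobject and forward to the attribute's show()/store()
 * methods.
 */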
static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);
#ifdef CONFIG_MEMCG
	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
		struct kmem_cache *c;

		mutex_lock(&slab_mutex);
		if (s->max_attr_size < len)
			s->max_attr_size = len;

		/*
		 * This is a best effort propagation: a store on the root
		 * cache is replayed on all of its memcg child caches, but
		 * the return value reported to the caller is determined by
		 * the root cache alone.
		 *
		 * max_attr_size records the largest buffer ever stored so
		 * that memcg_propagate_slab_attrs() knows how big a buffer
		 * it needs when it later copies the attributes to newly
		 * created child caches.
		 */
		for_each_memcg_cache(c, s)
			attribute->store(c, buf, len);
		mutex_unlock(&slab_mutex);
	}
#endif
	return err;
}

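/*
 * Copy the attribute values already written to a root cache over to one
 * of its memcg child caches by running show() on the root and store() on
 * the child for each attribute. Nothing to do when no attribute was ever
 * written (max_attr_size == 0).
 */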
static void memcg_propagate_slab_attrs(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG
	int i;
	char *buffer = NULL;
	struct kmem_cache *root_cache;

	if (is_root_cache(s))
		return;

	root_cache = s->memcg_params.root_cache;

	/*
	 * No attribute was ever written to the root cache, so there is
	 * nothing to propagate.
	 */
	if (!root_cache->max_attr_size)
		return;

	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
		char mbuf[64];
		char *buf;
		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);

		if (!attr || !attr->store || !attr->show)
			continue;

		/*
		 * Use the small on-stack buffer whenever it is big enough;
		 * fall back to allocating a zeroed page otherwise. Once a
		 * page has been allocated it is reused for the remaining
		 * attributes and freed at the end.
		 */
		if (buffer)
			buf = buffer;
		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
			buf = mbuf;
		else {
			buffer = (char *) get_zeroed_page(GFP_KERNEL);
			if (WARN_ON(!buffer))
				continue;
			buf = buffer;
		}

		attr->show(root_cache, buf);
		attr->store(s, buf, strlen(buf));
	}

	if (buffer)
		free_page((unsigned long)buffer);
#endif
}

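/* kobject release callback: frees the cache once its last reference is dropped. */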
static void kmem_cache_release(struct kobject *k)
{
	slab_kmem_cache_release(to_slab(k));
}

static const struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release,
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static const struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

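/*
 * Kset under which root caches are registered; created as /sys/kernel/slab
 * by slab_sysfs_init(). Memcg child caches go into their root cache's
 * "cgroup" kset instead (see cache_kset() below).
 */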
static struct kset *slab_kset;

static inline struct kset *cache_kset(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG
	if (!is_root_cache(s))
		return s->memcg_params.root_cache->memcg_kset;
#endif
	return slab_kset;
}

#define ID_STR_LENGTH 64

/*
 * Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * Encode the flags that are relevant for cache merging first.
	 * Only mergeable caches get a generated id, so this small set
	 * of characters is sufficient.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_CONSISTENCY_CHECKS)
		*p++ = 'F';
	if (!(s->flags & SLAB_NOTRACK))
		*p++ = 't';
	if (s->flags & SLAB_ACCOUNT)
		*p++ = 'A';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);

	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}

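/*
 * Register a cache in sysfs: set up its kobject and attribute group, add
 * a "cgroup" kset for root caches on CONFIG_MEMCG kernels, and for
 * mergeable caches create the generated-id kobject plus an alias symlink
 * under the cache's real name.
 */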
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable = slab_unmergeable(s);

	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = cache_kset(s);
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
	if (err)
		goto out;

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		goto out_del_kobj;

#ifdef CONFIG_MEMCG
	if (is_root_cache(s)) {
		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
		if (!s->memcg_kset) {
			err = -ENOMEM;
			goto out_del_kobj;
		}
	}
#endif

	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
	}
out:
	if (!unmergeable)
		kfree(name);
	return err;
out_del_kobj:
	kobject_del(&s->kobj);
	goto out;
}

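/*
 * Reverse of sysfs_slab_add(): emit a remove uevent and drop the cache's
 * kobject (and its memcg kset). A no-op while sysfs is not up yet.
 */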
void sysfs_slab_remove(struct kmem_cache *s)
{
	if (slab_state < FULL)
		/*
		 * Sysfs has not been setup yet so no need to remove the
		 * cache from sysfs.
		 */
		return;

#ifdef CONFIG_MEMCG
	kset_unregister(s->memcg_kset);
#endif
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == FULL) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

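/*
 * Late init: create the "slab" kset, register every cache that was set up
 * before sysfs was available, and flush the buffered aliases.
 */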
static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	mutex_lock(&slab_mutex);

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		mutex_unlock(&slab_mutex);
		pr_err("Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = FULL;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
			       s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
			       al->name);
		kfree(al);
	}

	mutex_unlock(&slab_mutex);
	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
	unsigned long nr_slabs = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n) {
		nr_slabs += node_nr_slabs(n);
		nr_objs += node_nr_objs(n);
		nr_free += count_partial(n, count_free);
	}

	sinfo->active_objs = nr_objs - nr_free;
	sinfo->num_objs = nr_objs;
	sinfo->active_slabs = nr_slabs;
	sinfo->num_slabs = nr_slabs;
	sinfo->objects_per_slab = oo_objects(s->oo);
	sinfo->cache_order = oo_order(s->oo);
}

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
{
}

ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	return -EIO;
}
#endif /* CONFIG_SLABINFO */