/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/* Reusable slab representation shared by the SLAB, SLUB and SLOB allocators */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
	};
	struct kmem_cache *slab_cache;
	void *freelist;	/* array of free object indexes */
	void *s_mem;	/* first object */
	unsigned int active;

#elif defined(CONFIG_SLUB)

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
#ifdef CONFIG_SLUB_CPU_PARTIAL
		struct {
			struct slab *next;
			int slabs;	/* Nr of slabs left */
		};
#endif
	};
	struct kmem_cache *slab_cache;
	/* Double-word boundary */
	void *freelist;		/* first free object */
	union {
		unsigned long counters;
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
	unsigned int __unused;

#elif defined(CONFIG_SLOB)

	struct list_head slab_list;
	void *__unused_1;
	void *freelist;		/* first free block */
	long units;
	unsigned int __unused_2;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
#ifndef CONFIG_SLOB
SLAB_MATCH(rcu_head, rcu_head);
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where the
 * members are interpreted differently.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back
 * to a folio in order to communicate with the rest of the mm. Please use
 * this helper function instead of casting yourself, as the implementation
 * may change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page.
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
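
/*
 * Illustrative sketch: the conversion helpers above move between the three
 * views of the same memory without open-coded casts:
 *
 *	struct folio *folio = slab_folio(slab);
 *	struct page *page = slab_page(slab);
 *
 * Here page is the head page of folio, and folio_slab(folio) yields back
 * the original slab pointer.
 */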

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
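
/*
 * Illustrative sketch: given an address known to point into a slab object,
 * the helpers above recover the backing slab and its geometry:
 *
 *	struct slab *slab = virt_to_slab(obj);
 *
 *	if (slab)
 *		pr_info("nid=%d order=%d size=%zu\n",
 *			slab_nid(slab), slab_order(slab), slab_size(slab));
 */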

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put a
 * anonymous struct definition in these allocators so that the
 * common fields have a common place where they are defined.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves and initialize
 * their caches incrementally while booting.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding for a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | SLAB_NO_USER_FLAGS)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object listed
 * in array must be in the same cache.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
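
/*
 * Illustrative sketch: callers typically gate debug-only work on one of
 * the SLAB_DEBUG_FLAGS bits, e.g.:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_STORE_USER))
 *		print_tracking(s, object);
 */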

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
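
/*
 * Worked example (illustrative): on a 64-bit kernel a cache with 256-byte
 * slots charges 256 + sizeof(struct obj_cgroup *) = 264 bytes per
 * accounted object.
 */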

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			goto out;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		goto out;

	*objcgp = objcg;
	return true;
out:
	obj_cgroup_put(objcg);
	return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_enabled() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
						     false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup **objcgs;
	struct obj_cgroup *objcg;
	struct slab *slab;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		/* the pointer may not belong to a slab, e.g. a large kmalloc */
		slab = virt_to_slab(p[i]);
		if (!slab)
			continue;

		objcgs = slab_objcgs(slab);
		if (!objcgs)
			continue;

		/* without a known cache, look it up from the slab itself */
		if (!s_orig)
			s = slab->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					   struct kmem_cache *s, gfp_t gfp,
					   bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifndef CONFIG_SLOB
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
#endif /* CONFIG_SLOB */

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}
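
/*
 * Illustrative sketch: slab_ksize() is what ultimately backs ksize() for
 * slab-backed objects. On a SLUB configuration without debug or KASAN
 * flags, a 100-byte request served from the 128-byte kmalloc cache reports
 * the whole slot:
 *
 *	void *buf = kmalloc(100, GFP_KERNEL);
 *	size_t usable = ksize(buf);	// 128 on such a configuration
 */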

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init)
{
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, s->object_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
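
/*
 * Usage sketch (illustrative): walk the per-node structures of a cache;
 * with SLUB, for instance:
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		pr_info("node %d: %lu partial slabs\n", node, n->nr_partial);
 */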

#endif /* CONFIG_SLOB */

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	/*
	 * If kmem_valid_obj() returns true, the following fields contain
	 * valid data.
	 */
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif

#endif /* MM_SLAB_H */