linux/include/linux/kmemcheck.h
#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H

#include <linux/mm_types.h>
#include <linux/types.h>

#ifdef CONFIG_KMEMCHECK
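/*
 * Runtime toggle for kmemcheck; typically set from the kmemcheck= boot
 * parameter and adjustable afterwards via /proc/sys/kernel/kmemcheck
 * when the sysctl is built in.
 */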
extern int kmemcheck_enabled;

/* The slab-related functions. */
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
void kmemcheck_free_shadow(struct page *page, int order);
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
                          size_t size);
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);

void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
                               gfp_t gfpflags);

void kmemcheck_show_pages(struct page *p, unsigned int n);
void kmemcheck_hide_pages(struct page *p, unsigned int n);

bool kmemcheck_page_is_tracked(struct page *p);

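/*
 * Shadow marking: each byte that kmemcheck tracks carries a shadow state
 * (roughly: unallocated, uninitialized, initialized or freed), and only
 * reads from bytes marked initialized are considered safe. The
 * kmemcheck_mark_*() helpers below set that state for a range of bytes
 * (or, for the *_pages variants, for whole pages).
 */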
void kmemcheck_mark_unallocated(void *address, unsigned int n);
void kmemcheck_mark_uninitialized(void *address, unsigned int n);
void kmemcheck_mark_initialized(void *address, unsigned int n);
void kmemcheck_mark_freed(void *address, unsigned int n);

void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);

int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);

bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);

/*
 * Bitfield annotations
 *
 * How to use: If you have a struct using bitfields, for example
 *
 *     struct a {
 *             int x:8, y:8;
 *     };
 *
 * then this should be rewritten as
 *
 *     struct a {
 *             kmemcheck_bitfield_begin(flags);
 *             int x:8, y:8;
 *             kmemcheck_bitfield_end(flags);
 *     };
 *
 * Now the "flags_begin" and "flags_end" members may be used to refer to the
 * beginning and end, respectively, of the bitfield (and things like
 * &x.flags_begin are allowed). As soon as the struct is allocated, the bit-
 * fields should be annotated:
 *
 *     struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
 *     kmemcheck_annotate_bitfield(a, flags);
 */
#define kmemcheck_bitfield_begin(name)  \
        int name##_begin[0];

#define kmemcheck_bitfield_end(name)    \
        int name##_end[0];

#define kmemcheck_annotate_bitfield(ptr, name)                          \
        do {                                                            \
                int _n;                                                 \
                                                                        \
                if (!ptr)                                               \
                        break;                                          \
                                                                        \
                _n = (long) &((ptr)->name##_end)                        \
                        - (long) &((ptr)->name##_begin);                \
                BUILD_BUG_ON(_n < 0);                                   \
                                                                        \
                kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
        } while (0)

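/*
 * kmemcheck_annotate_variable(var) marks the whole of "var" (any lvalue
 * that sizeof() and & accept, so not an individual bitfield) as
 * initialized. It is typically used to silence false positives on a
 * member that is legitimately read back before every byte of it has been
 * written, e.g. (placeholder names):
 *
 *     kmemcheck_annotate_variable(obj->flags);
 */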
#define kmemcheck_annotate_variable(var)                                \
        do {                                                            \
                kmemcheck_mark_initialized(&(var), sizeof(var));        \
        } while (0)

#else
#define kmemcheck_enabled 0

static inline void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}

static inline void
kmemcheck_free_shadow(struct page *page, int order)
{
}

static inline void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
                     size_t size)
{
}

static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
                                       size_t size)
{
}

static inline void kmemcheck_pagealloc_alloc(struct page *p,
        unsigned int order, gfp_t gfpflags)
{
}

static inline bool kmemcheck_page_is_tracked(struct page *p)
{
        return false;
}

static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_freed(void *address, unsigned int n)
{
}

static inline void kmemcheck_mark_unallocated_pages(struct page *p,
                                                    unsigned int n)
{
}

static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
                                                      unsigned int n)
{
}

static inline void kmemcheck_mark_initialized_pages(struct page *p,
                                                    unsigned int n)
{
}

static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
        return true;
}

#define kmemcheck_bitfield_begin(name)
#define kmemcheck_bitfield_end(name)
#define kmemcheck_annotate_bitfield(ptr, name)  \
        do {                                    \
        } while (0)

#define kmemcheck_annotate_variable(var)        \
        do {                                    \
        } while (0)

#endif /* CONFIG_KMEMCHECK */

#endif /* LINUX_KMEMCHECK_H */