linux/include/linux/kcsan-checks.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KCSAN access checks and modifiers. These can be used to explicitly check
 * uninstrumented accesses, or change KCSAN checking behaviour of accesses.
 *
 * Copyright (C) 2019, Google LLC.
 */

#ifndef _LINUX_KCSAN_CHECKS_H
#define _LINUX_KCSAN_CHECKS_H

/* Note: Only include what is already included by compiler.h. */
#include <linux/compiler_attributes.h>
#include <linux/types.h>

/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
#define KCSAN_ACCESS_WRITE      (1 << 0) /* Access is a write. */
#define KCSAN_ACCESS_COMPOUND   (1 << 1) /* Compounded read-write instrumentation. */
#define KCSAN_ACCESS_ATOMIC     (1 << 2) /* Access is atomic. */
/* The following are special, and never due to compiler instrumentation. */
#define KCSAN_ACCESS_ASSERT     (1 << 3) /* Access is an assertion. */
#define KCSAN_ACCESS_SCOPED     (1 << 4) /* Access is a scoped access. */

/*
 * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
 * even in compilation units that selectively disable KCSAN, but must use KCSAN
 * to validate access to an address. Never use these in header files!
 */
#ifdef CONFIG_KCSAN
/**
 * __kcsan_check_access - check generic access for races
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 */
void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
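
/*
 * Example -- an illustrative sketch, not a prescribed pattern: explicitly
 * reporting a write performed by code the compiler cannot instrument. The
 * helper arch_copy_noinstr() and struct shared are hypothetical.
 *
 *      void publish(struct shared *s, const void *src)
 *      {
 *              arch_copy_noinstr(s->buf, src, sizeof(s->buf)); // uninstrumented
 *              // Report the write to the KCSAN runtime explicitly:
 *              __kcsan_check_access(s->buf, sizeof(s->buf), KCSAN_ACCESS_WRITE);
 *      }
 */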

/**
 * kcsan_disable_current - disable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_disable_current(void);

/**
 * kcsan_enable_current - re-enable KCSAN for the current context
 *
 * Supports nesting.
 */
void kcsan_enable_current(void);
void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
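
/*
 * Example -- an illustrative sketch: suppressing reports for a deliberately
 * tolerated racy read. Since disabling supports nesting, this is safe even if
 * the caller already disabled KCSAN for the current context.
 *
 *      int read_stat_racy(const int *stat)
 *      {
 *              int val;
 *
 *              kcsan_disable_current();
 *              val = *stat;    // data race deliberately tolerated
 *              kcsan_enable_current();
 *              return val;
 *      }
 */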

/**
 * kcsan_nestable_atomic_begin - begin nestable atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_nestable_atomic_begin(void);

/**
 * kcsan_nestable_atomic_end - end nestable atomic region
 */
void kcsan_nestable_atomic_end(void);
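
/*
 * Example -- an illustrative sketch: delimiting a region whose unprotected
 * accesses are intentional and should be treated as atomic, e.g. inside a
 * custom synchronization primitive. Nestable regions may contain further
 * begin/end pairs.
 *
 *      kcsan_nestable_atomic_begin();
 *      ... racing accesses that are intentionally unprotected ...
 *      kcsan_nestable_atomic_end();
 */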

/**
 * kcsan_flat_atomic_begin - begin flat atomic region
 *
 * Accesses within the atomic region may appear to race with other accesses but
 * should be considered atomic.
 */
void kcsan_flat_atomic_begin(void);

/**
 * kcsan_flat_atomic_end - end flat atomic region
 */
void kcsan_flat_atomic_end(void);

/**
 * kcsan_atomic_next - consider following accesses as atomic
 *
 * Force treating the next n memory accesses for the current context as atomic
 * operations.
 *
 * @n: number of following memory accesses to treat as atomic.
 */
void kcsan_atomic_next(int n);
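
/*
 * Example -- an illustrative sketch of how a seqlock-style retry loop can mark
 * its reads atomic, since a racing write only causes a harmless retry (the
 * mainline seqlock implementation does this internally; field names here are
 * hypothetical):
 *
 *      do {
 *              seq = read_seqcount_begin(&s->seq);
 *              kcsan_atomic_next(2);   // the next 2 accesses may race
 *              a = s->a;
 *              b = s->b;
 *      } while (read_seqcount_retry(&s->seq, seq));
 *      kcsan_atomic_next(0);           // do not carry the region past the loop
 */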

/**
 * kcsan_set_access_mask - set access mask
 *
 * Set the access mask for all accesses for the current context if non-zero.
 * Only value changes to bits set in the mask will be reported.
 *
 * @mask: bitmask
 */
void kcsan_set_access_mask(unsigned long mask);
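
/*
 * Example -- an illustrative sketch: only report racing value changes to the
 * bits in RDONLY_MASK (a hypothetical mask); changes to the remaining bits of
 * flags are ignored. This is the mechanism behind ASSERT_EXCLUSIVE_BITS()
 * below.
 *
 *      kcsan_set_access_mask(RDONLY_MASK);
 *      __kcsan_check_access(&flags, sizeof(flags), KCSAN_ACCESS_ASSERT);
 *      kcsan_set_access_mask(0);       // clear the mask again
 */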

/* Scoped access information. */
struct kcsan_scoped_access {
        struct list_head list;
        const volatile void *ptr;
        size_t size;
        int type;
};
/*
 * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
 * out of scope; relies on attribute "cleanup", which is supported by all
 * compilers that support KCSAN.
 */
#define __kcsan_cleanup_scoped                                                 \
        __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))

/**
 * kcsan_begin_scoped_access - begin scoped access
 *
 * Begin scoped access and initialize @sa, which will cause KCSAN to
 * continuously check the memory range in the current thread until
 * kcsan_end_scoped_access() is called for @sa.
 *
 * Scoped accesses are implemented by appending @sa to an internal list for the
 * current execution context, and then checked on every call into the KCSAN
 * runtime.
 *
 * @ptr: address of access
 * @size: size of access
 * @type: access type modifier
 * @sa: struct kcsan_scoped_access to use for the scope of the access
 */
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
                          struct kcsan_scoped_access *sa);

/**
 * kcsan_end_scoped_access - end scoped access
 *
 * End a scoped access, which will stop KCSAN checking the memory range.
 * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
 *
 * @sa: a previously initialized struct kcsan_scoped_access
 */
void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
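
/*
 * Example -- an illustrative sketch: manually delimiting a scoped access where
 * the cleanup-based helpers below do not fit. shared_foo is hypothetical.
 *
 *      struct kcsan_scoped_access sa;
 *
 *      kcsan_begin_scoped_access(&shared_foo, sizeof(shared_foo),
 *                                KCSAN_ACCESS_SCOPED, &sa);
 *      ... region during which shared_foo is checked on every runtime call ...
 *      kcsan_end_scoped_access(&sa);
 */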

#else /* CONFIG_KCSAN */

static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
                                        int type) { }

static inline void kcsan_disable_current(void)          { }
static inline void kcsan_enable_current(void)           { }
static inline void kcsan_enable_current_nowarn(void)    { }
static inline void kcsan_nestable_atomic_begin(void)    { }
static inline void kcsan_nestable_atomic_end(void)      { }
static inline void kcsan_flat_atomic_begin(void)        { }
static inline void kcsan_flat_atomic_end(void)          { }
static inline void kcsan_atomic_next(int n)             { }
static inline void kcsan_set_access_mask(unsigned long mask) { }

struct kcsan_scoped_access { };
#define __kcsan_cleanup_scoped __maybe_unused
static inline struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
                          struct kcsan_scoped_access *sa) { return sa; }
static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }

#endif /* CONFIG_KCSAN */

#ifdef __SANITIZE_THREAD__
/*
 * Only calls into the runtime when the particular compilation unit has KCSAN
 * instrumentation enabled. May be used in header files.
 */
#define kcsan_check_access __kcsan_check_access

/*
 * Only use these to disable KCSAN for accesses in the current compilation unit;
 * calls into libraries may still perform KCSAN checks.
 */
#define __kcsan_disable_current kcsan_disable_current
#define __kcsan_enable_current kcsan_enable_current_nowarn
#else
static inline void kcsan_check_access(const volatile void *ptr, size_t size,
                                      int type) { }
static inline void __kcsan_enable_current(void)  { }
static inline void __kcsan_disable_current(void) { }
#endif

/**
 * __kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)

/**
 * __kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_write(ptr, size)                                         \
        __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * __kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define __kcsan_check_read_write(ptr, size)                                    \
        __kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read - check regular read access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)

/**
 * kcsan_check_write - check regular write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_write(ptr, size)                                           \
        kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/**
 * kcsan_check_read_write - check regular read-write access for races
 *
 * @ptr: address of access
 * @size: size of access
 */
#define kcsan_check_read_write(ptr, size)                                      \
        kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
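
/*
 * Example -- an illustrative sketch: instrumenting a custom access helper in a
 * header file, where kcsan_check_*() only calls into the runtime if the
 * including compilation unit is itself instrumented. my_read_u64() is
 * hypothetical.
 *
 *      static inline u64 my_read_u64(const u64 *p)
 *      {
 *              kcsan_check_read(p, sizeof(*p));
 *              return *p;
 *      }
 */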

/*
 * Check for atomic accesses: if atomic accesses are not ignored, this simply
 * aliases to kcsan_check_access(), otherwise becomes a no-op.
 */
#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
#define kcsan_check_atomic_read(...)            do { } while (0)
#define kcsan_check_atomic_write(...)           do { } while (0)
#define kcsan_check_atomic_read_write(...)      do { } while (0)
#else
#define kcsan_check_atomic_read(ptr, size)                                     \
        kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
#define kcsan_check_atomic_write(ptr, size)                                    \
        kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
#define kcsan_check_atomic_read_write(ptr, size)                               \
        kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
#endif
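
/*
 * Example -- an illustrative sketch, following the pattern used to instrument
 * atomic operations: with CONFIG_KCSAN_IGNORE_ATOMICS the check compiles to a
 * no-op, otherwise the access is checked but considered atomic.
 * my_atomic_read() is hypothetical.
 *
 *      static __always_inline int my_atomic_read(const atomic_t *v)
 *      {
 *              kcsan_check_atomic_read(v, sizeof(*v));
 *              return arch_atomic_read(v);
 *      }
 */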

/**
 * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
 *
 * Assert that there are no concurrent writes to @var; other readers are
 * allowed. This assertion can be used to specify properties of concurrent code,
 * where violation cannot be detected as a normal data race.
 *
 * For example, if we only have a single writer, but multiple concurrent
 * readers, to avoid data races, all these accesses must be marked; even
 * concurrent marked writes racing with the single writer are bugs.
 * Unfortunately, due to being marked, they are no longer data races. For cases
 * like these, we can use the macro as follows:
 *
 * .. code-block:: c
 *
 *      void writer(void) {
 *              spin_lock(&update_foo_lock);
 *              ASSERT_EXCLUSIVE_WRITER(shared_foo);
 *              WRITE_ONCE(shared_foo, ...);
 *              spin_unlock(&update_foo_lock);
 *      }
 *      void reader(void) {
 *              // update_foo_lock does not need to be held!
 *              ... = READ_ONCE(shared_foo);
 *      }
 *
 * Note: If a clear scope exists in which no concurrent writes are expected,
 * ASSERT_EXCLUSIVE_WRITER_SCOPED() performs more thorough checking.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
        __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)

/*
 * Helper macros for the implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
 * expected to be unique for the scope in which instances of kcsan_scoped_access
 * are declared.
 */
#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id)                               \
        struct kcsan_scoped_access __kcsan_scoped_name(id, _)                  \
                __kcsan_cleanup_scoped;                                        \
        struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p)          \
                __maybe_unused = kcsan_begin_scoped_access(                    \
                        &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type),     \
                        &__kcsan_scoped_name(id, _))

/**
 * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to @var for the duration of the
 * scope in which it is introduced. This provides a better way to fully cover
 * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
 * increases the likelihood for KCSAN to detect racing accesses.
 *
 * For example, it allows finding race-condition bugs that only occur due to
 * state changes within the scope itself:
 *
 * .. code-block:: c
 *
 *      void writer(void) {
 *              spin_lock(&update_foo_lock);
 *              {
 *                      ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
 *                      WRITE_ONCE(shared_foo, 42);
 *                      ...
 *                      // shared_foo should still be 42 here!
 *              }
 *              spin_unlock(&update_foo_lock);
 *      }
 *      void buggy(void) {
 *              if (READ_ONCE(shared_foo) == 42)
 *                      WRITE_ONCE(shared_foo, 1); // bug!
 *      }
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var)                                    \
        __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)

/**
 * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
 *
 * Assert that there are no concurrent accesses to @var (no readers nor
 * writers). This assertion can be used to specify properties of concurrent
 * code, where violation cannot be detected as a normal data race.
 *
 * For example, where exclusive access is expected after determining no other
 * users of an object are left, but the object is not actually freed. We can
 * check that this property actually holds as follows:
 *
 * .. code-block:: c
 *
 *      if (refcount_dec_and_test(&obj->refcnt)) {
 *              ASSERT_EXCLUSIVE_ACCESS(*obj);
 *              do_some_cleanup(obj);
 *              release_for_reuse(obj);
 *      }
 *
 * Note:
 *
 * 1. If a clear scope exists in which no concurrent accesses are expected,
 *    ASSERT_EXCLUSIVE_ACCESS_SCOPED() performs more thorough checking.
 *
 * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better
 *    fit to detect use-after-free bugs.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS(var)                                           \
        __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)

/**
 * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
 *
 * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
 *
 * Assert that there are no concurrent accesses to @var (no readers nor writers)
 * for the entire duration of the scope in which it is introduced. This provides
 * a better way to fully cover the enclosing scope, compared to multiple
 * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
 * racing accesses.
 *
 * @var: variable to assert on
 */
#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var)                                    \
        __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)

/**
 * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
 *
 * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
 *
 * Assert that there are no concurrent writes to a subset of bits in @var;
 * concurrent readers are permitted. This assertion captures more detailed
 * bit-level properties, compared to the other (word granularity) assertions.
 * Only the bits set in @mask are checked for concurrent modifications, while
 * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
 * are ignored.
 *
 * Use this for variables where some bits must not be modified concurrently,
 * yet other bits are expected to be modified concurrently.
 *
 * For example, variables where, after initialization, some bits are read-only,
 * but other bits may still be modified concurrently. A reader may wish to
 * assert that this is true as follows:
 *
 * .. code-block:: c
 *
 *      ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *      foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
 * to access the masked bits only, and KCSAN optimistically assumes it is
 * therefore safe, even in the presence of data races, and marking it with
 * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
 * it may still be advisable to do so, since we cannot reason about all compiler
 * optimizations when it comes to bit manipulations (on the reader and writer
 * side). If you are sure nothing can go wrong, the above can be written simply
 * as:
 *
 * .. code-block:: c
 *
 *      ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
 *      foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
 *
 * Another example where this may be used is when certain bits of @var may only
 * be modified when holding the appropriate lock, but other bits may still be
 * modified concurrently. Writers, where other bits may change concurrently,
 * could use the assertion as follows:
 *
 * .. code-block:: c
 *
 *      spin_lock(&foo_lock);
 *      ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
 *      old_flags = flags;
 *      new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
 *      if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
 *      spin_unlock(&foo_lock);
 *
 * @var: variable to assert on
 * @mask: only check for modifications to bits set in @mask
 */
#define ASSERT_EXCLUSIVE_BITS(var, mask)                                       \
        do {                                                                   \
                kcsan_set_access_mask(mask);                                   \
                __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
                kcsan_set_access_mask(0);                                      \
                kcsan_atomic_next(1);                                          \
        } while (0)

#endif /* _LINUX_KCSAN_CHECKS_H */