linux/include/linux/compiler.h
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __kernel       __attribute__((address_space(0)))
# define __safe         __attribute__((safe))
# define __force        __attribute__((force))
# define __nocast       __attribute__((nocast))
# define __iomem        __attribute__((noderef, address_space(2)))
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x)  __attribute__((context(x,0,1)))
# define __releases(x)  __attribute__((context(x,1,0)))
# define __acquire(x)   __context__(x,1)
# define __release(x)   __context__(x,-1)
# define __cond_lock(x,c)       ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu       __attribute__((noderef, address_space(3)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu          __attribute__((noderef, address_space(4)))
#else /* CONFIG_SPARSE_RCU_POINTER */
# define __rcu
#endif /* CONFIG_SPARSE_RCU_POINTER */
# define __private      __attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
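
/*
 * Editor's example (illustrative, made-up names): the annotations above let
 * sparse check address-space and lock-context rules, e.g.:
 *
 *      static long hypothetical_read(char __user *buf, const char *kbuf,
 *                                    size_t len)
 *      {
 *              return copy_to_user(buf, kbuf, len) ? -EFAULT : 0;
 *      }
 *
 *      static void hypothetical_lock(struct hypothetical_dev *dev)
 *              __acquires(&dev->lock)
 *      {
 *              spin_lock(&dev->lock);
 *      }
 *
 * Dereferencing buf directly, or returning from hypothetical_lock() with
 * the acquire/release context unbalanced, draws a sparse warning.
 */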

/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

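/*
 * Editor's note (illustrative, not part of the original header): the
 * indirection is what allows arguments such as __LINE__ to expand before
 * pasting.  Calling ___PASTE(x, __LINE__) directly would yield the token
 * x__LINE__, whereas __PASTE(x, __LINE__) on, say, line 42 first expands
 * __LINE__ and yields x42.
 */
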
#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* The Intel compiler also defines __GNUC__, so override the implementations
 * that came from the header files above.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* The Clang compiler also defines __GNUC__, so override the implementations
 * that came from the header files above.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go below
 * this comment.  Actual compiler/compiler-version specific implementations
 * come from the header files above.
 */

struct ftrace_branch_data {
        const char *func;
        const char *file;
        unsigned line;
        union {
                struct {
                        unsigned long correct;
                        unsigned long incorrect;
                };
                struct {
                        unsigned long miss;
                        unsigned long hit;
                };
                unsigned long miss_hit[2];
        };
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)       __builtin_expect(!!(x), 1)
#define unlikely_notrace(x)     __builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({                                  \
                        int ______r;                                    \
                        static struct ftrace_branch_data                \
                                __attribute__((__aligned__(4)))         \
                                __attribute__((section("_ftrace_annotated_branch"))) \
                                ______f = {                             \
                                .func = __func__,                       \
                                .file = __FILE__,                       \
                                .line = __LINE__,                       \
                        };                                              \
                        ______r = likely_notrace(x);                    \
                        ftrace_likely_update(&______f, ______r, expect); \
                        ______r;                                        \
                })

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)     (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x)   (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
        if (__builtin_constant_p(!!(cond)) ? !!(cond) :                 \
        ({                                                              \
                int ______r;                                            \
                static struct ftrace_branch_data                        \
                        __attribute__((__aligned__(4)))                 \
                        __attribute__((section("_ftrace_branch")))      \
                        ______f = {                                     \
                                .func = __func__,                       \
                                .file = __FILE__,                       \
                                .line = __LINE__,                       \
                        };                                              \
                ______r = !!(cond);                                     \
                ______f.miss_hit[______r]++;                            \
                ______r;                                                \
        }))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)      __builtin_expect(!!(x), 1)
# define unlikely(x)    __builtin_expect(!!(x), 0)
#endif
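
/*
 * Editor's example (illustrative, made-up names): likely()/unlikely() only
 * hint the expected truth value; the condition is still evaluated.  Typical
 * use is annotating an error path:
 *
 *      static int hypothetical_submit(struct hypothetical_req *req)
 *      {
 *              if (unlikely(!req))
 *                      return -EINVAL;
 *              return hypothetical_do_submit(req);
 *      }
 *
 * Under CONFIG_TRACE_BRANCH_PROFILING the instrumented variants above also
 * record how often each annotation matched the branch actually taken.
 */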

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)                                   \
  ({ unsigned long __ptr;                                       \
     __ptr = (unsigned long) (ptr);                             \
    (typeof(ptr)) (__ptr + (off)); })
#endif
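
/*
 * Editor's sketch (hedged, not part of the original header): RELOC_HIDE()
 * computes ptr + off while laundering the pointer through an unsigned long,
 * so the optimizer cannot assume the result still points into the object
 * behind ptr.  A hypothetical per-CPU helper built on it might look like:
 *
 *      #define my_percpu_ptr(var, cpu_offset) \
 *              RELOC_HIDE(&(var), (cpu_offset))
 *
 * where cpu_offset (a made-up name) stands for the distance from the
 * reference per-CPU area to the given CPU's copy.
 */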

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
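
/*
 * Editor's note (illustrative): __UNIQUE_ID(foo) pastes the prefix and the
 * current line number, so on line 123 it expands to __UNIQUE_ID_foo123.
 * It is only "not quite" unique because two expansions on the same line
 * still collide; compiler headers that know __COUNTER__ is available can
 * provide a genuinely unique override before this fallback is seen.
 */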

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE                                                \
({                                                                      \
        switch (size) {                                                 \
        case 1: *(__u8 *)res = *(volatile __u8 *)p; break;              \
        case 2: *(__u16 *)res = *(volatile __u16 *)p; break;            \
        case 4: *(__u32 *)res = *(volatile __u32 *)p; break;            \
        case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
        default:                                                        \
                barrier();                                              \
                __builtin_memcpy((void *)res, (const void *)p, size);   \
                barrier();                                              \
        }                                                               \
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
 *      https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
        case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
        case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
        case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
        default:
                barrier();
                __builtin_memcpy((void *)p, (const void *)res, size);
                barrier();
        }
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits),
 * READ_ONCE() and WRITE_ONCE() fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and one for the
 * macro's copy through the stack variable '__u'.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)                                           \
({                                                                      \
        union { typeof(x) __val; char __c[1]; } __u;                    \
        if (check)                                                      \
                __read_once_size(&(x), __u.__c, sizeof(x));             \
        else                                                            \
                __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
        __u.__val;                                                      \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({                                                      \
        union { typeof(x) __val; char __c[1]; } __u =   \
                { .__val = (__force typeof(x)) (val) }; \
        __write_once_size(&(x), __u.__c, sizeof(x));    \
        __u.__val;                                      \
})
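
/*
 * Editor's example (illustrative, made-up names): a flag shared between
 * process context and an interrupt handler on the same CPU can be mediated
 * with READ_ONCE()/WRITE_ONCE() so the compiler neither caches nor tears
 * the accesses:
 *
 *      static int hypothetical_done;
 *
 *      void hypothetical_irq_handler(void)
 *      {
 *              WRITE_ONCE(hypothetical_done, 1);
 *      }
 *
 *      void hypothetical_wait(void)
 *      {
 *              while (!READ_ONCE(hypothetical_done))
 *                      cpu_relax();
 *      }
 */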

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 *              int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated           /* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

#ifndef __malloc
#define __malloc
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >= 3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about unless marked with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used                 /* unimplemented */
#endif
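
/*
 * Editor's example (illustrative, made-up names): a helper invoked only
 * from inline assembly appears unreferenced to the compiler, so it is
 * declared with __used to keep it in the object file:
 *
 *      static void __used hypothetical_asm_helper(void)
 *      {
 *              hypothetical_fixup();
 *      }
 */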

#ifndef __maybe_unused
# define __maybe_unused         /* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused        /* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__    /* unimplemented */
#endif
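
/*
 * Editor's example (illustrative, made-up name): a helper that examines
 * nothing but its arguments qualifies, e.g.
 *
 *      static int __attribute_const__ hypothetical_cube(int x)
 *      {
 *              return x * x * x;
 *      }
 *
 * which lets the compiler fold repeated calls with the same argument into
 * one.  A function that dereferences a pointer argument must not be
 * declared const.
 */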

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif


/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
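
/*
 * Editor's example (illustrative, made-up macro name): __same_type() is
 * handy in macro-based type checks, e.g. a container_of()-style helper
 * could verify its argument with
 *
 *      #define hypothetical_check(ptr, type, member) \
 *              BUILD_BUG_ON(!__same_type(*(ptr), ((type *)0)->member))
 *
 * and __native_word(t) is what compiletime_assert_atomic_type() below uses
 * to ensure loads/stores of t can be single instructions.
 */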

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
        do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)            \
        do {                                                            \
                bool __cond = !(condition);                             \
                extern void prefix ## suffix(void) __compiletime_error(msg); \
                if (__cond)                                             \
                        prefix ## suffix();                             \
                __compiletime_error_fallback(__cond);                   \
        } while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
        __compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of the POSIX assert, this macro will break the build if
 * the supplied condition is *false*, emitting the supplied error message if
 * the compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
        _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)                               \
        compiletime_assert(__native_word(t),                            \
                "Need native word sized stores/loads for atomicity.")
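
/*
 * Editor's example (illustrative): the condition must be a compile-time
 * constant, e.g.
 *
 *      compiletime_assert(sizeof(long) == 8,
 *                         "this code assumes a 64-bit long");
 *
 * If the assertion fails and the compiler supports __compiletime_error,
 * the call to the never-defined __compiletime_assert_<line>() function
 * survives optimization and reports the message; otherwise the negative
 * array size in __compiletime_error_fallback() breaks the build anonymously.
 */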

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
        __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
        (volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
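
/*
 * Editor's example (illustrative, made-up name): ACCESS_ONCE() forces a
 * volatile access to a scalar lvalue, e.g.
 *
 *      while (!ACCESS_ONCE(hypothetical_ready))
 *              cpu_relax();
 *
 * The otherwise-unused __var is initialized from a scalar 0 so that uses
 * on non-scalar types fail to compile.  New code should prefer
 * READ_ONCE()/WRITE_ONCE(), which also handle aggregates.
 */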

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
        typeof(p) _________p1 = READ_ONCE(p); \
        typeof(*(p)) *___typecheck_p __maybe_unused; \
        smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
})
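
/*
 * Editor's example (illustrative, made-up names): a reader can pick up a
 * pointer that a writer published with smp_store_release() and then safely
 * dereference it, even when the object is reference counted rather than
 * RCU managed:
 *
 *      struct hypothetical_obj *obj = lockless_dereference(global_obj);
 *
 *      if (obj)
 *              hypothetical_use(obj->field);
 *
 * smp_read_barrier_depends() orders the load of obj->field after the load
 * of the pointer on the architectures (e.g. Alpha) that require it.
 */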

/* Ignore/forbid kprobes attach on very low-level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes      __attribute__((__section__(".kprobes.text")))
# define nokprobe_inline        __always_inline
#else
# define __kprobes
# define nokprobe_inline        inline
#endif
#endif /* __LINUX_COMPILER_H */