linux/include/linux/compiler.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __kernel       __attribute__((address_space(0)))
# define __safe         __attribute__((safe))
# define __force        __attribute__((force))
# define __nocast       __attribute__((nocast))
# define __iomem        __attribute__((noderef, address_space(2)))
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x)  __attribute__((context(x,0,1)))
# define __releases(x)  __attribute__((context(x,1,0)))
# define __acquire(x)   __context__(x,1)
# define __release(x)   __context__(x,-1)
# define __cond_lock(x,c)       ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu       __attribute__((noderef, address_space(3)))
# define __rcu          __attribute__((noderef, address_space(4)))
# define __private      __attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# ifdef STRUCTLEAK_PLUGIN
#  define __user __attribute__((user))
# else
#  define __user
# endif
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
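/*
 * Example (illustrative sketch, not part of this header): under sparse,
 * a __user pointer must not be dereferenced directly, and stripping the
 * address-space annotation requires an explicit __force cast:
 *
 *	int get_flag(int __user *uptr)
 *	{
 *		int v = *uptr;                        // sparse warns here
 *		unsigned long a =
 *			(__force unsigned long)uptr;  // cast is accepted
 *		...
 *	}
 */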

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
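/*
 * Example (illustrative): the double expansion lets arguments such as
 * __LINE__ be macro-expanded before pasting. On line 42,
 *
 *	__PASTE(var_, __LINE__)
 *
 * yields the identifier var_42, whereas calling ___PASTE() directly
 * would paste the literal token var___LINE__.
 */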

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* The Intel compiler also defines __GNUC__, so we overwrite the
 * implementations coming from the header files above here.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* The Clang compiler also defines __GNUC__, so we overwrite the
 * implementations coming from the header files above here.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go
 * below this comment. Actual compiler/compiler-version-specific
 * implementations come from the header files above.
 */

struct ftrace_branch_data {
        const char *func;
        const char *file;
        unsigned line;
        union {
                struct {
                        unsigned long correct;
                        unsigned long incorrect;
                };
                struct {
                        unsigned long miss;
                        unsigned long hit;
                };
                unsigned long miss_hit[2];
        };
};

struct ftrace_likely_data {
        struct ftrace_branch_data       data;
        unsigned long                   constant;
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                          int expect, int is_constant);

#define likely_notrace(x)       __builtin_expect(!!(x), 1)
#define unlikely_notrace(x)     __builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({                     \
                        int ______r;                                    \
                        static struct ftrace_likely_data                \
                                __attribute__((__aligned__(4)))         \
                                __attribute__((section("_ftrace_annotated_branch"))) \
                                ______f = {                             \
                                .data.func = __func__,                  \
                                .data.file = __FILE__,                  \
                                .data.line = __LINE__,                  \
                        };                                              \
                        ______r = __builtin_expect(!!(x), expect);      \
                        ftrace_likely_update(&______f, ______r,         \
                                             expect, is_constant);      \
                        ______r;                                        \
                })

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)     (__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)   (__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
        if (__builtin_constant_p(!!(cond)) ? !!(cond) :                 \
        ({                                                              \
                int ______r;                                            \
                static struct ftrace_branch_data                        \
                        __attribute__((__aligned__(4)))                 \
                        __attribute__((section("_ftrace_branch")))      \
                        ______f = {                                     \
                                .func = __func__,                       \
                                .file = __FILE__,                       \
                                .line = __LINE__,                       \
                        };                                              \
                ______r = !!(cond);                                     \
                ______f.miss_hit[______r]++;                            \
                ______r;                                                \
        }))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)      __builtin_expect(!!(x), 1)
# define unlikely(x)    __builtin_expect(!!(x), 0)
#endif
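/*
 * Example (illustrative): annotate the expected outcome of a test so the
 * compiler can lay out the common case as the straight-line path:
 *
 *	if (unlikely(!ptr))
 *		return -ENOMEM;
 *	// fast path continues without a taken branch
 */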

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
#define annotate_reachable() ({                                         \
        asm("%c0:\n\t"                                                  \
            ".pushsection .discard.reachable\n\t"                       \
            ".long %c0b - .\n\t"                                        \
            ".popsection\n\t" : : "i" (__COUNTER__));                   \
})
#define annotate_unreachable() ({                                       \
        asm("%c0:\n\t"                                                  \
            ".pushsection .discard.unreachable\n\t"                     \
            ".long %c0b - .\n\t"                                        \
            ".popsection\n\t" : : "i" (__COUNTER__));                   \
})
#define ASM_UNREACHABLE                                                 \
        "999:\n\t"                                                      \
        ".pushsection .discard.unreachable\n\t"                         \
        ".long 999b - .\n\t"                                            \
        ".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif
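/*
 * Example (illustrative): tell the compiler that control cannot fall
 * past a point, e.g. after a switch that covers every possible value:
 *
 *	switch (mode) {
 *	case MODE_A: return handle_a();
 *	case MODE_B: return handle_b();
 *	}
 *	unreachable();
 */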

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)                                            \
        extern typeof(sym) sym;                                 \
        static const unsigned long __kentry_##sym               \
        __used                                                  \
        __attribute__((section("___kentry" "+" #sym ), used))   \
        = (unsigned long)&sym;
#endif
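/*
 * Example (illustrative; my_vector_handler is a hypothetical symbol):
 * keep a handler alive even though no C code references it by name:
 *
 *	void my_vector_handler(void);
 *	KENTRY(my_vector_handler);
 */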

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)                                   \
  ({ unsigned long __ptr;                                       \
     __ptr = (unsigned long) (ptr);                             \
    (typeof(ptr)) (__ptr + (off)); })
#endif
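/*
 * Example (illustrative sketch): offset a pointer through an integer so
 * the compiler cannot see the arithmetic and apply pointer-overflow
 * assumptions, in the style of per-CPU variable addressing:
 *
 *	struct foo *remote = RELOC_HIDE(&foo_var, cpu_offset);
 */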

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
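/*
 * Example (illustrative): on line 123, __UNIQUE_ID(foo) expands to the
 * identifier __UNIQUE_ID_foo123 - "not quite unique" because two
 * expansions at the same line number (e.g. in different files) can
 * still collide.
 */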

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE                                                \
({                                                                      \
        switch (size) {                                                 \
        case 1: *(__u8 *)res = *(volatile __u8 *)p; break;              \
        case 2: *(__u16 *)res = *(volatile __u16 *)p; break;            \
        case 4: *(__u32 *)res = *(volatile __u32 *)p; break;            \
        case 8: *(__u64 *)res = *(volatile __u64 *)p; break;            \
        default:                                                        \
                barrier();                                              \
                __builtin_memcpy((void *)res, (const void *)p, size);   \
                barrier();                                              \
        }                                                               \
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
 *      https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
        __READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
        case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
        case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
        case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
        default:
                barrier();
                __builtin_memcpy((void *)p, (const void *)res, size);
                barrier();
        }
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits),
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and one for the
 * macro's copy of the variable - '__u', allocated on the stack.
 *
 * Their two major use cases are: (1) mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)                                           \
({                                                                      \
        union { typeof(x) __val; char __c[1]; } __u;                    \
        if (check)                                                      \
                __read_once_size(&(x), __u.__c, sizeof(x));             \
        else                                                            \
                __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
        __u.__val;                                                      \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({                                                      \
        union { typeof(x) __val; char __c[1]; } __u =   \
                { .__val = (__force typeof(x)) (val) }; \
        __write_once_size(&(x), __u.__c, sizeof(x));    \
        __u.__val;                                      \
})
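/*
 * Example (illustrative): a flag shared between process context and an
 * interrupt handler; the ONCE macros keep the compiler from caching,
 * refetching, or tearing the accesses:
 *
 *	// writer (irq handler)
 *	WRITE_ONCE(done, 1);
 *
 *	// reader (process context)
 *	while (!READ_ONCE(done))
 *		cpu_relax();
 */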

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 *              int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated           /* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

#ifndef __malloc
#define __malloc
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >= 3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used                 /* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused         /* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused        /* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead - for documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__    /* unimplemented */
#endif

#ifndef __designated_init
# define __designated_init
#endif

#ifndef __latent_entropy
# define __latent_entropy
#endif

#ifndef __randomize_layout
# define __randomize_layout __designated_init
#endif

#ifndef __no_randomize_layout
# define __no_randomize_layout
#endif

#ifndef randomized_struct_fields_start
# define randomized_struct_fields_start
# define randomized_struct_fields_end
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif
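/*
 * Example (illustrative; the section name is hypothetical): place a
 * variable in a named ELF section to be collected by the linker script.
 * Note the argument is stringified by the macro, so it is not quoted:
 *
 *	static int my_marker __section(.data.markers) = 1;
 */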

#ifndef __visible
#define __visible
#endif

#ifndef __nostackprotector
# define __nostackprotector
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
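/*
 * Example (illustrative sketch; BUILD_BUG_ON comes from <linux/bug.h>):
 * __same_type() can enforce at compile time that a macro argument is a
 * real array rather than a pointer, since an array and a pointer to its
 * first element have incompatible types:
 *
 *	#define ARRAY_CHECK(arr) \
 *		BUILD_BUG_ON(__same_type((arr), &(arr)[0]))
 */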

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
        do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)           \
        do {                                                            \
                bool __cond = !(condition);                             \
                extern void prefix ## suffix(void) __compiletime_error(msg); \
                if (__cond)                                             \
                        prefix ## suffix();                             \
                __compiletime_error_fallback(__cond);                   \
        } while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
        __compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
        _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)                               \
        compiletime_assert(__native_word(t),                            \
                "Need native word sized stores/loads for atomicity.")
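/*
 * Example (illustrative; struct my_hdr is hypothetical): reject a bad
 * configuration at build time with a readable message instead of a
 * runtime check:
 *
 *	compiletime_assert(sizeof(struct my_hdr) == 16,
 *			   "struct my_hdr must stay 16 bytes");
 */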

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
         __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
        (volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
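/*
 * Example (illustrative): a forced volatile access to a scalar; as noted
 * above, new code should prefer READ_ONCE()/WRITE_ONCE():
 *
 *	while (!ACCESS_ONCE(stop_flag))
 *		cpu_relax();
 */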

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
        typeof(p) _________p1 = READ_ONCE(p); \
        typeof(*(p)) *___typecheck_p __maybe_unused; \
        smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
})
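/*
 * Example (illustrative sketch): consume a node published by a writer
 * whose lifetime is managed by reference counting rather than RCU; the
 * load pairs with the writer's ordered store of the pointer:
 *
 *	struct node *n = lockless_dereference(head);
 *	if (n)
 *		use(n->data);
 */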

#endif /* __LINUX_COMPILER_H */