linux/include/linux/compiler.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                          int expect, int is_constant);

#define likely_notrace(x)       __builtin_expect(!!(x), 1)
#define unlikely_notrace(x)     __builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({                     \
                        long ______r;                                   \
                        static struct ftrace_likely_data                \
                                __aligned(4)                            \
                                __section("_ftrace_annotated_branch")   \
                                ______f = {                             \
                                .data.func = __func__,                  \
                                .data.file = __FILE__,                  \
                                .data.line = __LINE__,                  \
                        };                                              \
                        ______r = __builtin_expect(!!(x), expect);      \
                        ftrace_likely_update(&______f, ______r,         \
                                             expect, is_constant);      \
                        ______r;                                        \
                })

/*
 * Use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)     (__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)   (__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({                       \
        static struct ftrace_branch_data                \
                __aligned(4)                            \
                __section("_ftrace_branch")             \
                __if_trace = {                          \
                        .func = __func__,               \
                        .file = __FILE__,               \
                        .line = __LINE__,               \
                };                                      \
        (cond) ?                                        \
                (__if_trace.miss_hit[1]++,1) :          \
                (__if_trace.miss_hit[0]++,0);           \
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)      __builtin_expect(!!(x), 1)
# define unlikely(x)    __builtin_expect(!!(x), 0)
# define likely_notrace(x)      likely(x)
# define unlikely_notrace(x)    unlikely(x)
#endif
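
/*
 * Illustrative sketch (not part of the original header): likely() and
 * unlikely() annotate the expected truth value of a condition, e.g.
 *
 *      if (unlikely(!ptr))
 *              return -ENOMEM;
 *
 * With CONFIG_TRACE_BRANCH_PROFILING enabled, the __branch_check__()
 * wrapper above additionally records how often each prediction was
 * correct; otherwise the macros reduce to plain __builtin_expect().
 */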

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif

#ifndef barrier_data
/*
 * This variant exists e.g. to prevent dead-store elimination on @ptr,
 * where gcc and llvm behave differently with a plain barrier(): gcc is
 * fine with a normal barrier(), but llvm needs an explicit input
 * operand before it assumes the pointed-to memory is clobbered. The
 * issue is as follows: while the inline asm might access any memory it
 * wants, the compiler could have kept all of @ptr in registers instead,
 * and since @ptr never escaped from there, it could prove the inline
 * asm wasn't touching any of it. This version works with both
 * compilers: we are telling the compiler that the inline asm absolutely
 * may see the contents of @ptr.
 * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
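
/*
 * Illustrative sketch: a typical barrier_data() user wipes sensitive
 * data and must not have the stores optimized away just because the
 * buffer is about to go out of scope, e.g.
 *
 *      char key[16];
 *      ...
 *      memset(key, 0, sizeof(key));
 *      barrier_data(key);
 *
 * memzero_explicit() implements exactly this pattern.
 */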

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define __stringify_label(n) #n

#define __annotate_reachable(c) ({                                      \
        asm volatile(__stringify_label(c) ":\n\t"                       \
                     ".pushsection .discard.reachable\n\t"              \
                     ".long " __stringify_label(c) "b - .\n\t"          \
                     ".popsection\n\t");                                \
})
#define annotate_reachable() __annotate_reachable(__COUNTER__)

#define __annotate_unreachable(c) ({                                    \
        asm volatile(__stringify_label(c) ":\n\t"                       \
                     ".pushsection .discard.unreachable\n\t"            \
                     ".long " __stringify_label(c) "b - .\n\t"          \
                     ".popsection\n\t");                                \
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)

#define ASM_UNREACHABLE                                                 \
        "999:\n\t"                                                      \
        ".pushsection .discard.unreachable\n\t"                         \
        ".long 999b - .\n\t"                                            \
        ".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {             \
        annotate_unreachable();         \
        __builtin_unreachable();        \
} while (0)
#endif
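
/*
 * Illustrative sketch with made-up names: unreachable() is placed after
 * a point that can never be reached at runtime, so both the optimizer
 * and objtool know control flow ends there, e.g. after a switch that
 * handles every possible value:
 *
 *      switch (state) {
 *      case STATE_A:
 *              return handle_a();
 *      case STATE_B:
 *              return handle_b();
 *      }
 *      unreachable();
 */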

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)                                            \
        extern typeof(sym) sym;                                 \
        static const unsigned long __kentry_##sym               \
        __used                                                  \
        __attribute__((__section__("___kentry+" #sym)))         \
        = (unsigned long)&sym;
#endif
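
/*
 * Illustrative sketch with a hypothetical symbol: a handler reached only
 * through a vector table built outside the compiler's view could be kept
 * alive with
 *
 *      void my_vector_entry(void);
 *      KENTRY(my_vector_entry);
 *
 * which emits a __kentry_my_vector_entry reference so the symbol is not
 * discarded for lack of visible users.
 */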

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)                                   \
  ({ unsigned long __ptr;                                       \
     __ptr = (unsigned long) (ptr);                             \
    (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)   RELOC_HIDE((void *)(val), 0)
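
/*
 * Illustrative sketch: absolute_pointer() is used when dereferencing a
 * fixed, well-known address, so the compiler does not treat the access
 * as an out-of-bounds offset from some unrelated object, e.g.
 *
 *      memcpy(mac, absolute_pointer(0xf0810000), 6);
 *
 * The address above is only an example value.
 */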

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)                                         \
        __asm__ ("" : "=r" (var) : "0" (var))
#endif
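
/*
 * Illustrative sketch: OPTIMIZER_HIDE_VAR() breaks the compiler's value
 * tracking for a variable, e.g. to keep an accumulated comparison result
 * in constant-time code from being short-circuited:
 *
 *      unsigned long neq = 0;
 *      ...
 *      neq |= a[i] ^ b[i];
 *      OPTIMIZER_HIDE_VAR(neq);
 *      ...
 *      return neq;
 *
 * crypto_memneq() uses this pattern.
 */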

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven.  One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.
 */
#define data_race(expr)                                                 \
({                                                                      \
        __unqual_scalar_typeof(({ expr; })) __v = ({                    \
                __kcsan_disable_current();                              \
                expr;                                                   \
        });                                                             \
        __kcsan_enable_current();                                       \
        __v;                                                            \
})
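
/*
 * Illustrative sketch with a made-up variable: a diagnostic read of a
 * shared counter that needs no synchronization can be marked so KCSAN
 * does not flag it:
 *
 *      pr_debug("events so far: %lu\n", data_race(total_events));
 */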

/*
 * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
 * instrumented C code with jump table addresses. Architectures that
 * support CFI can define this macro to return the actual function address
 * when needed.
 */
#ifndef function_nocfi
#define function_nocfi(x) (x)
#endif
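
/*
 * Illustrative sketch with a hypothetical function: code that hands a
 * real entry point to firmware or low-level assembly, where a CFI jump
 * table address would be wrong, would use
 *
 *      addr = (unsigned long)function_nocfi(secondary_entry);
 */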

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
        static void * __section(".discard.addressable") __used \
                __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
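
/*
 * Illustrative sketch with a made-up symbol: a function referenced only
 * from inline assembly or a linker-built table can be kept around with
 *
 *      static void my_probe(void) { ... }
 *      __ADDRESSABLE(my_probe)
 */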

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:        the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
        return (void *)((unsigned long)off + *off);
}
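
/*
 * Illustrative sketch: offset_to_ptr() turns a stored "target - &entry"
 * value back into an absolute pointer:
 *
 *      const int *entry;
 *      void *target = offset_to_ptr(entry);
 *
 * target ends up as (unsigned long)entry + *entry.
 */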

#endif /* __ASSEMBLY__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)      BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
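
/*
 * Illustrative sketch: __must_be_array() lets macros such as ARRAY_SIZE()
 * (defined elsewhere in the kernel headers) fail the build when handed a
 * pointer instead of an array:
 *
 *      int a[8], *p = a;
 *      ARRAY_SIZE(a);  builds fine
 *      ARRAY_SIZE(p);  triggers BUILD_BUG_ON_ZERO()
 */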

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()        mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */