linux/arch/x86/include/asm/percpu.h
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg            gs
#define __percpu_mov_op         movq
#else
#define __percpu_seg            fs
#define __percpu_mov_op         movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32-bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)                                               \
        __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;      \
        lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var)        %__percpu_seg:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg)                                               \
        __percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var)        per_cpu__##var
#endif  /* SMP */
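
/*
 * PER_CPU_VAR() gives a segment-relative operand for assembly code.
 * A minimal usage sketch ("old_rsp" stands in for whatever per-cpu
 * variable the caller actually addresses):
 *
 *    movq %rsp, PER_CPU_VAR(old_rsp)
 *
 * On SMP this expands to %gs:per_cpu__old_rsp (x86-64); on UP it is
 * a plain memory reference to per_cpu__old_rsp.
 */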

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)  per_cpu__##var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_arg(x)         "%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset         percpu_read(this_cpu_off)
#else
#define __percpu_arg(x)         "%P" #x
#endif
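
/*
 * For reference: on x86-64 SMP, __percpu_arg(0) expands to the string
 * "%%gs:%P0", so an asm template built with it reads or writes operand
 * 0 relative to the per-cpu segment base.  On UP the segment prefix is
 * simply omitted.
 */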

/*
 * Initialized pointers to per-cpu variables needed by the boot
 * processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be a matching entry in vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(per_cpu_var(var)) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)  init_per_cpu__##var
#else
#define init_per_cpu_var(var)  per_cpu_var(var)
#endif
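
/*
 * Usage sketch: a boot-time reference such as
 *
 *    DECLARE_INIT_PER_CPU(gdt_page);
 *    ... = &init_per_cpu_var(gdt_page);
 *
 * resolves to the init_per_cpu__gdt_page copy on 64-bit SMP and to the
 * ordinary per_cpu__gdt_page symbol otherwise.  (gdt_page is one such
 * user; any per-cpu variable referenced this early works the same way.)
 */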

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)                      \
do {                                                    \
        typedef typeof(var) T__;                        \
        /* type-check val against var; emits no code */ \
        if (0) {                                        \
                T__ tmp__;                              \
                tmp__ = (val);                          \
        }                                               \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "qi" ((T__)(val)));               \
                break;                                  \
        case 2:                                         \
                asm(op "w %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)(val)));               \
                break;                                  \
        case 4:                                         \
                asm(op "l %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)(val)));               \
                break;                                  \
        case 8:                                         \
                asm(op "q %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "re" ((T__)(val)));               \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
} while (0)
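
/*
 * Expansion sketch (not generated literally; assumes a 4-byte variable
 * on x86-64 SMP): percpu_add(foo, 1) becomes roughly
 *
 *    addl $1, %gs:per_cpu__foo
 *
 * i.e. a single read-modify-write instruction, with no address
 * computation visible to the compiler.
 */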

#define percpu_from_op(op, var, constraint)             \
({                                                      \
        typeof(var) ret__;                              \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b "__percpu_arg(1)",%0"         \
                    : "=q" (ret__)                      \
                    : constraint);                      \
                break;                                  \
        case 2:                                         \
                asm(op "w "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : constraint);                      \
                break;                                  \
        case 4:                                         \
                asm(op "l "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : constraint);                      \
                break;                                  \
        case 8:                                         \
                asm(op "q "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : constraint);                      \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
        ret__;                                          \
})
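
/*
 * Expansion sketch (again assuming a 4-byte variable on x86-64 SMP):
 * percpu_read(foo) compiles to a single
 *
 *    movl %gs:per_cpu__foo, %eax
 *
 * style load into whatever register the compiler picks for ret__.
 */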

/*
 * percpu_read() makes gcc load the percpu variable every time it is
 * accessed, while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info(), both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define percpu_read(var)        percpu_from_op("mov", per_cpu__##var,   \
                                               "m" (per_cpu__##var))
#define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var,   \
                                               "p" (&per_cpu__##var))
#define percpu_write(var, val)  percpu_to_op("mov", per_cpu__##var, val)
#define percpu_add(var, val)    percpu_to_op("add", per_cpu__##var, val)
#define percpu_sub(var, val)    percpu_to_op("sub", per_cpu__##var, val)
#define percpu_and(var, val)    percpu_to_op("and", per_cpu__##var, val)
#define percpu_or(var, val)     percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val)    percpu_to_op("xor", per_cpu__##var, val)
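
/*
 * Usage sketch with a made-up variable (these accessors take the bare
 * name; the per_cpu__ prefix is pasted on by the macros):
 *
 *    DEFINE_PER_CPU(unsigned int, hypothetical_count);
 *
 *    percpu_write(hypothetical_count, 0);
 *    percpu_add(hypothetical_count, 1);
 *    n = percpu_read(hypothetical_count);
 *
 * Each line is one instruction on the local CPU's copy; callers must
 * keep preemption disabled if they need the same copy throughout.
 */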

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)                         \
({                                                                      \
        int old__;                                                      \
        asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"           \
                     : "=r" (old__), "+m" (per_cpu__##var)              \
                     : "dIr" (bit));                                    \
        old__;                                                          \
})
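
/*
 * How the above works: btr copies the old bit into CF and clears it in
 * memory; sbbl %0,%0 then turns CF into 0 or -1, so old__ is nonzero
 * exactly when the bit was previously set.
 */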

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu
 * areas are allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)                  \
        DEFINE_PER_CPU(_type, _name) = _initvalue;                      \
        __typeof__(_type) _name##_early_map[NR_CPUS] __initdata =       \
                                { [0 ... NR_CPUS-1] = _initvalue };     \
        __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)                      \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)                     \
        DECLARE_PER_CPU(_type, _name);                          \
        extern __typeof__(_type) *_name##_early_ptr;            \
        extern __typeof__(_type)  _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu)                              \
        *(early_per_cpu_ptr(_name) ?                            \
                &early_per_cpu_ptr(_name)[_cpu] :               \
                &per_cpu(_name, _cpu))
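
/*
 * Usage sketch: x86_cpu_to_apicid is one such variable.  Something like
 *
 *    DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *    ...
 *    apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 *
 * reads from the static _early_map before the per_cpu areas exist, and
 * transparently switches to per_cpu() once _early_ptr is set to NULL.
 */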

#else   /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)          \
        DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)                      \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)                     \
        DECLARE_PER_CPU(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif  /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */