/* linux/arch/x86/include/asm/alternative.h */
#ifndef _ASM_X86_ALTERNATIVE_H
#define _ASM_X86_ALTERNATIVE_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>

/*
 * Alternative inline assembly for SMP.
 *
 * The LOCK_PREFIX macro defined here replaces the LOCK and
 * LOCK_PREFIX macros used everywhere in the source tree.
 *
 * SMP alternatives use the same data structures as the other
 * alternatives and the X86_FEATURE_UP flag to indicate the case of a
 * UP system running a SMP kernel.  The existing apply_alternatives()
 * works fine for patching a SMP kernel for UP.
 *
 * The SMP alternative tables can be kept after boot and contain both
 * UP and SMP versions of the instructions to allow switching back to
 * SMP at runtime, when hotplugging in a new CPU, which is especially
 * useful in virtualized environments.
 *
 * The very common lock prefix is handled as special case in a
 * separate table which is a pure address list without replacement ptr
 * and size information.  That keeps the table sizes small.
 */

#ifdef CONFIG_SMP
/*
 * Emit a "lock" prefix and record its address in the .smp_locks
 * section (address list only — see the comment above).  Label 661
 * marks the lock prefix so the patcher can find it and replace it
 * when switching between SMP and UP.
 */
#define LOCK_PREFIX \
		".section .smp_locks,\"a\"\n"	\
		_ASM_ALIGN "\n"			\
		_ASM_PTR "661f\n" /* address */	\
		".previous\n"			\
		"661:\n\tlock; "

#else /* ! CONFIG_SMP */
/* UP kernel: no lock prefix needed at all. */
#define LOCK_PREFIX ""
#endif

/* This must be included *after* the definition of LOCK_PREFIX */
#include <asm/cpufeature.h>

/*
 * One entry in the .altinstructions table: describes an original
 * instruction sequence and the replacement to patch in when the CPU
 * has the given feature bit.
 */
struct alt_instr {
	u8 *instr;		/* original instruction */
	u8 *replacement;	/* replacement instruction sequence */
	u8  cpuid;		/* cpuid bit set for replacement */
	u8  instrlen;		/* length of original instruction */
	u8  replacementlen;	/* length of new instruction, <= instrlen */
	u8  pad1;		/* explicit padding */
#ifdef CONFIG_X86_64
	u32 pad2;		/* pad entry to pointer-aligned size on 64-bit */
#endif
};

/* Apply all boot-time alternatives (called once during init). */
extern void alternative_instructions(void);
/* Patch every alt_instr entry in [start, end) whose feature bit is set. */
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);

struct module;

#ifdef CONFIG_SMP
/*
 * Register a module's .smp_locks list so its lock prefixes can be
 * patched when switching between SMP and UP at runtime.
 */
extern void alternatives_smp_module_add(struct module *mod, char *name,
					void *locks, void *locks_end,
					void *text, void *text_end);
/* Drop a module's lock list again (module unload). */
extern void alternatives_smp_module_del(struct module *mod);
/* Switch all recorded lock prefixes: smp != 0 -> SMP, smp == 0 -> UP. */
extern void alternatives_smp_switch(int smp);
#else
/* !CONFIG_SMP: nothing to track or patch — all no-ops. */
static inline void alternatives_smp_module_add(struct module *mod, char *name,
					       void *locks, void *locks_end,
					       void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif	/* CONFIG_SMP */

/*
 * alternative assembly primitive:
 *
 * Emits oldinstr in place and records an alt_instr-shaped entry in
 * .altinstructions (label, replacement ptr, feature bit, lengths)
 * pointing at newinstr, which is stored out of line in
 * .altinstr_replacement.  apply_alternatives() uses the entry to
 * patch oldinstr at boot when the feature bit is set.
 * Local labels: 661/662 bracket oldinstr, 663/664 bracket newinstr.
 */
#define ALTERNATIVE(oldinstr, newinstr, feature)			\
									\
      "661:\n\t" oldinstr "\n662:\n"					\
      ".section .altinstructions,\"a\"\n"				\
      _ASM_ALIGN "\n"							\
      _ASM_PTR "661b\n"				/* label           */	\
      _ASM_PTR "663f\n"				/* new instruction */	\
      "  .byte " __stringify(feature) "\n"	/* feature bit     */	\
      "  .byte 662b-661b\n"			/* sourcelen       */	\
      "  .byte 664f-663f\n"			/* replacementlen  */	\
      ".previous\n"							\
      ".section .altinstr_replacement, \"ax\"\n"			\
      "663:\n\t" newinstr "\n664:\n"		/* replacement     */	\
      ".previous"

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows to use optimized instructions even on generic binary
 * kernels.
 *
 * length of oldinstr must be longer or equal the length of newinstr
 * It can be padded with nops as needed.
 *
 * For non barrier like inlines please define new variants
 * without volatile and memory clobber.
 */
/* No inputs/outputs; "memory" clobber makes this a compiler barrier. */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable sized constraints like "m" or "g" in the
 * replacement make sure to pad to the worst case length.
 * Leaving an unused argument 0 to keep API compatibility.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: : "i" (0), ## input)

/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
	asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)		\
		: output : "i" (0), ## input)

/*
 * use this macro(s) if you need more than one output parameter
 * in alternative_io
 */
/* Joins two operands with a comma so they survive macro argument splitting. */
#define ASM_OUTPUT2(a, b) a, b

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
/* Patch paravirt call sites in [start, end) during alternatives processing. */
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
/* Paravirt disabled: nothing to patch and no patch-site table. */
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * More care must be taken when modifying code in the SMP case because of
 * Intel's errata.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
/* Copy len bytes of opcode to addr, bypassing write protection; returns addr. */
extern void *text_poke(void *addr, const void *opcode, size_t len);

#endif /* _ASM_X86_ALTERNATIVE_H */
 164