linux/arch/x86/kernel/paravirt_patch_32.c
// SPDX-License-Identifier: GPL-2.0
#include <asm/paravirt.h>

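/*
 * DEF_NATIVE(ops, name, code) emits the literal native instruction
 * sequence for a paravirt operation, bracketed by start_<ops>_<name>
 * and end_<ops>_<name> labels.  native_patch() below copies those
 * bytes straight over the corresponding paravirt call site when the
 * kernel runs on bare hardware.
 */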
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
#endif

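/*
 * Identity ops just hand their argument back.  Under the 32-bit
 * calling convention the value already arrives and leaves in %eax
 * (%edx:%eax for 64-bit values), so no replacement instructions are
 * needed; returning a patch length of 0 lets the caller pad the
 * whole site with NOPs.
 */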
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
        /* arg in %eax, return in %eax */
        return 0;
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
        /* arg in %edx:%eax, return in %edx:%eax */
        return 0;
}

extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);

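/*
 * Patch a paravirt call site in place.  Ops with a native sequence
 * defined above get those instructions copied directly over the call
 * site; everything else is handed to paravirt_patch_default().
 */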
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
{
        const unsigned char *start, *end;
        unsigned ret;

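/*
 * PATCH_SITE(ops, x) expands to a case label that picks up the
 * start/end range emitted by the matching DEF_NATIVE() and jumps to
 * the common copy path at patch_site.
 */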
#define PATCH_SITE(ops, x)                                      \
                case PARAVIRT_PATCH(ops.x):                     \
                        start = start_##ops##_##x;              \
                        end = end_##ops##_##x;                  \
                        goto patch_site
        switch (type) {
                PATCH_SITE(pv_irq_ops, irq_disable);
                PATCH_SITE(pv_irq_ops, irq_enable);
                PATCH_SITE(pv_irq_ops, restore_fl);
                PATCH_SITE(pv_irq_ops, save_fl);
                PATCH_SITE(pv_cpu_ops, iret);
                PATCH_SITE(pv_mmu_ops, read_cr2);
                PATCH_SITE(pv_mmu_ops, read_cr3);
                PATCH_SITE(pv_mmu_ops, write_cr3);
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
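                /*
                 * Only patch the spinlock ops with their native
                 * instructions while the kernel still uses the native
                 * qspinlock implementations; if a pv-aware host has
                 * replaced them, use the default patching path instead.
                 */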
                case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
                        if (pv_is_native_spin_unlock()) {
                                start = start_pv_lock_ops_queued_spin_unlock;
                                end   = end_pv_lock_ops_queued_spin_unlock;
                                goto patch_site;
                        }
                        goto patch_default;

                case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
                        if (pv_is_native_vcpu_is_preempted()) {
                                start = start_pv_lock_ops_vcpu_is_preempted;
                                end   = end_pv_lock_ops_vcpu_is_preempted;
                                goto patch_site;
                        }
                        goto patch_default;
#endif

        default:
patch_default: __maybe_unused
                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                break;

patch_site:
                ret = paravirt_patch_insns(ibuf, len, start, end);
                break;
        }
#undef PATCH_SITE
        return ret;
}