#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <linux/stringify.h>

/* Native instruction sequences used to patch over paravirt call sites. */
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_cpu_ops, iret, "iretq");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit");
DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

/* Identity functions just return their first argument. */
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
DEF_NATIVE(pv_lock_ops, unlock_kick, "nop");
#endif

unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov64, end__mov64);
}

extern bool pv_is_native_spin_unlock(void);

/*
 * Replace a paravirt call site with the corresponding native instruction
 * sequence where one exists; fall back to paravirt_patch_default() for
 * everything else.
 */
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

#define PATCH_SITE(ops, x)					\
		case PARAVIRT_PATCH(ops.x):			\
			start = start_##ops##_##x;		\
			end = end_##ops##_##x;			\
			goto patch_site
	switch (type) {
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_cpu_ops, iret);
		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
		PATCH_SITE(pv_cpu_ops, usergs_sysret32);
		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
		PATCH_SITE(pv_cpu_ops, swapgs);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_cpu_ops, clts);
	case PARAVIRT_PATCH(pv_mmu_ops.flush_tlb_single):
		/*
		 * Only patch in the bare "invlpg" when PCID is not in use;
		 * otherwise keep the default call, since a single-page
		 * flush is more involved with PCID.
		 */
		if (!boot_cpu_has(X86_FEATURE_PCID)) {
			start = start_pv_mmu_ops_flush_tlb_single;
			end = end_pv_mmu_ops_flush_tlb_single;
			goto patch_site;
		} else {
			goto patch_default;
		}
		PATCH_SITE(pv_cpu_ops, wbinvd);
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
	/* Patch the lock ops only when the native spin unlock is in use. */
	case PARAVIRT_PATCH(pv_lock_ops.unlock_kick):
		if (pv_is_native_spin_unlock()) {
			start = start_pv_lock_ops_unlock_kick;
			end = end_pv_lock_ops_unlock_kick;
			goto patch_site;
		} else {
			goto patch_default;
		}

	case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
		if (pv_is_native_spin_unlock()) {
			start = start_pv_lock_ops_queued_spin_unlock;
			end = end_pv_lock_ops_queued_spin_unlock;
			goto patch_site;
		}
		/* Not native: fall through to the default patching below. */
#endif

patch_default:
	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}