/* linux/arch/x86/include/asm/virtext.h */
/* CPU virtualization extensions handling
 *
 * This should carry the code for handling CPU virtualization extensions
 * that needs to live in the kernel core.
 *
 * Author: Eduardo Habkost <ehabkost@redhat.com>
 *
 * Copyright (C) 2008, Red Hat Inc.
 *
 * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#ifndef _ASM_X86_VIRTEX_H
#define _ASM_X86_VIRTEX_H

#include <asm/processor.h>

#include <asm/vmx.h>
#include <asm/svm.h>

/*
 * VMX functions:
 */

  27static inline int cpu_has_vmx(void)
  28{
  29        unsigned long ecx = cpuid_ecx(1);
  30        return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
  31}
  32
  33
  34/** Disable VMX on the current CPU
  35 *
  36 * vmxoff causes a undefined-opcode exception if vmxon was not run
  37 * on the CPU previously. Only call this function if you know VMX
  38 * is enabled.
  39 */
  40static inline void cpu_vmxoff(void)
  41{
  42        asm volatile (ASM_VMX_VMXOFF : : : "cc");
  43        write_cr4(read_cr4() & ~X86_CR4_VMXE);
  44}
  45
  46static inline int cpu_vmx_enabled(void)
  47{
  48        return read_cr4() & X86_CR4_VMXE;
  49}
  50
  51/** Disable VMX if it is enabled on the current CPU
  52 *
  53 * You shouldn't call this if cpu_has_vmx() returns 0.
  54 */
  55static inline void __cpu_emergency_vmxoff(void)
  56{
  57        if (cpu_vmx_enabled())
  58                cpu_vmxoff();
  59}
  60
  61/** Disable VMX if it is supported and enabled on the current CPU
  62 */
  63static inline void cpu_emergency_vmxoff(void)
  64{
  65        if (cpu_has_vmx())
  66                __cpu_emergency_vmxoff();
  67}




/*
 * SVM functions:
 */

  76/** Check if the CPU has SVM support
  77 *
  78 * You can use the 'msg' arg to get a message describing the problem,
  79 * if the function returns zero. Simply pass NULL if you are not interested
  80 * on the messages; gcc should take care of not generating code for
  81 * the messages on this case.
  82 */
  83static inline int cpu_has_svm(const char **msg)
  84{
  85        uint32_t eax, ebx, ecx, edx;
  86
  87        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
  88                if (msg)
  89                        *msg = "not amd";
  90                return 0;
  91        }
  92
  93        cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
  94        if (eax < SVM_CPUID_FUNC) {
  95                if (msg)
  96                        *msg = "can't execute cpuid_8000000a";
  97                return 0;
  98        }
  99
 100        cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
 101        if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
 102                if (msg)
 103                        *msg = "svm not available";
 104                return 0;
 105        }
 106        return 1;
 107}
 108
 109
/** Disable SVM on the current CPU
 *
 * You should call this only if cpu_has_svm() returned true.
 */
static inline void cpu_svm_disable(void)
{
	uint64_t efer;

	/* Zero the host save-area pointer before leaving SVM. */
	wrmsrl(MSR_VM_HSAVE_PA, 0);
	/* Read-modify-write EFER so only the SVME bit is cleared. */
	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer & ~EFER_SVME);
}

 123/** Makes sure SVM is disabled, if it is supported on the CPU
 124 */
 125static inline void cpu_emergency_svm_disable(void)
 126{
 127        if (cpu_has_svm(NULL))
 128                cpu_svm_disable();
 129}

#endif /* _ASM_X86_VIRTEX_H */