qemu/target/i386/hvf/x86.c
/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "qemu-common.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86_mmu.h"
#include "x86_descr.h"

/* static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)
{
   uint32_t ar;

   if (!var->p) {
       ar = 1 << 16;
       return ar;
   }

   ar = var->type & 15;
   ar |= (var->s & 1) << 4;
   ar |= (var->dpl & 3) << 5;
   ar |= (var->p & 1) << 7;
   ar |= (var->avl & 1) << 12;
   ar |= (var->l & 1) << 13;
   ar |= (var->db & 1) << 14;
   ar |= (var->g & 1) << 15;
   return ar;
}*/

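/*
 * Fetch the 8-byte segment descriptor selected by 'sel' from the guest's
 * GDT or LDT (chosen by sel.ti). Fails for the null GDT selector and for
 * selectors whose byte offset lies beyond the table limit.
 */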
bool x86_read_segment_descriptor(struct CPUState *cpu,
                                 struct x86_segment_descriptor *desc,
                                 x68_segment_selector sel)
{
    target_ulong base;
    uint32_t limit;

    memset(desc, 0, sizeof(*desc));

    /* valid GDT descriptors start from index 1; index 0 is the null selector */
    if (!sel.index && GDT_SEL == sel.ti) {
        return false;
    }

    if (GDT_SEL == sel.ti) {
        base  = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
    } else {
        base  = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
    }

    if (sel.index * 8 >= limit) {
        return false;
    }

    vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));
    return true;
}

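/*
 * Write 'desc' back into the guest's GDT or LDT slot named by 'sel',
 * mirroring the bounds check in x86_read_segment_descriptor() above.
 */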
bool x86_write_segment_descriptor(struct CPUState *cpu,
                                  struct x86_segment_descriptor *desc,
                                  x68_segment_selector sel)
{
    target_ulong base;
    uint32_t limit;

    if (GDT_SEL == sel.ti) {
        base  = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_BASE);
        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_GDTR_LIMIT);
    } else {
        base  = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_BASE);
        limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_LDTR_LIMIT);
    }

    if (sel.index * 8 >= limit) {
        /* message covers both tables; the selector may reference the LDT */
        printf("%s: descriptor table limit exceeded\n", __func__);
        return false;
    }
    vmx_write_mem(cpu, base + sel.index * 8, desc, sizeof(*desc));
    return true;
}

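/*
 * Read the gate descriptor for vector 'gate' from the guest IDT. Note that
 * this assumes legacy 8-byte gate entries (long-mode IDT entries are 16
 * bytes wide).
 */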
bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
                        int gate)
{
    target_ulong base  = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_BASE);
    uint32_t limit = rvmcs(cpu->hvf->fd, VMCS_GUEST_IDTR_LIMIT);

    memset(idt_desc, 0, sizeof(*idt_desc));
    if (gate * 8 >= limit) {
        printf("%s: idt limit\n", __func__);
        return false;
    }

    vmx_read_mem(cpu, idt_desc, base + gate * 8, sizeof(*idt_desc));
    return true;
}

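/* Guest CPU mode predicates, derived from the VMCS guest-state fields. */
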
bool x86_is_protected(struct CPUState *cpu)
{
    uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
    return cr0 & CR0_PE;
}

bool x86_is_real(struct CPUState *cpu)
{
    return !x86_is_protected(cpu);
}

bool x86_is_v8086(struct CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    return x86_is_protected(cpu) && (env->eflags & VM_MASK);
}

bool x86_is_long_mode(struct CPUState *cpu)
{
    return rvmcs(cpu->hvf->fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
}

bool x86_is_long64_mode(struct CPUState *cpu)
{
    struct vmx_segment desc;
    vmx_read_segment_descriptor(cpu, &desc, R_CS);

    /* LMA set and the CS 'L' flag (bit 13 of the VMX access rights) set */
    return x86_is_long_mode(cpu) && ((desc.ar >> 13) & 1);
}

bool x86_is_paging_mode(struct CPUState *cpu)
{
    uint64_t cr0 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0);
    return cr0 & CR0_PG;
}

bool x86_is_pae_enabled(struct CPUState *cpu)
{
    uint64_t cr4 = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
    return cr4 & CR4_PAE;
}

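/*
 * Linear-address helpers: add the segment base (read from the VMCS) to an
 * effective address, optionally truncating it to the given address size.
 */
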
target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, X86Seg seg)
{
    return vmx_read_segment_base(cpu, seg) + addr;
}

target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
                              X86Seg seg)
{
    /* truncate the effective address to the given address size */
    switch (size) {
    case 2:
        addr = (uint16_t)addr;
        break;
    case 4:
        addr = (uint32_t)addr;
        break;
    default:
        break;
    }
    return linear_addr(cpu, addr, seg);
}

target_ulong linear_rip(struct CPUState *cpu, target_ulong rip)
{
    return linear_addr(cpu, rip, R_CS);
}