1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include "qemu/osdep.h"
22#include "cpu.h"
23#include "exec/helper-proto.h"
24#include "exec/cpu_ldst.h"
25#include "tcg/helper-tcg.h"
26#include "../seg_helper.h"
27
#ifdef TARGET_X86_64
/*
 * SYSCALL instruction: fast kernel entry.
 *
 * @next_eip_addend: length of the SYSCALL instruction, added to the
 * current EIP to form the return address saved for SYSRET.
 *
 * Raises #UD if SYSCALL is disabled (EFER.SCE clear).  The new CS
 * selector comes from STAR[47:32]; SS is implicitly that selector + 8.
 * In long mode the return RIP is saved in RCX and RFLAGS in R11, and
 * control transfers to LSTAR (64-bit caller) or CSTAR (compat-mode
 * caller).  In legacy mode only the 32-bit EIP is saved in ECX and
 * control transfers to the low 32 bits of STAR.
 */
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    /* STAR[47:32] holds the kernel CS selector.  */
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* Save return address and pre-masked flags for SYSRET.  */
        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;

        /* Remember whether the caller was in 64-bit code (vs compat).  */
        code64 = env->hflags & HF_CS64_MASK;

        /* Clear the flags selected by SFMASK; RF is always cleared.  */
        env->eflags &= ~(env->fmask | RF_MASK);
        cpu_load_eflags(env, env->eflags, 0);
        /* Flat 64-bit code segment (L bit set, flat base/limit).  */
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        /* Flat writable data segment for SS at selector + 8.  */
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        /* Legacy mode: only 32-bit EIP is saved, and only in ECX.  */
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        /* Flat 32-bit code segment (B bit set, no L bit).  */
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* Target is the low 32 bits of STAR.  */
        env->eip = (uint32_t)env->star;
    }
}
#endif
81
/*
 * SVM: record an event being delivered at #VMEXIT time in the VMCB
 * EVENTINJ field, so the hypervisor can see (and possibly re-inject)
 * the pending exception or software interrupt.
 *
 * @intno: vector number of the event
 * @is_int: non-zero for a software interrupt (INTn), zero for an exception
 * @error_code: error code to report, if the vector pushes one
 * @is_hw: hardware-interrupt flag (unused here; kept for caller symmetry)
 * @rm: non-zero when in real mode, where exceptions carry no error code
 *
 * If EVENTINJ already holds a valid event, it is left untouched.
 */
void handle_even_inj(CPUX86State *env, int intno, int is_int,
                     int error_code, int is_hw, int rm)
{
    CPUState *cs = env_cpu(env);
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        /* Real-mode exceptions never deliver an error code.  */
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
109
110void x86_cpu_do_interrupt(CPUState *cs)
111{
112 X86CPU *cpu = X86_CPU(cs);
113 CPUX86State *env = &cpu->env;
114
115 if (cs->exception_index == EXCP_VMEXIT) {
116 assert(env->old_exception == -1);
117 do_vmexit(env);
118 } else {
119 do_interrupt_all(cpu, cs->exception_index,
120 env->exception_is_int,
121 env->error_code,
122 env->exception_next_eip, 0);
123
124 env->old_exception = -1;
125 }
126}
127
/*
 * Service the highest-priority pending interrupt, if any.
 *
 * x86_cpu_pending_interrupt() reduces @interrupt_request to the single
 * request that should be handled now (or 0).  Each case checks the SVM
 * intercept first (which may #VMEXIT instead of delivering), then clears
 * the request bit and performs the delivery.
 *
 * Returns true if an interrupt was serviced, false if nothing is pending.
 */
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    switch (interrupt_request) {
    case CPU_INTERRUPT_POLL:
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        /* NMIs are blocked until the next IRET.  */
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        /* A real interrupt also cancels any pending virtual one.  */
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
    case CPU_INTERRUPT_VIRQ:
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        /* Vector for a virtual interrupt comes from the VMCB.  */
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        /* Cleared after delivery: the intercept above may not return.  */
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        env->int_ctl &= ~V_IRQ_MASK;
        break;
    }

    /* Ensure that no TB jump will be modified as the program flow was changed.  */
    return true;
}
189
190
/*
 * Check the TSS I/O permission bitmap for an I/O access.
 *
 * @addr: I/O port number
 * @size: access width in bytes (1, 2 or 4)
 *
 * Raises #GP(0) unless every bit covering [addr, addr + size) is clear
 * in the I/O permission bitmap of the current (32-bit, present) TSS.
 */
void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
{
    uintptr_t retaddr = GETPC();
    uint32_t io_offset, val, mask;

    /* TSS must be present, 32-bit (type 9) and large enough (>= 103).  */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    /* Bitmap base offset is at TSS byte 0x66; one bit per port.  */
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes, since the bits may straddle one.  */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* All bits for the accessed ports must be zero to allow the I/O.  */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}
217