/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

/*
 * TODO: Convert callers to compute cpu_mmu_index_kernel once
 * and use *_mmuidx_ra directly.
 */
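/*
 * Note: these wrappers load and store through the kernel MMU index, so
 * descriptor tables and the TSS are always accessed with supervisor
 * rights regardless of the current CPL.  The "_ra" variants take a host
 * return address for precise exception unwinding; passing 0 means the
 * access is not made from within translated code.
 */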
#define cpu_ldub_kernel_ra(e, p, r) \
    cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_lduw_kernel_ra(e, p, r) \
    cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldl_kernel_ra(e, p, r) \
    cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldq_kernel_ra(e, p, r) \
    cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)

#define cpu_stb_kernel_ra(e, p, v, r) \
    cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stw_kernel_ra(e, p, v, r) \
    cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stl_kernel_ra(e, p, v, r) \
    cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stq_kernel_ra(e, p, v, r) \
    cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)

#define cpu_ldub_kernel(e, p) cpu_ldub_kernel_ra(e, p, 0)
#define cpu_lduw_kernel(e, p) cpu_lduw_kernel_ra(e, p, 0)
#define cpu_ldl_kernel(e, p) cpu_ldl_kernel_ra(e, p, 0)
#define cpu_ldq_kernel(e, p) cpu_ldq_kernel_ra(e, p, 0)

#define cpu_stb_kernel(e, p, v) cpu_stb_kernel_ra(e, p, v, 0)
#define cpu_stw_kernel(e, p, v) cpu_stw_kernel_ra(e, p, v, 0)
#define cpu_stl_kernel(e, p, v) cpu_stl_kernel_ra(e, p, v, 0)
#define cpu_stq_kernel(e, p, v) cpu_stq_kernel_ra(e, p, v, 0)

/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
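
/*
 * Worked example (illustrative): the flat 32-bit code descriptor
 * 0x00cf9a000000ffff reads as e1 = 0x0000ffff, e2 = 0x00cf9a00.
 * get_seg_base() assembles base[15:0] from e1[31:16] and base[31:16]
 * from e2, giving a base of 0; get_seg_limit() yields 0xfffff, which
 * the set G bit expands to 0xffffffff.
 */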
118
119static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
120 uint32_t e2)
121{
122 sc->base = get_seg_base(e1, e2);
123 sc->limit = get_seg_limit(e1, e2);
124 sc->flags = e2;
125}
126
127
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

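/*
 * Fetch the inner-level SS:ESP pair for privilege level "dpl" from the
 * current TSS.  The entry size depends on whether TR points at a 16-bit
 * or a 32-bit TSS (the shift below), hence the two load paths.
 */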
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
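
/*
 * Perform a hardware task switch to the TSS selected by tss_selector.
 * "source" distinguishes the jmp/iret/call entry paths, which differ in
 * how the busy bit of the old and new TSS and the NT flag are updated.
 */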

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

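/*
 * Vectors that push an error code: #DF(8), #TS(10), #NP(11), #SS(12),
 * #GP(13), #PF(14) and #AC(17).
 */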
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

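/*
 * Only write back the part of ESP selected by the stack-size mask: a
 * 16-bit stack must leave the upper bits of ESP/RSP untouched, while a
 * 64-bit stack (mask 0 from get_sp_mask) takes the full value.
 */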
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                                \
        sp -= 2;                                                     \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                                      \
    {                                                                            \
        sp -= 4;                                                                 \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                           \
    {                                                                \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                     \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                                      \
    {                                                                           \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                                \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    if (type == 5) {
        /* task gate */
        /* must do more checks here */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

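/*
 * Read an RSP value from the 64-bit TSS.  Levels 0-2 select RSP0-RSP2;
 * levels 3-9 select IST1-IST7 (hence the "ist + 3" at the call site).
 */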
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif

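/*
 * SYSCALL: in long mode, CS/SS come from STAR[47:32], the return RIP is
 * saved in RCX and RFLAGS in R11, and RIP is loaded from LSTAR (64-bit
 * callers) or CSTAR (compatibility mode); FMASK selects the RFLAGS bits
 * to clear.  Legacy mode instead uses STAR[31:0] as the new EIP.
 */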
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_SYSCALL;
    env->exception_is_int = 0;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~(env->fmask | RF_MASK);
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

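/*
 * SYSRET: return to user mode.  CS comes from STAR[63:48] (plus 16 for
 * a 64-bit return), SS from STAR[63:48] + 8, and in long mode RFLAGS is
 * restored from R11.
 */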
#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt. is_int is TRUE if coming from the int
 * instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE or if intno
 * is EXCP_SYSCALL.
 */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    if (is_int) {
        SegmentCache *dt;
        target_ulong ptr;
        int dpl, cpl, shift;
        uint32_t e2;

        dt = &env->idt;
        if (env->hflags & HF_LMA_MASK) {
            shift = 4;
        } else {
            shift = 3;
        }
        ptr = dt->base + (intno << shift);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        /* check privilege if software int */
        if (dpl < cpl) {
            raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
        }
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}

#else

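/*
 * SVM: while a guest is running, note the event that is being delivered
 * in the VMCB event_inj field; do_interrupt_all() clears the valid bit
 * again once delivery succeeded.
 */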
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = env_cpu(env);
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    if (cs->exception_index == EXCP_VMEXIT) {
        assert(env->old_exception == -1);
        do_vmexit(env);
    } else {
        do_interrupt_all(cpu, cs->exception_index,
                         env->exception_is_int,
                         env->error_code,
                         env->exception_next_eip, 0);
        /* successfully delivered */
        env->old_exception = -1;
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_POLL:
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
#endif
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_VIRQ:
        /* FIXME: this should respect TPR */
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        break;
#endif
    }

    /* Ensure that no TB jump will be modified as the program flow was
       changed.  */
    return true;
}

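/*
 * LLDT: load the local descriptor table register from a GDT entry.  A
 * null selector disables the LDT; in long mode the descriptor is 16
 * bytes and supplies the upper half of the base.
 */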
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

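/*
 * LTR: load the task register.  Requires an available (not busy) TSS
 * descriptor in the GDT; the busy bit is set once it is loaded.
 */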
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;

            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
1765void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1766 int shift, target_ulong next_eip)
1767{
1768 int new_stack, i;
1769 uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1770 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
1771 uint32_t val, limit, old_sp_mask;
1772 target_ulong ssp, old_ssp, offset, sp;
1773
1774 LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1775 LOG_PCALL_STATE(env_cpu(env));
1776 if ((new_cs & 0xfffc) == 0) {
1777 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1778 }
1779 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1780 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1781 }
1782 cpl = env->hflags & HF_CPL_MASK;
1783 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1784 if (e2 & DESC_S_MASK) {
1785 if (!(e2 & DESC_CS_MASK)) {
1786 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1787 }
1788 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1789 if (e2 & DESC_C_MASK) {
1790
1791 if (dpl > cpl) {
1792 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1793 }
1794 } else {
1795
1796 rpl = new_cs & 3;
1797 if (rpl > cpl) {
1798 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1799 }
1800 if (dpl != cpl) {
1801 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1802 }
1803 }
1804 if (!(e2 & DESC_P_MASK)) {
1805 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1806 }
1807
1808#ifdef TARGET_X86_64
1809
1810 if (shift == 2) {
1811 target_ulong rsp;
1812
1813
1814 rsp = env->regs[R_ESP];
1815 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1816 PUSHQ_RA(rsp, next_eip, GETPC());
1817
1818 env->regs[R_ESP] = rsp;
1819 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1820 get_seg_base(e1, e2),
1821 get_seg_limit(e1, e2), e2);
1822 env->eip = new_eip;
1823 } else
1824#endif
1825 {
1826 sp = env->regs[R_ESP];
1827 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1828 ssp = env->segs[R_SS].base;
1829 if (shift) {
1830 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1831 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1832 } else {
1833 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1834 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1835 }
1836
1837 limit = get_seg_limit(e1, e2);
1838 if (new_eip > limit) {
1839 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1840 }
1841
1842 SET_ESP(sp, sp_mask);
1843 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1844 get_seg_base(e1, e2), limit, e2);
1845 env->eip = new_eip;
1846 }
1847 } else {
1848
1849 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1850 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1851 rpl = new_cs & 3;
1852
1853#ifdef TARGET_X86_64
1854 if (env->efer & MSR_EFER_LMA) {
1855 if (type != 12) {
1856 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1857 }
1858 }
1859#endif
1860
1861 switch (type) {
1862 case 1:
1863 case 9:
1864 case 5:
1865 if (dpl < cpl || dpl < rpl) {
1866 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1867 }
1868 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1869 return;
1870 case 4:
1871 case 12:
1872 break;
1873 default:
1874 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1875 break;
1876 }
1877 shift = type >> 3;
1878
1879 if (dpl < cpl || dpl < rpl) {
1880 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1881 }
1882
1883 if (!(e2 & DESC_P_MASK)) {
1884 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1885 }
1886 selector = e1 >> 16;
1887 param_count = e2 & 0x1f;
1888 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1889#ifdef TARGET_X86_64
1890 if (env->efer & MSR_EFER_LMA) {
1891
1892 if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1893 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1894 GETPC());
1895 }
1896 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1897 if (type != 0) {
1898 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1899 GETPC());
1900 }
1901 offset |= ((target_ulong)e1) << 32;
1902 }
1903#endif
1904 if ((selector & 0xfffc) == 0) {
1905 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1906 }
1907
1908 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1909 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1910 }
1911 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1912 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1913 }
1914 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1915 if (dpl > cpl) {
1916 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1917 }
1918#ifdef TARGET_X86_64
1919 if (env->efer & MSR_EFER_LMA) {
1920 if (!(e2 & DESC_L_MASK)) {
1921 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1922 }
1923 if (e2 & DESC_B_MASK) {
1924 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1925 }
1926 shift++;
1927 }
1928#endif
1929 if (!(e2 & DESC_P_MASK)) {
1930 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1931 }
1932
1933 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1934
1935#ifdef TARGET_X86_64
1936 if (shift == 2) {
1937 sp = get_rsp_from_tss(env, dpl);
1938 ss = dpl;
1939 new_stack = 1;
1940 sp_mask = 0;
1941 ssp = 0;
1942 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
1943 TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
1944 } else
1945#endif
1946 {
1947 uint32_t sp32;
1948 get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1949 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1950 TARGET_FMT_lx "\n", ss, sp32, param_count,
1951 env->regs[R_ESP]);
1952 sp = sp32;
1953 if ((ss & 0xfffc) == 0) {
1954 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1955 }
1956 if ((ss & 3) != dpl) {
1957 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1958 }
1959 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1960 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1961 }
1962 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1963 if (ss_dpl != dpl) {
1964 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1965 }
1966 if (!(ss_e2 & DESC_S_MASK) ||
1967 (ss_e2 & DESC_CS_MASK) ||
1968 !(ss_e2 & DESC_W_MASK)) {
1969 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1970 }
1971 if (!(ss_e2 & DESC_P_MASK)) {
1972 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1973 }
1974
1975 sp_mask = get_sp_mask(ss_e2);
1976 ssp = get_seg_base(ss_e1, ss_e2);
1977 }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* in 64-bit mode a NULL SS (with RPL = new CPL) is loaded */
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        /* the RPL of the new CS is forced to the new CPL */
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* real mode stacks are always 16-bit */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

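/* When returning to an outer privilege level, data segment registers
   still holding a descriptor more privileged than the new CPL must be
   invalidated (cf. the RET/IRET pseudo-code in the Intel SDM). */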
static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}

/* protected mode ret/iret: common implementation */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend; /* skip the "lret $imm16" adjustment (0 for iret) */
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* so the sp_mask below is 32-bit */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc,
                                       retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc,
                                       retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc,
                                       retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc,
                                       retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc,
                                       retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

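/* protected mode iret; if NT is set, return from the nested task via the
   back link stored at offset 0 of the current TSS (invalid in long mode) */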
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            /* the back link must point into the GDT */
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc,
                                   GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc,
                                   GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc,
                                   GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip,
                      GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

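/* SYSENTER: fast transition to CPL 0.  CS and SS are forced to flat
   segments derived from the IA32_SYSENTER_CS MSR, and EIP/ESP are taken
   from the IA32_SYSENTER_EIP/ESP MSRs; VM, IF and RF are cleared. */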
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

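/* SYSEXIT: fast return to CPL 3.  The CS/SS selectors are derived from
   IA32_SYSENTER_CS (+16/+24, or +32/+40 for a 64-bit return) and the
   new ESP/EIP are taken from ECX/EDX. */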
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

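/* LSL: return the segment limit and set ZF if the descriptor type is
   valid for LSL and visible at the current privilege level, else clear
   ZF and return 0. */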
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* available 16-bit TSS */
        case 2: /* LDT */
        case 3: /* busy 16-bit TSS */
        case 9: /* available 32-bit TSS */
        case 11: /* busy 32-bit TSS */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

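/* LAR: like LSL, but on success the access rights bytes of the
   descriptor are returned; gate descriptors are also valid for LAR. */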
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* available 16-bit TSS */
        case 2: /* LDT */
        case 3: /* busy 16-bit TSS */
        case 4: /* 16-bit call gate */
        case 5: /* task gate */
        case 9: /* available 32-bit TSS */
        case 11: /* busy 32-bit TSS */
        case 12: /* 32-bit call gate */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

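/* VERR: set ZF if the segment is readable at the current privilege
   level, else clear it. */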
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            /* non-conforming code: the privilege check applies */
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

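/* VERW: set ZF if the segment is writable at the current privilege
   level; code segments never are. */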
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

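/* user-mode emulation only: handle real and vm86 mode segment loads
   inline, and go through helper_load_seg() for protected mode */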
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, X86Seg seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif

/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}
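
/*
 * Example: a one byte access to port 0x3f8 tests bitmap byte
 * (0x3f8 >> 3) = 0x7f, bit (0x3f8 & 7) = 0; the 16-bit load above lets
 * a check span a byte boundary.  The helpers below check 1, 2 and 4
 * byte accesses respectively.
 */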

void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}