/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
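
/*
 * The template includes above generate the cpu_ld{ub,uw,l,q}_kernel and
 * cpu_st{b,w,l,q}_kernel accessors, plus the _ra variants that take a
 * host return address for precise exception unwinding.  They are used
 * for all descriptor table, TSS and stack accesses below.
 */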

/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
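
/*
 * Worked example (a sketch, not taken from any real GDT): the flat 4 GiB
 * code descriptor words e1 = 0x0000ffff, e2 = 0x00cf9a00 decode as
 *   get_seg_base(e1, e2)  = 0x00000000
 *   get_seg_limit(e1, e2) = ((0xffff | 0x000f0000) << 12) | 0xfff
 *                         = 0xffffffff   (DESC_G_MASK set: 4 KiB units)
 */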

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

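/*
 * Fetch the privilege-level-dpl stack pointer from the current TSS.  In a
 * 32-bit TSS the {esp, ss} pairs start at offset 4 + 8 * dpl; in a 16-bit
 * TSS the {sp, ss} pairs start at offset 2 + 4 * dpl.  Both layouts are
 * covered by index = (dpl * 4 + 2) << shift below.
 */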
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be a writable data segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* code segments loaded into a data register must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* for data and non-conforming code, check privilege rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

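/*
 * Hardware task switch.  Reached from a far JMP or CALL to a TSS
 * descriptor or task gate, from an exception/interrupt through a task
 * gate, or from IRET with EFLAGS.NT set; "source" tells which case we
 * are in so the busy bits and the TSS back link are updated correctly.
 */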
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

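    /* read all the registers from the new TSS */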
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* XXX: the TSS debug trap bit (new_trap) is not implemented; the
       read above keeps the access pattern and this cast silences the
       unused-variable warning */
    (void)new_trap;

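    /* NOTE: we must avoid memory exceptions during the task switch, so
       we make dummy accesses before.  It can still fail in some cases,
       so a bigger hack would be necessary to validate the TLB after
       having done the accesses. */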
    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear the busy bit of the source TSS (JMP and IRET do not
       return to it) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

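    /* save the current CPU state in the old TSS */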
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

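    /* for a CALL, store the back link to the old TSS in the new TSS and
       set NT so that a later IRET returns to the calling task */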
    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set the busy bit of the destination TSS */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

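    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */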
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);

    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

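    /* load the LDT */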
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

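    /* load the segment descriptors */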
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_has_error_code(int intno)
{
    switch (intno) {
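    /* #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14), #AC(17) */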
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

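/* In 64-bit builds target_ulong is 64 bits wide, so ssp + (sp & sp_mask)
   can carry into bit 32; SEG_ADDL truncates the effective address back
   to 32 bits for pushes and pops done through a 32-bit stack segment. */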
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                                \
        sp -= 2;                                                     \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                           \
    {                                                                \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                     \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)

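/* protected mode interrupt */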
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code on the destination stack */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
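        /* to inner privilege: fetch the new ss:esp from the TSS */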
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

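    /* interrupt gates (even types: 6, 14) clear IF so further hardware
       interrupts are masked; trap gates (odd types: 7, 15) leave IF
       unchanged */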
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}

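/* 64 bit interrupt */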
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege: the stack comes from RSP0..2 of the 64-bit
           TSS, or from the requested IST slot if the gate has one */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
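    /* the 64-bit interrupt frame is always pushed on a 16-byte aligned
       stack */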
    esp &= ~0xfLL;

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif

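/* real mode interrupt */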
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)

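/* fake user mode interrupt.  is_int is TRUE if coming from the int
   instruction.  next_eip is the env->eip value AFTER the interrupt
   instruction.  It is only relevant if is_int is TRUE or if intno is
   EXCP_SYSCALL. */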
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    if (is_int) {
        SegmentCache *dt;
        target_ulong ptr;
        int dpl, cpl, shift;
        uint32_t e2;

        dt = &env->idt;
        if (env->hflags & HF_LMA_MASK) {
            shift = 4;
        } else {
            shift = 3;
        }
        ptr = dt->base + (intno << shift);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        /* check privilege if software int */
        if (dpl < cpl) {
            raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
        }
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code, so update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}

#else

static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
#endif

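/*
 * Begin execution of an interruption.  is_int is TRUE if coming from
 * the int instruction.  next_eip is the env->eip value AFTER the
 * interrupt instruction.  It is only relevant if is_int is TRUE.
 */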
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    if (cs->exception_index >= EXCP_VMEXIT) {
        assert(env->old_exception == -1);
        do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
    } else {
        do_interrupt_all(cpu, cs->exception_index,
                         env->exception_is_int,
                         env->error_code,
                         env->exception_next_eip, 0);
        /* successfully delivered */
        env->old_exception = -1;
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}

bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    bool ret = false;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        /* Don't process multiple interrupt requests in a single call.
           This is required to make icount-driven execution deterministic. */
        return true;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(cpu);
        ret = true;
    } else if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(cpu);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            ret = true;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;
            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            ret = true;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;

            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
            intno = x86_ldl_phys(cs, env->vm_vmcb
                                 + offsetof(struct vmcb, control.int_vector));
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing virtual hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            ret = true;
#endif
        }
    }

    return ret;
}

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

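/* only works if protected mode and not VM86; seg_reg must be != R_CS */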
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

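/* protected mode jump */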
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

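/* protected mode call */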
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp;

    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
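            /* to inner privilege */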
            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if ((ss & 3) != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}

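/* real and vm86 mode iret */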
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

1995 if ((seg_reg == R_FS || seg_reg == R_GS) &&
1996 (env->segs[seg_reg].selector & 0xfffc) == 0) {
1997 return;
1998 }
1999
2000 e2 = env->segs[seg_reg].flags;
2001 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2002 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2003
2004 if (dpl < cpl) {
2005 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2006 }
2007 }
2008}
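
/*
 * Hedged example of the rule validate_seg() implements: when returning to
 * an outer privilege level, a data segment that is more privileged than
 * the new CPL gets nulled.  The flag word below is hypothetical.
 */
#if 0
uint32_t flags = DESC_S_MASK | DESC_P_MASK; /* data segment, DPL == 0 */
int new_cpl = 3;
int seg_dpl = (flags >> DESC_DPL_SHIFT) & 3;
if (seg_dpl < new_cpl) {
    /* validate_seg() would load a null descriptor here */
}
#endif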

/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc,
                                       retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc,
                                       retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc,
                                       retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc,
                                       retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc,
                                       retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
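
/*
 * Usage sketch: helper_ret_protected() backs both LRET and IRET.  A far
 * return that also discards immediate operand bytes, e.g. "lret $8" with
 * 32-bit operand size, would reach it roughly as below.
 */
#if 0
helper_lret_protected(env, 1, 8);   /* pops EIP/CS, then skips 8 bytes */
#endif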

void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS,
                                   tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS,
                                   tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS,
                                   tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET,
                      next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
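
/*
 * Sketch of the flat selector layout SYSENTER derives from the single
 * IA32_SYSENTER_CS MSR value loaded above; the MSR value 0x100 is
 * hypothetical.
 */
#if 0
uint16_t msr_cs = 0x100;                /* env->sysenter_cs */
uint16_t cs = msr_cs & 0xfffc;          /* 0x100: ring-0 code  */
uint16_t ss = (msr_cs + 8) & 0xfffc;    /* 0x108: ring-0 stack */
#endif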

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
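
/*
 * Companion sketch for SYSEXIT: the user-mode selectors are fixed offsets
 * from the same hypothetical IA32_SYSENTER_CS value, with RPL forced to 3
 * (offsets 32/40 apply to the 64-bit dflag == 2 path).
 */
#if 0
uint16_t msr_cs = 0x100;
uint16_t cs32 = ((msr_cs + 16) & 0xfffc) | 3;   /* 0x113 */
uint16_t ss32 = ((msr_cs + 24) & 0xfffc) | 3;   /* 0x11b */
#endif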

target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
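
/*
 * Worked example for the limit LSL reports: a descriptor with G set and a
 * 20-bit limit field of 0xfffff expands to a byte limit of 0xffffffff.
 * The descriptor words below are hypothetical.
 */
#if 0
uint32_t e1 = 0x0000ffff;                   /* limit[15:0]         */
uint32_t e2 = DESC_G_MASK | 0x000f0000;     /* limit[19:16], G bit */
unsigned int limit = get_seg_limit(e1, e2); /* 0xffffffff */
#endif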

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
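
/*
 * Sketch of consuming LAR's result: the returned word keeps the access
 * rights bytes (bits 8..15 and 20..23 of e2), so for instance the DPL can
 * be read back with the shift used elsewhere in this file.  'sel' is a
 * hypothetical selector, and the ZF outcome should be checked first.
 */
#if 0
target_ulong ar = helper_lar(env, sel);
int dpl = (ar >> DESC_DPL_SHIFT) & 3;
#endif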

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
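
/*
 * Usage sketch for the verify helpers above: VERR/VERW never fault, they
 * only report through ZF (CC_Z folded into CC_SRC), so a caller probes
 * the flag afterwards.  'sel' is a hypothetical selector, and this
 * assumes the flags state is consumed directly from CC_SRC.
 */
#if 0
helper_verr(env, sel);
bool readable = (CC_SRC & CC_Z) != 0;
helper_verw(env, sel);
bool writable = (CC_SRC & CC_Z) != 0;
#endif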

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif

/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two words */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}
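
/*
 * Worked example of the I/O bitmap lookup above for a hypothetical 1-byte
 * access to port 0x3f9, with the bitmap starting at TSS offset 0x68.
 */
#if 0
int addr = 0x3f9, size = 1;
int io_offset = 0x68 + (addr >> 3);   /* 0x68 + 0x7f = 0xe7 */
int bit = addr & 7;                   /* test bit 1 of that word */
int mask = (1 << size) - 1;           /* 0x1 */
/* the access is allowed iff ((bitmap_word >> bit) & mask) == 0 */
#endif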

void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}