/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif

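/* return non zero if error */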
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

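/* init the segment cache in vm86 mode. */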
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

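/* XXX: restore CPU state in registers (PowerPC case) */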
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* The uint32_t cast makes the segment-relative address wrap at 4 GiB,
   as required for a 32-bit stack segment. */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 2;                                                        \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra);    \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra);    \
        sp += 2;                                                        \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)

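/* protected mode interrupt */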
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

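/* read the RSPn (level 0-2) or ISTn (level 4-10) stack pointer from the
   64-bit TSS */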
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}

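/* 64 bit interrupt */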
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
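/* return from syscall: reload the CPL-3 CS/SS selectors derived from
   MSR_STAR[63:48]; in long mode, RFLAGS is also restored from R11 */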
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif

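/* real mode interrupt */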
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt. is_int is TRUE if coming from the int
 * instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE or if intno
 * is EXCP_SYSCALL.
 */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    if (is_int) {
        SegmentCache *dt;
        target_ulong ptr;
        int dpl, cpl, shift;
        uint32_t e2;

        dt = &env->idt;
        if (env->hflags & HF_LMA_MASK) {
            shift = 4;
        } else {
            shift = 3;
        }
        ptr = dt->base + (intno << shift);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        /* check privilege if software int */
        if (dpl < cpl) {
            raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
        }
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}

#else

static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}

bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    bool ret = false;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        /* Don't process multiple interrupt requests in a single call.
           This is required to make icount-driven execution deterministic. */
        return true;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(cpu);
    } else if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(cpu);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            ret = true;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;
            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            ret = true;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;

            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
            intno = x86_ldl_phys(cs, env->vm_vmcb
                                 + offsetof(struct vmcb, control.int_vector));
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing virtual hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            ret = true;
#endif
        }
    }

    return ret;
}

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

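/* only works if protected mode and not VM86. seg_reg must be != R_CS */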
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

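/* protected mode jump */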
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}

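/* real mode call */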
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

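/* protected mode call */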
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp;

    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if ((ss & 3) != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}

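/* real and vm86 mode iret */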
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

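/* On a return to an outer privilege level, nullify any data segment
   register whose descriptor is no longer accessible at the new CPL */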
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, FS and GS with a null selector are left alone
       because they may still contain a valid base */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

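/* protected mode far RET / IRET common helper: 'shift' selects the
   operand size (0=16, 1=32, 2=64 bit) and 'addend' is the extra stack
   adjustment of RET imm16 */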
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments for the new (outer) privilege level */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

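/* protected mode IRET: either return through the back link of a nested
   task (NT flag set), or perform a normal protected mode return */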
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: the 0x17 mask keeps the S bit and folds the 16 and 32
           bit busy TSS types together, so both match type == 3 */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

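/* far RET: 'addend' extra bytes are released from the stack (RET imm16) */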
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

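/* SYSENTER: enter CPL 0 with flat segments derived from the
   sysenter_cs/esp/eip MSR state */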
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

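/* SYSEXIT: return to CPL 3; 'dflag == 2' selects the 64-bit layout */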
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

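/* LSL: return the segment limit and set ZF if the selector is valid
   and visible at the current privilege level, else clear ZF */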
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code segments are accessible at any level */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* available 16 bit TSS */
        case 2: /* LDT */
        case 3: /* busy 16 bit TSS */
        case 9: /* available 32 bit TSS */
        case 11: /* busy 32 bit TSS */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

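/* LAR: return the access rights bytes of the descriptor (masked with
   0x00f0ff00) and report success in ZF */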
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code segments are accessible at any level */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* available 16 bit TSS */
        case 2: /* LDT */
        case 3: /* busy 16 bit TSS */
        case 4: /* 16 bit call gate */
        case 5: /* task gate */
        case 9: /* available 32 bit TSS */
        case 11: /* busy 32 bit TSS */
        case 12: /* 32 bit call gate */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

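/* VERR: set ZF if the segment is readable at the current CPL/RPL */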
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segments must be readable */
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        /* conforming code segments ignore DPL */
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

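/* VERW: set ZF if the segment is writable at the current CPL/RPL */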
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segments are never writable */
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

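/* user-mode emulation only: segment loads in real or vm86 mode bypass
   the protected mode descriptor checks */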
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif

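/* check whether an I/O port access of 'size' bytes at 'addr' is allowed
   by the I/O permission bitmap in the TSS; raise #GP(0) if not */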
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}

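/* size-specific wrappers around check_io() */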
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}
