qemu/target-i386/seg_helper.c
/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "qemu/log.h"
#include "helper.h"

//#define DEBUG_PCALL

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

/* return non-zero on error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}

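/* A segment descriptor is 8 bytes; e1 is the low dword and e2 the high
   dword.  e1 holds limit bits 15..0 and base bits 15..0; e2 holds base
   bits 23..16 and 31..24, the type/S/DPL/P access byte, limit bits
   19..16 and the AVL/L/D-B/G attribute bits.  When the granularity
   (G) bit is set, the 20-bit limit is in 4 KiB units, which is why
   get_seg_limit() shifts it left by 12 and fills with 0xfff. */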
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

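/* On a privilege-level change, the CPU fetches the new stack pointer
   from the current TSS.  In a 32-bit TSS the {ESP, SS} pair for
   privilege level n lives at offsets 4 + 8 * n and 8 + 8 * n; in a
   16-bit TSS the {SP, SS} pair lives at 2 + 4 * n and 4 + 4 * n.
   Both layouts collapse to the single index computation
   (dpl * 4 + 2) << shift used below, with shift = 1 for 32-bit. */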
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(env, "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* XXX: is this correct? */
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* if a code segment, it must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

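/* switch_tss() is reached from three places: a far JMP or CALL whose
   target selector names a TSS or task gate, an exception/interrupt
   delivered through a task gate, and IRET with EFLAGS.NT set.  The
   source value decides whether the busy bit of the outgoing task is
   cleared (JMP, IRET) and whether the back link and NT flag are set
   in the incoming task (CALL). */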
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

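    /* A 32-bit TSS occupies 104 bytes (0x00..0x67) and a 16-bit TSS 44
       bytes (0x00..0x2b), hence the minimum limits of 103 and 43 below.
       Bit 3 of the descriptor type distinguishes the two formats. */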
    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
                0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: this can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses have been done */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
        cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), EAX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), ECX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), EDX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), EBX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), ESP);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), EBP);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), ESI);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), EDI);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
                           env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
        cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), EAX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), ECX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), EDX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), EBX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), ESP);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), EBP);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), ESI);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), EDI);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
                           env->segs[i].selector);
        }
    }

    /* from now on, if an exception occurs, it occurs in the new
       task's context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* first load the registers that cannot fault; the segment
       registers are reloaded below with checks that may raise
       exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first; the full loads below may
           trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(env, R_CS, new_segs[R_CS]);
        tss_load_seg(env, R_SS, new_segs[R_SS]);
        tss_load_seg(env, R_ES, new_segs[R_ES]);
        tss_load_seg(env, R_DS, new_segs[R_DS]);
        tss_load_seg(env, R_FS, new_segs[R_FS]);
        tss_load_seg(env, R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        for (i = 0; i < DR7_MAX_BP; i++) {
            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
    }
#endif
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:  /* #DF double fault */
    case 10: /* #TS invalid TSS */
    case 11: /* #NP segment not present */
    case 12: /* #SS stack fault */
    case 13: /* #GP general protection */
    case 14: /* #PF page fault */
    case 17: /* #AC alignment check */
        return 1;
    }
    return 0;
}

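/* SET_ESP writes back only the bits of ESP selected by sp_mask, so a
   16-bit stack segment preserves the upper bits of ESP.  The 64-bit
   build special-cases the two common masks so that a full-width RSP
   store is used when the stack is not a legacy 16/32-bit one. */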
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                           \
    do {                                                \
        if ((sp_mask) == 0xffff) {                      \
            ESP = (ESP & ~0xffff) | ((val) & 0xffff);   \
        } else if ((sp_mask) == 0xffffffffLL) {         \
            ESP = (uint32_t)(val);                      \
        } else {                                        \
            ESP = (val);                                \
        }                                               \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                           \
    do {                                                \
        ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)); \
    } while (0)
#endif

/* On 64-bit targets the segment-base addition can overflow 32 bits, so
 * this segment addition macro is used to trim the result to 32 bits
 * whenever needed. */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

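/* The PUSH/POP helpers below operate on a local copy of the stack
   pointer; the callers in this file commit it with SET_ESP only after
   every push has succeeded, so a faulting push leaves the
   architectural ESP unchanged. */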
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                             \
    {                                                            \
        sp -= 2;                                                 \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));    \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                              \
    {                                                            \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));    \
        sp += 2;                                                 \
    }

#define POPL(ssp, sp, sp_mask, val)                                     \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                        \
    }

/* protected mode interrupt */
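/* Frame pushed by a protected-mode interrupt, from higher to lower
   addresses: on a privilege change coming out of vm86, the old
   GS/FS/DS/ES selectors; on any privilege change, the old SS:ESP;
   then always EFLAGS, CS and EIP, and finally the error code when the
   exception defines one.  Word-sized slots are used for 286 gates,
   dword-sized slots for 386 gates. */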
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag; trap gates leave it set */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel(env, sp, (val));         \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = cpu_ldq_kernel(env, sp);          \
        sp += 8;                                \
    }

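/* In the 64-bit TSS, RSP0..RSP2 live at offsets 0x04, 0x0c and 0x14,
   and IST1..IST7 at 0x24..0x54; both series are reachable with the
   single computation index = 8 * level + 4, where level is the target
   DPL (0..2) or ist + 3 for an IST slot. */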
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}

/* 64 bit interrupt */
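/* In long mode, IDT entries are 16 bytes; the third dword (e3) holds
   bits 63..32 of the handler offset.  Only 386-style interrupt and
   trap gates are valid.  The SS:RSP pair is always pushed, the stack
   is aligned down to 16 bytes, and a non-zero IST field forces a
   stack switch even without a privilege change. */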
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = get_rsp_from_tss(env, dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = ESP;
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag; trap gates leave it set */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
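/* SYSCALL: the kernel CS selector comes from STAR bits 47..32 and SS
   is that selector + 8.  In long mode the return RIP is saved in RCX
   and RFLAGS in R11, RFLAGS is then masked with SFMASK (env->fmask),
   and the target RIP is LSTAR (64-bit caller) or CSTAR (compatibility
   mode); in legacy mode the target EIP is the low 32 bits of STAR. */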
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
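/* SYSRET: the user CS base selector comes from STAR bits 63..48.  A
   64-bit return uses that selector + 16 and a 32-bit return uses it
   directly, with the RPL of the new CS forced to 3; SS is always
   selector + 8.  In long mode RFLAGS is restored from R11 and RIP
   from RCX. */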
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
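/* In real mode the IDT is the interrupt vector table: 4-byte entries
   holding a 16-bit offset followed by a 16-bit segment.  Only FLAGS,
   CS and IP are pushed, and no privilege checks apply. */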
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we only emulate user space, we cannot do more than exit
       the emulation with the appropriate exception and error code */
    if (is_int) {
        EIP = next_eip;
    }
}

#else

static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from the
 * int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(CPUX86State *env, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env, env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env, intno, 0, 0, 0, is_hw);
}

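/* ENTER with a non-zero nesting level: the translated code is expected
   to have already pushed the old frame pointer and computed the new
   one (t1); this helper copies level - 1 outer frame pointers down
   from the old frame and then pushes t1 last. */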
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data(env, ssp + (esp & esp_mask),
                         cpu_ldl_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        cpu_stl_data(env, ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, ssp + (esp & esp_mask),
                         cpu_lduw_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        cpu_stw_data(env, ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
        }
        esp -= 8;
        cpu_stq_data(env, esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
        }
        esp -= 2;
        cpu_stw_data(env, esp, t1);
    }
}
#endif

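/* In long mode, system descriptors (LDT and TSS) are expanded to 16
   bytes: the third dword carries base bits 63..32 and the fourth is
   reserved, which is why entry_limit is 15 instead of 7 below. */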
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            e4 = cpu_ldl_kernel(env, ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    env->tr.selector = selector;
}

1452/* only works if protected mode and not VM86. seg_reg must be != R_CS */
1453void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1454{
1455    uint32_t e1, e2;
1456    int cpl, dpl, rpl;
1457    SegmentCache *dt;
1458    int index;
1459    target_ulong ptr;
1460
1461    selector &= 0xffff;
1462    cpl = env->hflags & HF_CPL_MASK;
1463    if ((selector & 0xfffc) == 0) {
1464        /* null selector case */
1465        if (seg_reg == R_SS
1466#ifdef TARGET_X86_64
1467            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1468#endif
1469            ) {
1470            raise_exception_err(env, EXCP0D_GPF, 0);
1471        }
1472        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1473    } else {
1474
1475        if (selector & 0x4) {
1476            dt = &env->ldt;
1477        } else {
1478            dt = &env->gdt;
1479        }
1480        index = selector & ~7;
1481        if ((index + 7) > dt->limit) {
1482            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1483        }
1484        ptr = dt->base + index;
1485        e1 = cpu_ldl_kernel(env, ptr);
1486        e2 = cpu_ldl_kernel(env, ptr + 4);
1487
1488        if (!(e2 & DESC_S_MASK)) {
1489            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1490        }
1491        rpl = selector & 3;
1492        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1493        if (seg_reg == R_SS) {
1494            /* must be writable segment */
1495            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1496                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1497            }
1498            if (rpl != cpl || dpl != cpl) {
1499                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1500            }
1501        } else {
1502            /* must be readable segment */
1503            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1504                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1505            }
1506
1507            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1508                /* if not conforming code, test rights */
1509                if (dpl < cpl || dpl < rpl) {
1510                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1511                }
1512            }
1513        }
1514
1515        if (!(e2 & DESC_P_MASK)) {
1516            if (seg_reg == R_SS) {
1517                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
1518            } else {
1519                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1520            }
1521        }
1522
1523        /* set the access bit if not already set */
1524        if (!(e2 & DESC_A_MASK)) {
1525            e2 |= DESC_A_MASK;
1526            cpu_stl_kernel(env, ptr + 4, e2);
1527        }
1528
1529        cpu_x86_load_seg_cache(env, seg_reg, selector,
1530                       get_seg_base(e1, e2),
1531                       get_seg_limit(e1, e2),
1532                       e2);
1533#if 0
1534        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1535                selector, (unsigned long)sc->base, sc->limit, sc->flags);
1536#endif
1537    }
1538}
1539
1540/* protected mode jump */
1541void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1542                           int next_eip_addend)
1543{
1544    int gate_cs, type;
1545    uint32_t e1, e2, cpl, dpl, rpl, limit;
1546    target_ulong next_eip;
1547
1548    if ((new_cs & 0xfffc) == 0) {
1549        raise_exception_err(env, EXCP0D_GPF, 0);
1550    }
1551    if (load_segment(env, &e1, &e2, new_cs) != 0) {
1552        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1553    }
1554    cpl = env->hflags & HF_CPL_MASK;
1555    if (e2 & DESC_S_MASK) {
1556        if (!(e2 & DESC_CS_MASK)) {
1557            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1558        }
1559        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1560        if (e2 & DESC_C_MASK) {
1561            /* conforming code segment */
1562            if (dpl > cpl) {
1563                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1564            }
1565        } else {
1566            /* non conforming code segment */
1567            rpl = new_cs & 3;
1568            if (rpl > cpl) {
1569                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1570            }
1571            if (dpl != cpl) {
1572                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1573            }
1574        }
1575        if (!(e2 & DESC_P_MASK)) {
1576            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1577        }
1578        limit = get_seg_limit(e1, e2);
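        /* true 64-bit code segments (LMA and L both set) have no limit */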
1579        if (new_eip > limit &&
1580            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1581            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1582        }
1583        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1584                       get_seg_base(e1, e2), limit, e2);
1585        EIP = new_eip;
1586    } else {
1587        /* jump to call or task gate */
1588        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1589        rpl = new_cs & 3;
1590        cpl = env->hflags & HF_CPL_MASK;
1591        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1592        switch (type) {
1593        case 1: /* 286 TSS */
1594        case 9: /* 386 TSS */
1595        case 5: /* task gate */
1596            if (dpl < cpl || dpl < rpl) {
1597                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1598            }
1599            next_eip = env->eip + next_eip_addend;
1600            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
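            /* switch_tss reloaded the flags from the TSS via
               cpu_load_eflags, so the condition codes are now in CC_SRC */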
1601            CC_OP = CC_OP_EFLAGS;
1602            break;
1603        case 4: /* 286 call gate */
1604        case 12: /* 386 call gate */
1605            if (dpl < cpl || dpl < rpl) {
1606                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1607            }
1608            if (!(e2 & DESC_P_MASK)) {
1609                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1610            }
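            /* the gate holds the target selector in the high word of the
               first dword; 386 gates add the high offset word from e2 */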
1611            gate_cs = e1 >> 16;
1612            new_eip = (e1 & 0xffff);
1613            if (type == 12) {
1614                new_eip |= (e2 & 0xffff0000);
1615            }
1616            if (load_segment(env, &e1, &e2, gate_cs) != 0) {
1617                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1618            }
1619            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1620            /* must be code segment */
1621            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1622                 (DESC_S_MASK | DESC_CS_MASK))) {
1623                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1624            }
1625            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1626                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1627                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1628            }
1629            if (!(e2 & DESC_P_MASK)) {
1630                raise_exception_err(env, EXCP0B_NOSEG, gate_cs & 0xfffc);
1631            }
1632            limit = get_seg_limit(e1, e2);
1633            if (new_eip > limit) {
1634                raise_exception_err(env, EXCP0D_GPF, 0);
1635            }
1636            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1637                                   get_seg_base(e1, e2), limit, e2);
1638            EIP = new_eip;
1639            break;
1640        default:
1641            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1642            break;
1643        }
1644    }
1645}
1646
1647/* real mode call */
1648void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1649                       int shift, int next_eip)
1650{
1651    int new_eip;
1652    uint32_t esp, esp_mask;
1653    target_ulong ssp;
1654
1655    new_eip = new_eip1;
1656    esp = ESP;
1657    esp_mask = get_sp_mask(env->segs[R_SS].flags);
1658    ssp = env->segs[R_SS].base;
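    /* shift selects the operand size of the pushes: 1 = 32 bit, 0 = 16 bit */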
1659    if (shift) {
1660        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
1661        PUSHL(ssp, esp, esp_mask, next_eip);
1662    } else {
1663        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
1664        PUSHW(ssp, esp, esp_mask, next_eip);
1665    }
1666
1667    SET_ESP(esp, esp_mask);
1668    env->eip = new_eip;
1669    env->segs[R_CS].selector = new_cs;
1670    env->segs[R_CS].base = (new_cs << 4);
1671}
1672
1673/* protected mode call */
1674void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1675                            int shift, int next_eip_addend)
1676{
1677    int new_stack, i;
1678    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1679    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1680    uint32_t val, limit, old_sp_mask;
1681    target_ulong ssp, old_ssp, next_eip;
1682
1683    next_eip = env->eip + next_eip_addend;
1684    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1685    LOG_PCALL_STATE(env);
1686    if ((new_cs & 0xfffc) == 0) {
1687        raise_exception_err(env, EXCP0D_GPF, 0);
1688    }
1689    if (load_segment(env, &e1, &e2, new_cs) != 0) {
1690        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1691    }
1692    cpl = env->hflags & HF_CPL_MASK;
1693    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1694    if (e2 & DESC_S_MASK) {
1695        if (!(e2 & DESC_CS_MASK)) {
1696            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1697        }
1698        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1699        if (e2 & DESC_C_MASK) {
1700            /* conforming code segment */
1701            if (dpl > cpl) {
1702                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1703            }
1704        } else {
1705            /* non conforming code segment */
1706            rpl = new_cs & 3;
1707            if (rpl > cpl) {
1708                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1709            }
1710            if (dpl != cpl) {
1711                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1712            }
1713        }
1714        if (!(e2 & DESC_P_MASK)) {
1715            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1716        }
1717
1718#ifdef TARGET_X86_64
1719        /* XXX: check 16/32 bit cases in long mode */
1720        if (shift == 2) {
1721            target_ulong rsp;
1722
1723            /* 64 bit case */
1724            rsp = ESP;
1725            PUSHQ(rsp, env->segs[R_CS].selector);
1726            PUSHQ(rsp, next_eip);
1727            /* from this point, not restartable */
1728            ESP = rsp;
1729            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1730                                   get_seg_base(e1, e2),
1731                                   get_seg_limit(e1, e2), e2);
1732            EIP = new_eip;
1733        } else
1734#endif
1735        {
1736            sp = ESP;
1737            sp_mask = get_sp_mask(env->segs[R_SS].flags);
1738            ssp = env->segs[R_SS].base;
1739            if (shift) {
1740                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1741                PUSHL(ssp, sp, sp_mask, next_eip);
1742            } else {
1743                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1744                PUSHW(ssp, sp, sp_mask, next_eip);
1745            }
1746
1747            limit = get_seg_limit(e1, e2);
1748            if (new_eip > limit) {
1749                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1750            }
1751            /* from this point, not restartable */
1752            SET_ESP(sp, sp_mask);
1753            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1754                                   get_seg_base(e1, e2), limit, e2);
1755            EIP = new_eip;
1756        }
1757    } else {
1758        /* check gate type */
1759        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1760        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1761        rpl = new_cs & 3;
1762        switch (type) {
1763        case 1: /* available 286 TSS */
1764        case 9: /* available 386 TSS */
1765        case 5: /* task gate */
1766            if (dpl < cpl || dpl < rpl) {
1767                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1768            }
1769            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
1770            CC_OP = CC_OP_EFLAGS;
1771            return;
1772        case 4: /* 286 call gate */
1773        case 12: /* 386 call gate */
1774            break;
1775        default:
1776            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1777            break;
1778        }
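        /* type 4 gives shift 0 (286 gate), type 12 gives shift 1 (386 gate) */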
1779        shift = type >> 3;
1780
1781        if (dpl < cpl || dpl < rpl) {
1782            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1783        }
1784        /* check valid bit */
1785        if (!(e2 & DESC_P_MASK)) {
1786            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1787        }
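        /* a call gate packs the target selector in e1[31:16], the entry
           offset across e1[15:0]/e2[31:16] and a parameter count in
           e2[4:0] */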
1788        selector = e1 >> 16;
1789        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1790        param_count = e2 & 0x1f;
1791        if ((selector & 0xfffc) == 0) {
1792            raise_exception_err(env, EXCP0D_GPF, 0);
1793        }
1794
1795        if (load_segment(env, &e1, &e2, selector) != 0) {
1796            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1797        }
1798        if (!(e2 & DESC_S_MASK) || !(e2 & DESC_CS_MASK)) {
1799            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1800        }
1801        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1802        if (dpl > cpl) {
1803            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1804        }
1805        if (!(e2 & DESC_P_MASK)) {
1806            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1807        }
1808
1809        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1810            /* to inner privilege */
1811            get_ss_esp_from_tss(env, &ss, &sp, dpl);
1812            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
1813                      "\n",
1814                      ss, sp, param_count, ESP);
1815            if ((ss & 0xfffc) == 0) {
1816                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1817            }
1818            if ((ss & 3) != dpl) {
1819                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1820            }
1821            if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
1822                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1823            }
1824            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1825            if (ss_dpl != dpl) {
1826                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1827            }
1828            if (!(ss_e2 & DESC_S_MASK) ||
1829                (ss_e2 & DESC_CS_MASK) ||
1830                !(ss_e2 & DESC_W_MASK)) {
1831                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1832            }
1833            if (!(ss_e2 & DESC_P_MASK)) {
1834                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1835            }
1836
1837            /* push_size = ((param_count * 2) + 8) << shift; */
1838
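            /* switch to the inner stack taken from the TSS, then copy
               param_count arguments over from the caller's stack */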
1839            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1840            old_ssp = env->segs[R_SS].base;
1841
1842            sp_mask = get_sp_mask(ss_e2);
1843            ssp = get_seg_base(ss_e1, ss_e2);
1844            if (shift) {
1845                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
1846                PUSHL(ssp, sp, sp_mask, ESP);
1847                for (i = param_count - 1; i >= 0; i--) {
1848                    val = cpu_ldl_kernel(env, old_ssp + ((ESP + i * 4) &
1849                                                         old_sp_mask));
1850                    PUSHL(ssp, sp, sp_mask, val);
1851                }
1852            } else {
1853                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
1854                PUSHW(ssp, sp, sp_mask, ESP);
1855                for (i = param_count - 1; i >= 0; i--) {
1856                    val = cpu_lduw_kernel(env, old_ssp + ((ESP + i * 2) &
1857                                                          old_sp_mask));
1858                    PUSHW(ssp, sp, sp_mask, val);
1859                }
1860            }
1861            new_stack = 1;
1862        } else {
1863            /* to same privilege */
1864            sp = ESP;
1865            sp_mask = get_sp_mask(env->segs[R_SS].flags);
1866            ssp = env->segs[R_SS].base;
1867            /* push_size = (4 << shift); */
1868            new_stack = 0;
1869        }
1870
1871        if (shift) {
1872            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1873            PUSHL(ssp, sp, sp_mask, next_eip);
1874        } else {
1875            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1876            PUSHW(ssp, sp, sp_mask, next_eip);
1877        }
1878
1879        /* from this point, not restartable */
1880
1881        if (new_stack) {
1882            ss = (ss & ~3) | dpl;
1883            cpu_x86_load_seg_cache(env, R_SS, ss,
1884                                   ssp,
1885                                   get_seg_limit(ss_e1, ss_e2),
1886                                   ss_e2);
1887        }
1888
1889        selector = (selector & ~3) | dpl;
1890        cpu_x86_load_seg_cache(env, R_CS, selector,
1891                       get_seg_base(e1, e2),
1892                       get_seg_limit(e1, e2),
1893                       e2);
1894        cpu_x86_set_cpl(env, dpl);
1895        SET_ESP(sp, sp_mask);
1896        EIP = offset;
1897    }
1898}
1899
1900/* real and vm86 mode iret */
1901void helper_iret_real(CPUX86State *env, int shift)
1902{
1903    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1904    target_ulong ssp;
1905    int eflags_mask;
1906
1907    sp_mask = 0xffff; /* XXXX: use SS segment size? */
1908    sp = ESP;
1909    ssp = env->segs[R_SS].base;
1910    if (shift == 1) {
1911        /* 32 bits */
1912        POPL(ssp, sp, sp_mask, new_eip);
1913        POPL(ssp, sp, sp_mask, new_cs);
1914        new_cs &= 0xffff;
1915        POPL(ssp, sp, sp_mask, new_eflags);
1916    } else {
1917        /* 16 bits */
1918        POPW(ssp, sp, sp_mask, new_eip);
1919        POPW(ssp, sp, sp_mask, new_cs);
1920        POPW(ssp, sp, sp_mask, new_eflags);
1921    }
1922    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
1923    env->segs[R_CS].selector = new_cs;
1924    env->segs[R_CS].base = (new_cs << 4);
1925    env->eip = new_eip;
1926    if (env->eflags & VM_MASK) {
1927        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1928            NT_MASK;
1929    } else {
1930        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1931            RF_MASK | NT_MASK;
1932    }
1933    if (shift == 0) {
1934        eflags_mask &= 0xffff;
1935    }
1936    cpu_load_eflags(env, new_eflags, eflags_mask);
1937    env->hflags2 &= ~HF2_NMI_MASK;
1938}
1939
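/* on a return to an outer privilege level, data segment registers that
   are not accessible at the new CPL must be invalidated */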
1940static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
1941{
1942    int dpl;
1943    uint32_t e2;
1944
1945    /* XXX: on x86_64, we do not want to nullify FS and GS because
1946       they may still contain a valid base. I would be interested to
1947       know how a real x86_64 CPU behaves */
1948    if ((seg_reg == R_FS || seg_reg == R_GS) &&
1949        (env->segs[seg_reg].selector & 0xfffc) == 0) {
1950        return;
1951    }
1952
1953    e2 = env->segs[seg_reg].flags;
1954    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1955    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1956        /* data or non conforming code segment */
1957        if (dpl < cpl) {
1958            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
1959        }
1960    }
1961}
1962
1963/* protected mode lret/iret */
1964static inline void helper_ret_protected(CPUX86State *env, int shift,
1965                                        int is_iret, int addend)
1966{
1967    uint32_t new_cs, new_eflags, new_ss;
1968    uint32_t new_es, new_ds, new_fs, new_gs;
1969    uint32_t e1, e2, ss_e1, ss_e2;
1970    int cpl, dpl, rpl, eflags_mask, iopl;
1971    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
1972
1973#ifdef TARGET_X86_64
1974    if (shift == 2) {
1975        sp_mask = -1;
1976    } else
1977#endif
1978    {
1979        sp_mask = get_sp_mask(env->segs[R_SS].flags);
1980    }
1981    sp = ESP;
1982    ssp = env->segs[R_SS].base;
1983    new_eflags = 0; /* avoid warning */
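    /* pop the far return frame: (E)IP and CS, plus the flags for iret */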
1984#ifdef TARGET_X86_64
1985    if (shift == 2) {
1986        POPQ(sp, new_eip);
1987        POPQ(sp, new_cs);
1988        new_cs &= 0xffff;
1989        if (is_iret) {
1990            POPQ(sp, new_eflags);
1991        }
1992    } else
1993#endif
1994    {
1995        if (shift == 1) {
1996            /* 32 bits */
1997            POPL(ssp, sp, sp_mask, new_eip);
1998            POPL(ssp, sp, sp_mask, new_cs);
1999            new_cs &= 0xffff;
2000            if (is_iret) {
2001                POPL(ssp, sp, sp_mask, new_eflags);
2002                if (new_eflags & VM_MASK) {
2003                    goto return_to_vm86;
2004                }
2005            }
2006        } else {
2007            /* 16 bits */
2008            POPW(ssp, sp, sp_mask, new_eip);
2009            POPW(ssp, sp, sp_mask, new_cs);
2010            if (is_iret) {
2011                POPW(ssp, sp, sp_mask, new_eflags);
2012            }
2013        }
2014    }
2015    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2016              new_cs, new_eip, shift, addend);
2017    LOG_PCALL_STATE(env);
2018    if ((new_cs & 0xfffc) == 0) {
2019        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2020    }
2021    if (load_segment(env, &e1, &e2, new_cs) != 0) {
2022        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2023    }
2024    if (!(e2 & DESC_S_MASK) ||
2025        !(e2 & DESC_CS_MASK)) {
2026        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2027    }
2028    cpl = env->hflags & HF_CPL_MASK;
2029    rpl = new_cs & 3;
2030    if (rpl < cpl) {
2031        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2032    }
2033    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2034    if (e2 & DESC_C_MASK) {
2035        if (dpl > rpl) {
2036            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2037        }
2038    } else {
2039        if (dpl != rpl) {
2040            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2041        }
2042    }
2043    if (!(e2 & DESC_P_MASK)) {
2044        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2045    }
2046
2047    sp += addend;
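    /* in 64-bit mode, iret always pops SS:RSP, so the same privilege
       fast path is only taken there for lret */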
2048    if (rpl == cpl &&
2049        (!(env->hflags & HF_CS64_MASK) || !is_iret)) {
2050        /* return to same privilege level */
2051        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2052                       get_seg_base(e1, e2),
2053                       get_seg_limit(e1, e2),
2054                       e2);
2055    } else {
2056        /* return to different privilege level */
2057#ifdef TARGET_X86_64
2058        if (shift == 2) {
2059            POPQ(sp, new_esp);
2060            POPQ(sp, new_ss);
2061            new_ss &= 0xffff;
2062        } else
2063#endif
2064        {
2065            if (shift == 1) {
2066                /* 32 bits */
2067                POPL(ssp, sp, sp_mask, new_esp);
2068                POPL(ssp, sp, sp_mask, new_ss);
2069                new_ss &= 0xffff;
2070            } else {
2071                /* 16 bits */
2072                POPW(ssp, sp, sp_mask, new_esp);
2073                POPW(ssp, sp, sp_mask, new_ss);
2074            }
2075        }
2076        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2077                  new_ss, new_esp);
2078        if ((new_ss & 0xfffc) == 0) {
2079#ifdef TARGET_X86_64
2080            /* NULL ss is allowed in long mode if cpl != 3 */
2081            /* XXX: test CS64? */
2082            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2083                cpu_x86_load_seg_cache(env, R_SS, new_ss,
2084                                       0, 0xffffffff,
2085                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2086                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2087                                       DESC_W_MASK | DESC_A_MASK);
2088                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2089            } else
2090#endif
2091            {
2092                raise_exception_err(env, EXCP0D_GPF, 0);
2093            }
2094        } else {
2095            if ((new_ss & 3) != rpl) {
2096                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2097            }
2098            if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
2099                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2100            }
2101            if (!(ss_e2 & DESC_S_MASK) ||
2102                (ss_e2 & DESC_CS_MASK) ||
2103                !(ss_e2 & DESC_W_MASK)) {
2104                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2105            }
2106            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2107            if (dpl != rpl) {
2108                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2109            }
2110            if (!(ss_e2 & DESC_P_MASK)) {
2111                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
2112            }
2113            cpu_x86_load_seg_cache(env, R_SS, new_ss,
2114                                   get_seg_base(ss_e1, ss_e2),
2115                                   get_seg_limit(ss_e1, ss_e2),
2116                                   ss_e2);
2117        }
2118
2119        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2120                       get_seg_base(e1, e2),
2121                       get_seg_limit(e1, e2),
2122                       e2);
2123        cpu_x86_set_cpl(env, rpl);
2124        sp = new_esp;
2125#ifdef TARGET_X86_64
2126        if (env->hflags & HF_CS64_MASK) {
2127            sp_mask = -1;
2128        } else
2129#endif
2130        {
2131            sp_mask = get_sp_mask(ss_e2);
2132        }
2133
2134        /* validate data segments */
2135        validate_seg(env, R_ES, rpl);
2136        validate_seg(env, R_DS, rpl);
2137        validate_seg(env, R_FS, rpl);
2138        validate_seg(env, R_GS, rpl);
2139
2140        sp += addend;
2141    }
2142    SET_ESP(sp, sp_mask);
2143    env->eip = new_eip;
2144    if (is_iret) {
2145        /* NOTE: 'cpl' is the _old_ CPL */
2146        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2147        if (cpl == 0) {
2148            eflags_mask |= IOPL_MASK;
2149        }
2150        iopl = (env->eflags >> IOPL_SHIFT) & 3;
2151        if (cpl <= iopl) {
2152            eflags_mask |= IF_MASK;
2153        }
2154        if (shift == 0) {
2155            eflags_mask &= 0xffff;
2156        }
2157        cpu_load_eflags(env, new_eflags, eflags_mask);
2158    }
2159    return;
2160
2161 return_to_vm86:
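    /* rest of the 32-bit iret frame when returning to vm86:
       ESP, SS, then ES, DS, FS and GS */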
2162    POPL(ssp, sp, sp_mask, new_esp);
2163    POPL(ssp, sp, sp_mask, new_ss);
2164    POPL(ssp, sp, sp_mask, new_es);
2165    POPL(ssp, sp, sp_mask, new_ds);
2166    POPL(ssp, sp, sp_mask, new_fs);
2167    POPL(ssp, sp, sp_mask, new_gs);
2168
2169    /* modify processor state */
2170    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2171                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2172                    VIP_MASK);
2173    load_seg_vm(env, R_CS, new_cs & 0xffff);
2174    cpu_x86_set_cpl(env, 3);
2175    load_seg_vm(env, R_SS, new_ss & 0xffff);
2176    load_seg_vm(env, R_ES, new_es & 0xffff);
2177    load_seg_vm(env, R_DS, new_ds & 0xffff);
2178    load_seg_vm(env, R_FS, new_fs & 0xffff);
2179    load_seg_vm(env, R_GS, new_gs & 0xffff);
2180
2181    env->eip = new_eip & 0xffff;
2182    ESP = new_esp;
2183}
2184
2185void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2186{
2187    int tss_selector, type;
2188    uint32_t e1, e2;
2189
2190    /* NT set: iret is a task return through the back link in the current TSS */
2191    if (env->eflags & NT_MASK) {
2192#ifdef TARGET_X86_64
2193        if (env->hflags & HF_LMA_MASK) {
2194            raise_exception_err(env, EXCP0D_GPF, 0);
2195        }
2196#endif
2197        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
2198        if (tss_selector & 4) {
2199            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2200        }
2201        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
2202            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2203        }
2204        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2205        /* NOTE: the 0x17 mask keeps the S bit and folds the busy 286/386
2206           TSS types (3 and 11) to 3, so this checks both at once */
2206        if (type != 3) {
2207            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2208        }
2209        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2210    } else {
2211        helper_ret_protected(env, shift, 1, 0);
2212    }
2213    env->hflags2 &= ~HF2_NMI_MASK;
2214}
2215
2216void helper_lret_protected(CPUX86State *env, int shift, int addend)
2217{
2218    helper_ret_protected(env, shift, 0, addend);
2219}
2220
2221void helper_sysenter(CPUX86State *env)
2222{
2223    if (env->sysenter_cs == 0) {
2224        raise_exception_err(env, EXCP0D_GPF, 0);
2225    }
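    /* sysenter enters flat CPL0 segments: CS comes from
       IA32_SYSENTER_CS and SS is the next descriptor (CS + 8) */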
2226    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2227    cpu_x86_set_cpl(env, 0);
2228
2229#ifdef TARGET_X86_64
2230    if (env->hflags & HF_LMA_MASK) {
2231        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2232                               0, 0xffffffff,
2233                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2234                               DESC_S_MASK |
2235                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2236                               DESC_L_MASK);
2237    } else
2238#endif
2239    {
2240        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2241                               0, 0xffffffff,
2242                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2243                               DESC_S_MASK |
2244                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2245    }
2246    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2247                           0, 0xffffffff,
2248                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2249                           DESC_S_MASK |
2250                           DESC_W_MASK | DESC_A_MASK);
2251    ESP = env->sysenter_esp;
2252    EIP = env->sysenter_eip;
2253}
2254
2255void helper_sysexit(CPUX86State *env, int dflag)
2256{
2257    int cpl;
2258
2259    cpl = env->hflags & HF_CPL_MASK;
2260    if (env->sysenter_cs == 0 || cpl != 0) {
2261        raise_exception_err(env, EXCP0D_GPF, 0);
2262    }
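    /* sysexit returns to flat CPL3 segments at fixed offsets from
       IA32_SYSENTER_CS: +16/+24 for 32-bit, +32/+40 for 64-bit */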
2263    cpu_x86_set_cpl(env, 3);
2264#ifdef TARGET_X86_64
2265    if (dflag == 2) {
2266        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2267                               3, 0, 0xffffffff,
2268                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2269                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2270                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2271                               DESC_L_MASK);
2272        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2273                               3, 0, 0xffffffff,
2274                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2275                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2276                               DESC_W_MASK | DESC_A_MASK);
2277    } else
2278#endif
2279    {
2280        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2281                               3, 0, 0xffffffff,
2282                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2283                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2284                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2285        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2286                               3, 0, 0xffffffff,
2287                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2288                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2289                               DESC_W_MASK | DESC_A_MASK);
2290    }
2291    ESP = ECX;
2292    EIP = EDX;
2293}
2294
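/* lsl: return the expanded segment limit; ZF reports whether the
   descriptor is visible at the current privilege level and has a limit */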
2295target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2296{
2297    unsigned int limit;
2298    uint32_t e1, e2, eflags, selector;
2299    int rpl, dpl, cpl, type;
2300
2301    selector = selector1 & 0xffff;
2302    eflags = cpu_cc_compute_all(env, CC_OP);
2303    if ((selector & 0xfffc) == 0) {
2304        goto fail;
2305    }
2306    if (load_segment(env, &e1, &e2, selector) != 0) {
2307        goto fail;
2308    }
2309    rpl = selector & 3;
2310    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2311    cpl = env->hflags & HF_CPL_MASK;
2312    if (e2 & DESC_S_MASK) {
2313        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2314            /* conforming */
2315        } else {
2316            if (dpl < cpl || dpl < rpl) {
2317                goto fail;
2318            }
2319        }
2320    } else {
2321        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2322        switch (type) {
2323        case 1:
2324        case 2:
2325        case 3:
2326        case 9:
2327        case 11:
2328            break;
2329        default:
2330            goto fail;
2331        }
2332        if (dpl < cpl || dpl < rpl) {
2333        fail:
2334            CC_SRC = eflags & ~CC_Z;
2335            return 0;
2336        }
2337    }
2338    limit = get_seg_limit(e1, e2);
2339    CC_SRC = eflags | CC_Z;
2340    return limit;
2341}
2342
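/* lar: return the access rights bytes of the descriptor; ZF reports
   success */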
2343target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2344{
2345    uint32_t e1, e2, eflags, selector;
2346    int rpl, dpl, cpl, type;
2347
2348    selector = selector1 & 0xffff;
2349    eflags = cpu_cc_compute_all(env, CC_OP);
2350    if ((selector & 0xfffc) == 0) {
2351        goto fail;
2352    }
2353    if (load_segment(env, &e1, &e2, selector) != 0) {
2354        goto fail;
2355    }
2356    rpl = selector & 3;
2357    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2358    cpl = env->hflags & HF_CPL_MASK;
2359    if (e2 & DESC_S_MASK) {
2360        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2361            /* conforming */
2362        } else {
2363            if (dpl < cpl || dpl < rpl) {
2364                goto fail;
2365            }
2366        }
2367    } else {
2368        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2369        switch (type) {
2370        case 1:
2371        case 2:
2372        case 3:
2373        case 4:
2374        case 5:
2375        case 9:
2376        case 11:
2377        case 12:
2378            break;
2379        default:
2380            goto fail;
2381        }
2382        if (dpl < cpl || dpl < rpl) {
2383        fail:
2384            CC_SRC = eflags & ~CC_Z;
2385            return 0;
2386        }
2387    }
2388    CC_SRC = eflags | CC_Z;
2389    return e2 & 0x00f0ff00;
2390}
2391
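/* verr: set ZF if the segment is readable at the current CPL/RPL */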
2392void helper_verr(CPUX86State *env, target_ulong selector1)
2393{
2394    uint32_t e1, e2, eflags, selector;
2395    int rpl, dpl, cpl;
2396
2397    selector = selector1 & 0xffff;
2398    eflags = cpu_cc_compute_all(env, CC_OP);
2399    if ((selector & 0xfffc) == 0) {
2400        goto fail;
2401    }
2402    if (load_segment(env, &e1, &e2, selector) != 0) {
2403        goto fail;
2404    }
2405    if (!(e2 & DESC_S_MASK)) {
2406        goto fail;
2407    }
2408    rpl = selector & 3;
2409    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2410    cpl = env->hflags & HF_CPL_MASK;
2411    if (e2 & DESC_CS_MASK) {
2412        if (!(e2 & DESC_R_MASK)) {
2413            goto fail;
2414        }
2415        if (!(e2 & DESC_C_MASK)) {
2416            if (dpl < cpl || dpl < rpl) {
2417                goto fail;
2418            }
2419        }
2420    } else {
2421        if (dpl < cpl || dpl < rpl) {
2422        fail:
2423            CC_SRC = eflags & ~CC_Z;
2424            return;
2425        }
2426    }
2427    CC_SRC = eflags | CC_Z;
2428}
2429
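/* verw: set ZF if the segment is writable at the current CPL/RPL */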
2430void helper_verw(CPUX86State *env, target_ulong selector1)
2431{
2432    uint32_t e1, e2, eflags, selector;
2433    int rpl, dpl, cpl;
2434
2435    selector = selector1 & 0xffff;
2436    eflags = cpu_cc_compute_all(env, CC_OP);
2437    if ((selector & 0xfffc) == 0) {
2438        goto fail;
2439    }
2440    if (load_segment(env, &e1, &e2, selector) != 0) {
2441        goto fail;
2442    }
2443    if (!(e2 & DESC_S_MASK)) {
2444        goto fail;
2445    }
2446    rpl = selector & 3;
2447    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2448    cpl = env->hflags & HF_CPL_MASK;
2449    if (e2 & DESC_CS_MASK) {
2450        goto fail;
2451    } else {
2452        if (dpl < cpl || dpl < rpl) {
2453            goto fail;
2454        }
2455        if (!(e2 & DESC_W_MASK)) {
2456        fail:
2457            CC_SRC = eflags & ~CC_Z;
2458            return;
2459        }
2460    }
2461    CC_SRC = eflags | CC_Z;
2462}
2463
2464#if defined(CONFIG_USER_ONLY)
2465void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2466{
2467    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2468        selector &= 0xffff;
2469        cpu_x86_load_seg_cache(env, seg_reg, selector,
2470                               (selector << 4), 0xffff, 0);
2471    } else {
2472        helper_load_seg(env, seg_reg, selector);
2473    }
2474}
2475#endif
2476