qemu/target/i386/seg_helper.c
   1/*
   2 *  x86 segmentation related helpers:
   3 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
   4 *
   5 *  Copyright (c) 2003 Fabrice Bellard
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "cpu.h"
  23#include "qemu/log.h"
  24#include "exec/helper-proto.h"
  25#include "exec/exec-all.h"
  26#include "exec/cpu_ldst.h"
  27#include "exec/log.h"
  28
  29//#define DEBUG_PCALL
  30
  31#ifdef DEBUG_PCALL
  32# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
  33# define LOG_PCALL_STATE(cpu)                                  \
  34    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
  35#else
  36# define LOG_PCALL(...) do { } while (0)
  37# define LOG_PCALL_STATE(cpu) do { } while (0)
  38#endif
  39
  40#ifdef CONFIG_USER_ONLY
  41#define MEMSUFFIX _kernel
  42#define DATA_SIZE 1
  43#include "exec/cpu_ldst_useronly_template.h"
  44
  45#define DATA_SIZE 2
  46#include "exec/cpu_ldst_useronly_template.h"
  47
  48#define DATA_SIZE 4
  49#include "exec/cpu_ldst_useronly_template.h"
  50
  51#define DATA_SIZE 8
  52#include "exec/cpu_ldst_useronly_template.h"
  53#undef MEMSUFFIX
  54#else
  55#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
  56#define MEMSUFFIX _kernel
  57#define DATA_SIZE 1
  58#include "exec/cpu_ldst_template.h"
  59
  60#define DATA_SIZE 2
  61#include "exec/cpu_ldst_template.h"
  62
  63#define DATA_SIZE 4
  64#include "exec/cpu_ldst_template.h"
  65
  66#define DATA_SIZE 8
  67#include "exec/cpu_ldst_template.h"
  68#undef CPU_MMU_INDEX
  69#undef MEMSUFFIX
  70#endif
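/*
 * Note: the template expansions above generate the cpu_ld*_kernel() /
 * cpu_st*_kernel() accessors (and their *_ra variants that take a return
 * address) used throughout this file.  They perform 1/2/4/8-byte memory
 * accesses with kernel (CPL 0) privilege, which is what descriptor table
 * and TSS accesses require.
 */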
  71
   72/* return non-zero on error */
  73static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
  74                               uint32_t *e2_ptr, int selector,
  75                               uintptr_t retaddr)
  76{
  77    SegmentCache *dt;
  78    int index;
  79    target_ulong ptr;
  80
  81    if (selector & 0x4) {
  82        dt = &env->ldt;
  83    } else {
  84        dt = &env->gdt;
  85    }
  86    index = selector & ~7;
  87    if ((index + 7) > dt->limit) {
  88        return -1;
  89    }
  90    ptr = dt->base + index;
  91    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
  92    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
  93    return 0;
  94}
  95
  96static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
  97                               uint32_t *e2_ptr, int selector)
  98{
  99    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
 100}
 101
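/*
 * Layout of one 8-byte segment descriptor as read by load_segment_ra():
 *   e1 (low dword):  limit[15:0] | base[15:0] << 16
 *   e2 (high dword): base[23:16] (bits 0-7) | type/S/DPL/P (bits 8-15) |
 *                    limit[19:16] (bits 16-19) | AVL/L/D-B/G (bits 20-23) |
 *                    base[31:24] (bits 24-31)
 * The helpers below reassemble the base and limit from these two words.
 */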
 102static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
 103{
 104    unsigned int limit;
 105
 106    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
 107    if (e2 & DESC_G_MASK) {
 108        limit = (limit << 12) | 0xfff;
 109    }
 110    return limit;
 111}
 112
 113static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
 114{
 115    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
 116}
 117
 118static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
 119                                         uint32_t e2)
 120{
 121    sc->base = get_seg_base(e1, e2);
 122    sc->limit = get_seg_limit(e1, e2);
 123    sc->flags = e2;
 124}
 125
 126/* init the segment cache in vm86 mode. */
 127static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
 128{
 129    selector &= 0xffff;
 130
 131    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
 132                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
 133                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
 134}
 135
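/*
 * The TSS holds one stack pointer (SS:ESP or SS:SP) per privilege level
 * 0-2.  In a 32-bit TSS each pair occupies 8 bytes starting at offset 4;
 * in a 16-bit TSS each pair occupies 4 bytes starting at offset 2.  Hence
 * index = (dpl * 4 + 2) << shift below, with shift = 1 for a 32-bit TSS.
 */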
 136static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
 137                                       uint32_t *esp_ptr, int dpl,
 138                                       uintptr_t retaddr)
 139{
 140    X86CPU *cpu = x86_env_get_cpu(env);
 141    int type, index, shift;
 142
 143#if 0
 144    {
 145        int i;
 146        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
 147        for (i = 0; i < env->tr.limit; i++) {
 148            printf("%02x ", env->tr.base[i]);
 149            if ((i & 7) == 7) {
 150                printf("\n");
 151            }
 152        }
 153        printf("\n");
 154    }
 155#endif
 156
 157    if (!(env->tr.flags & DESC_P_MASK)) {
 158        cpu_abort(CPU(cpu), "invalid tss");
 159    }
 160    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
 161    if ((type & 7) != 1) {
 162        cpu_abort(CPU(cpu), "invalid tss type");
 163    }
 164    shift = type >> 3;
 165    index = (dpl * 4 + 2) << shift;
 166    if (index + (4 << shift) - 1 > env->tr.limit) {
 167        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
 168    }
 169    if (shift == 0) {
 170        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
 171        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
 172    } else {
 173        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
 174        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
 175    }
 176}
 177
 178static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
 179                         uintptr_t retaddr)
 180{
 181    uint32_t e1, e2;
 182    int rpl, dpl;
 183
 184    if ((selector & 0xfffc) != 0) {
 185        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
 186            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
 187        }
 188        if (!(e2 & DESC_S_MASK)) {
 189            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
 190        }
 191        rpl = selector & 3;
 192        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
 193        if (seg_reg == R_CS) {
 194            if (!(e2 & DESC_CS_MASK)) {
 195                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
 196            }
 197            if (dpl != rpl) {
 198                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
 199            }
 200        } else if (seg_reg == R_SS) {
 201            /* SS must be writable data */
 202            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
 203                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
 204            }
 205            if (dpl != cpl || dpl != rpl) {
 206                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
 207            }
 208        } else {
 209            /* not readable code */
 210            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
 211                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
 212            }
  213            /* if data or non-conforming code, check the rights */
 214            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
 215                if (dpl < cpl || dpl < rpl) {
 216                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
 217                }
 218            }
 219        }
 220        if (!(e2 & DESC_P_MASK)) {
 221            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
 222        }
 223        cpu_x86_load_seg_cache(env, seg_reg, selector,
 224                               get_seg_base(e1, e2),
 225                               get_seg_limit(e1, e2),
 226                               e2);
 227    } else {
 228        if (seg_reg == R_SS || seg_reg == R_CS) {
 229            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
 230        }
 231    }
 232}
 233
 234#define SWITCH_TSS_JMP  0
 235#define SWITCH_TSS_IRET 1
 236#define SWITCH_TSS_CALL 2
 237
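/*
 * System descriptor types relevant to task switching:
 *   1 = available 16-bit TSS   3 = busy 16-bit TSS   5 = task gate
 *   9 = available 32-bit TSS  11 = busy 32-bit TSS
 * Bit 3 of the type distinguishes a 32-bit (set) from a 16-bit (clear) TSS,
 * which is why the minimal limit checked below is 103 (0x67, a 104-byte
 * 32-bit TSS) or 43 (0x2b, a 44-byte 16-bit TSS).
 */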
 238/* XXX: restore CPU state in registers (PowerPC case) */
 239static void switch_tss_ra(CPUX86State *env, int tss_selector,
 240                          uint32_t e1, uint32_t e2, int source,
 241                          uint32_t next_eip, uintptr_t retaddr)
 242{
 243    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
 244    target_ulong tss_base;
 245    uint32_t new_regs[8], new_segs[6];
 246    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
 247    uint32_t old_eflags, eflags_mask;
 248    SegmentCache *dt;
 249    int index;
 250    target_ulong ptr;
 251
 252    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
 253    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
 254              source);
 255
 256    /* if task gate, we read the TSS segment and we load it */
 257    if (type == 5) {
 258        if (!(e2 & DESC_P_MASK)) {
 259            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
 260        }
 261        tss_selector = e1 >> 16;
 262        if (tss_selector & 4) {
 263            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
 264        }
 265        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
 266            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
 267        }
 268        if (e2 & DESC_S_MASK) {
 269            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
 270        }
 271        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
 272        if ((type & 7) != 1) {
 273            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
 274        }
 275    }
 276
 277    if (!(e2 & DESC_P_MASK)) {
 278        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
 279    }
 280
 281    if (type & 8) {
 282        tss_limit_max = 103;
 283    } else {
 284        tss_limit_max = 43;
 285    }
 286    tss_limit = get_seg_limit(e1, e2);
 287    tss_base = get_seg_base(e1, e2);
 288    if ((tss_selector & 4) != 0 ||
 289        tss_limit < tss_limit_max) {
 290        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
 291    }
 292    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
 293    if (old_type & 8) {
 294        old_tss_limit_max = 103;
 295    } else {
 296        old_tss_limit_max = 43;
 297    }
 298
 299    /* read all the registers from the new TSS */
 300    if (type & 8) {
 301        /* 32 bit */
 302        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
 303        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
 304        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
 305        for (i = 0; i < 8; i++) {
 306            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
 307                                            retaddr);
 308        }
 309        for (i = 0; i < 6; i++) {
 310            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
 311                                             retaddr);
 312        }
 313        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
 314        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
 315    } else {
 316        /* 16 bit */
 317        new_cr3 = 0;
 318        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
 319        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
 320        for (i = 0; i < 8; i++) {
 321            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
 322                                             retaddr) | 0xffff0000;
 323        }
 324        for (i = 0; i < 4; i++) {
 325            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
 326                                             retaddr);
 327        }
 328        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
 329        new_segs[R_FS] = 0;
 330        new_segs[R_GS] = 0;
 331        new_trap = 0;
 332    }
  333    /* XXX: avoid a compiler warning; see
  334     http://support.amd.com/us/Processor_TechDocs/24593.pdf
  335     chapters 12.2.5 and 13.2.4 on how to implement the TSS Trap bit */
 336    (void)new_trap;
 337
  338    /* NOTE: we must avoid memory exceptions during the task switch,
  339       so we make dummy accesses beforehand */
  340    /* XXX: it can still fail in some cases, so a bigger hack is
  341       necessary to validate the TLB after the accesses have been done */
 342
 343    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
 344    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
 345    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
 346    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
 347
 348    /* clear busy bit (it is restartable) */
 349    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
 350        target_ulong ptr;
 351        uint32_t e2;
 352
 353        ptr = env->gdt.base + (env->tr.selector & ~7);
 354        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
 355        e2 &= ~DESC_TSS_BUSY_MASK;
 356        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
 357    }
 358    old_eflags = cpu_compute_eflags(env);
 359    if (source == SWITCH_TSS_IRET) {
 360        old_eflags &= ~NT_MASK;
 361    }
 362
 363    /* save the current state in the old TSS */
 364    if (type & 8) {
 365        /* 32 bit */
 366        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
 367        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
 368        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
 369        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
 370        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
 371        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
 372        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
 373        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
 374        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
 375        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
 376        for (i = 0; i < 6; i++) {
 377            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
 378                              env->segs[i].selector, retaddr);
 379        }
 380    } else {
 381        /* 16 bit */
 382        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
 383        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
 384        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
 385        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
 386        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
 387        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
 388        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
 389        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
 390        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
 391        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
 392        for (i = 0; i < 4; i++) {
 393            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
 394                              env->segs[i].selector, retaddr);
 395        }
 396    }
 397
  398    /* now if an exception occurs, it will occur in the next task
 399       context */
 400
 401    if (source == SWITCH_TSS_CALL) {
 402        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
 403        new_eflags |= NT_MASK;
 404    }
 405
 406    /* set busy bit */
 407    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
 408        target_ulong ptr;
 409        uint32_t e2;
 410
 411        ptr = env->gdt.base + (tss_selector & ~7);
 412        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
 413        e2 |= DESC_TSS_BUSY_MASK;
 414        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
 415    }
 416
 417    /* set the new CPU state */
  418    /* from this point, any exception which occurs can cause problems */
 419    env->cr[0] |= CR0_TS_MASK;
 420    env->hflags |= HF_TS_MASK;
 421    env->tr.selector = tss_selector;
 422    env->tr.base = tss_base;
 423    env->tr.limit = tss_limit;
 424    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
 425
 426    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
 427        cpu_x86_update_cr3(env, new_cr3);
 428    }
 429
  430    /* load all registers that cannot raise an exception first, then
  431       reload the ones that may fault */
 432    env->eip = new_eip;
 433    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
 434        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
 435    if (!(type & 8)) {
 436        eflags_mask &= 0xffff;
 437    }
 438    cpu_load_eflags(env, new_eflags, eflags_mask);
 439    /* XXX: what to do in 16 bit case? */
 440    env->regs[R_EAX] = new_regs[0];
 441    env->regs[R_ECX] = new_regs[1];
 442    env->regs[R_EDX] = new_regs[2];
 443    env->regs[R_EBX] = new_regs[3];
 444    env->regs[R_ESP] = new_regs[4];
 445    env->regs[R_EBP] = new_regs[5];
 446    env->regs[R_ESI] = new_regs[6];
 447    env->regs[R_EDI] = new_regs[7];
 448    if (new_eflags & VM_MASK) {
 449        for (i = 0; i < 6; i++) {
 450            load_seg_vm(env, i, new_segs[i]);
 451        }
 452    } else {
  453        /* first load just the selectors, as the rest may trigger exceptions */
 454        for (i = 0; i < 6; i++) {
 455            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
 456        }
 457    }
 458
 459    env->ldt.selector = new_ldt & ~4;
 460    env->ldt.base = 0;
 461    env->ldt.limit = 0;
 462    env->ldt.flags = 0;
 463
 464    /* load the LDT */
 465    if (new_ldt & 4) {
 466        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
 467    }
 468
 469    if ((new_ldt & 0xfffc) != 0) {
 470        dt = &env->gdt;
 471        index = new_ldt & ~7;
 472        if ((index + 7) > dt->limit) {
 473            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
 474        }
 475        ptr = dt->base + index;
 476        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
 477        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
 478        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
 479            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
 480        }
 481        if (!(e2 & DESC_P_MASK)) {
 482            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
 483        }
 484        load_seg_cache_raw_dt(&env->ldt, e1, e2);
 485    }
 486
 487    /* load the segments */
 488    if (!(new_eflags & VM_MASK)) {
 489        int cpl = new_segs[R_CS] & 3;
 490        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
 491        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
 492        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
 493        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
 494        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
 495        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
 496    }
 497
 498    /* check that env->eip is in the CS segment limits */
 499    if (new_eip > env->segs[R_CS].limit) {
 500        /* XXX: different exception if CALL? */
 501        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
 502    }
 503
 504#ifndef CONFIG_USER_ONLY
 505    /* reset local breakpoints */
 506    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
 507        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
 508    }
 509#endif
 510}
 511
 512static void switch_tss(CPUX86State *env, int tss_selector,
 513                       uint32_t e1, uint32_t e2, int source,
 514                        uint32_t next_eip)
 515{
 516    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
 517}
 518
 519static inline unsigned int get_sp_mask(unsigned int e2)
 520{
 521    if (e2 & DESC_B_MASK) {
 522        return 0xffffffff;
 523    } else {
 524        return 0xffff;
 525    }
 526}
 527
 528static int exception_has_error_code(int intno)
 529{
 530    switch (intno) {
 531    case 8:
 532    case 10:
 533    case 11:
 534    case 12:
 535    case 13:
 536    case 14:
 537    case 17:
 538        return 1;
 539    }
 540    return 0;
 541}
 542
 543#ifdef TARGET_X86_64
 544#define SET_ESP(val, sp_mask)                                   \
 545    do {                                                        \
 546        if ((sp_mask) == 0xffff) {                              \
 547            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
 548                ((val) & 0xffff);                               \
 549        } else if ((sp_mask) == 0xffffffffLL) {                 \
 550            env->regs[R_ESP] = (uint32_t)(val);                 \
 551        } else {                                                \
 552            env->regs[R_ESP] = (val);                           \
 553        }                                                       \
 554    } while (0)
 555#else
 556#define SET_ESP(val, sp_mask)                                   \
 557    do {                                                        \
 558        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
 559            ((val) & (sp_mask));                                \
 560    } while (0)
 561#endif
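/*
 * Note: SET_ESP only updates the part of ESP/RSP selected by the stack
 * segment mask: a 16-bit stack updates just SP, while on a 64-bit target a
 * 32-bit stack write stores the zero-extended value into the full RSP.
 */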
 562
  563/* on 64-bit targets this addition can overflow 32 bits, so this segment
  564 * addition macro is used to truncate the value to 32 bits whenever needed */
 565#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
 566
  567/* XXX: add an is_user flag to have proper security support */
 568#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
 569    {                                                            \
 570        sp -= 2;                                                 \
 571        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
 572    }
 573
 574#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
 575    {                                                                   \
 576        sp -= 4;                                                        \
 577        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
 578    }
 579
 580#define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
 581    {                                                            \
 582        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
 583        sp += 2;                                                 \
 584    }
 585
 586#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
 587    {                                                                   \
 588        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
 589        sp += 4;                                                        \
 590    }
 591
 592#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
 593#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
 594#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
 595#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
 596
 597/* protected mode interrupt */
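/*
 * Rough sequence (following the usual interrupt/exception delivery rules):
 * fetch the gate from the IDT, check its DPL and present bits, optionally
 * switch to an inner stack taken from the TSS, push (on a stack switch) the
 * old SS:ESP, then EFLAGS, CS, EIP and an optional error code, and finally
 * load the new CS:EIP from the gate.  Task gates instead go through
 * switch_tss().
 */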
 598static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
 599                                   int error_code, unsigned int next_eip,
 600                                   int is_hw)
 601{
 602    SegmentCache *dt;
 603    target_ulong ptr, ssp;
 604    int type, dpl, selector, ss_dpl, cpl;
 605    int has_error_code, new_stack, shift;
 606    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
 607    uint32_t old_eip, sp_mask;
 608    int vm86 = env->eflags & VM_MASK;
 609
 610    has_error_code = 0;
 611    if (!is_int && !is_hw) {
 612        has_error_code = exception_has_error_code(intno);
 613    }
 614    if (is_int) {
 615        old_eip = next_eip;
 616    } else {
 617        old_eip = env->eip;
 618    }
 619
 620    dt = &env->idt;
 621    if (intno * 8 + 7 > dt->limit) {
 622        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
 623    }
 624    ptr = dt->base + intno * 8;
 625    e1 = cpu_ldl_kernel(env, ptr);
 626    e2 = cpu_ldl_kernel(env, ptr + 4);
 627    /* check gate type */
 628    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
 629    switch (type) {
 630    case 5: /* task gate */
 631        /* must do that check here to return the correct error code */
 632        if (!(e2 & DESC_P_MASK)) {
 633            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
 634        }
 635        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
 636        if (has_error_code) {
 637            int type;
 638            uint32_t mask;
 639
 640            /* push the error code */
 641            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
 642            shift = type >> 3;
 643            if (env->segs[R_SS].flags & DESC_B_MASK) {
 644                mask = 0xffffffff;
 645            } else {
 646                mask = 0xffff;
 647            }
 648            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
 649            ssp = env->segs[R_SS].base + esp;
 650            if (shift) {
 651                cpu_stl_kernel(env, ssp, error_code);
 652            } else {
 653                cpu_stw_kernel(env, ssp, error_code);
 654            }
 655            SET_ESP(esp, mask);
 656        }
 657        return;
 658    case 6: /* 286 interrupt gate */
 659    case 7: /* 286 trap gate */
 660    case 14: /* 386 interrupt gate */
 661    case 15: /* 386 trap gate */
 662        break;
 663    default:
 664        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
 665        break;
 666    }
 667    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
 668    cpl = env->hflags & HF_CPL_MASK;
 669    /* check privilege if software int */
 670    if (is_int && dpl < cpl) {
 671        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
 672    }
 673    /* check valid bit */
 674    if (!(e2 & DESC_P_MASK)) {
 675        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
 676    }
 677    selector = e1 >> 16;
 678    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
 679    if ((selector & 0xfffc) == 0) {
 680        raise_exception_err(env, EXCP0D_GPF, 0);
 681    }
 682    if (load_segment(env, &e1, &e2, selector) != 0) {
 683        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
 684    }
 685    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
 686        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
 687    }
 688    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
 689    if (dpl > cpl) {
 690        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
 691    }
 692    if (!(e2 & DESC_P_MASK)) {
 693        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
 694    }
 695    if (e2 & DESC_C_MASK) {
 696        dpl = cpl;
 697    }
 698    if (dpl < cpl) {
 699        /* to inner privilege */
 700        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
 701        if ((ss & 0xfffc) == 0) {
 702            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
 703        }
 704        if ((ss & 3) != dpl) {
 705            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
 706        }
 707        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
 708            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
 709        }
 710        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
 711        if (ss_dpl != dpl) {
 712            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
 713        }
 714        if (!(ss_e2 & DESC_S_MASK) ||
 715            (ss_e2 & DESC_CS_MASK) ||
 716            !(ss_e2 & DESC_W_MASK)) {
 717            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
 718        }
 719        if (!(ss_e2 & DESC_P_MASK)) {
 720            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
 721        }
 722        new_stack = 1;
 723        sp_mask = get_sp_mask(ss_e2);
 724        ssp = get_seg_base(ss_e1, ss_e2);
  725    } else {
 726        /* to same privilege */
 727        if (vm86) {
 728            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
 729        }
 730        new_stack = 0;
 731        sp_mask = get_sp_mask(env->segs[R_SS].flags);
 732        ssp = env->segs[R_SS].base;
 733        esp = env->regs[R_ESP];
 734    }
 735
 736    shift = type >> 3;
 737
 738#if 0
 739    /* XXX: check that enough room is available */
 740    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
 741    if (vm86) {
 742        push_size += 8;
 743    }
 744    push_size <<= shift;
 745#endif
 746    if (shift == 1) {
 747        if (new_stack) {
 748            if (vm86) {
 749                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
 750                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
 751                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
 752                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
 753            }
 754            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
 755            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
 756        }
 757        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
 758        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
 759        PUSHL(ssp, esp, sp_mask, old_eip);
 760        if (has_error_code) {
 761            PUSHL(ssp, esp, sp_mask, error_code);
 762        }
 763    } else {
 764        if (new_stack) {
 765            if (vm86) {
 766                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
 767                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
 768                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
 769                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
 770            }
 771            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
 772            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
 773        }
 774        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
 775        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
 776        PUSHW(ssp, esp, sp_mask, old_eip);
 777        if (has_error_code) {
 778            PUSHW(ssp, esp, sp_mask, error_code);
 779        }
 780    }
 781
  782    /* interrupt gates clear the IF flag (trap gates do not) */
 783    if ((type & 1) == 0) {
 784        env->eflags &= ~IF_MASK;
 785    }
 786    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
 787
 788    if (new_stack) {
 789        if (vm86) {
 790            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
 791            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
 792            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
 793            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
 794        }
 795        ss = (ss & ~3) | dpl;
 796        cpu_x86_load_seg_cache(env, R_SS, ss,
 797                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
 798    }
 799    SET_ESP(esp, sp_mask);
 800
 801    selector = (selector & ~3) | dpl;
 802    cpu_x86_load_seg_cache(env, R_CS, selector,
 803                   get_seg_base(e1, e2),
 804                   get_seg_limit(e1, e2),
 805                   e2);
 806    env->eip = offset;
 807}
 808
 809#ifdef TARGET_X86_64
 810
 811#define PUSHQ_RA(sp, val, ra)                   \
 812    {                                           \
 813        sp -= 8;                                \
 814        cpu_stq_kernel_ra(env, sp, (val), ra);  \
 815    }
 816
 817#define POPQ_RA(sp, val, ra)                    \
 818    {                                           \
 819        val = cpu_ldq_kernel_ra(env, sp, ra);   \
 820        sp += 8;                                \
 821    }
 822
 823#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
 824#define POPQ(sp, val) POPQ_RA(sp, val, 0)
 825
 826static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
 827{
 828    X86CPU *cpu = x86_env_get_cpu(env);
 829    int index;
 830
 831#if 0
 832    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
 833           env->tr.base, env->tr.limit);
 834#endif
 835
 836    if (!(env->tr.flags & DESC_P_MASK)) {
 837        cpu_abort(CPU(cpu), "invalid tss");
 838    }
 839    index = 8 * level + 4;
 840    if ((index + 7) > env->tr.limit) {
 841        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
 842    }
 843    return cpu_ldq_kernel(env, env->tr.base + index);
 844}
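/*
 * In the 64-bit TSS, RSP0-RSP2 live at offsets 4, 12 and 20 and IST1-IST7
 * at offsets 36..84, so "index = 8 * level + 4" covers both cases when the
 * caller passes either the target DPL (0-2) or ist + 3 (4-10).
 */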
 845
 846/* 64 bit interrupt */
 847static void do_interrupt64(CPUX86State *env, int intno, int is_int,
 848                           int error_code, target_ulong next_eip, int is_hw)
 849{
 850    SegmentCache *dt;
 851    target_ulong ptr;
 852    int type, dpl, selector, cpl, ist;
 853    int has_error_code, new_stack;
 854    uint32_t e1, e2, e3, ss;
 855    target_ulong old_eip, esp, offset;
 856
 857    has_error_code = 0;
 858    if (!is_int && !is_hw) {
 859        has_error_code = exception_has_error_code(intno);
 860    }
 861    if (is_int) {
 862        old_eip = next_eip;
 863    } else {
 864        old_eip = env->eip;
 865    }
 866
 867    dt = &env->idt;
 868    if (intno * 16 + 15 > dt->limit) {
 869        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
 870    }
 871    ptr = dt->base + intno * 16;
 872    e1 = cpu_ldl_kernel(env, ptr);
 873    e2 = cpu_ldl_kernel(env, ptr + 4);
 874    e3 = cpu_ldl_kernel(env, ptr + 8);
 875    /* check gate type */
 876    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
 877    switch (type) {
 878    case 14: /* 386 interrupt gate */
 879    case 15: /* 386 trap gate */
 880        break;
 881    default:
 882        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
 883        break;
 884    }
 885    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
 886    cpl = env->hflags & HF_CPL_MASK;
 887    /* check privilege if software int */
 888    if (is_int && dpl < cpl) {
 889        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
 890    }
 891    /* check valid bit */
 892    if (!(e2 & DESC_P_MASK)) {
 893        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
 894    }
 895    selector = e1 >> 16;
 896    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
 897    ist = e2 & 7;
 898    if ((selector & 0xfffc) == 0) {
 899        raise_exception_err(env, EXCP0D_GPF, 0);
 900    }
 901
 902    if (load_segment(env, &e1, &e2, selector) != 0) {
 903        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
 904    }
 905    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
 906        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
 907    }
 908    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
 909    if (dpl > cpl) {
 910        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
 911    }
 912    if (!(e2 & DESC_P_MASK)) {
 913        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
 914    }
 915    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
 916        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
 917    }
 918    if (e2 & DESC_C_MASK) {
 919        dpl = cpl;
 920    }
 921    if (dpl < cpl || ist != 0) {
 922        /* to inner privilege */
 923        new_stack = 1;
 924        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
 925        ss = 0;
 926    } else {
 927        /* to same privilege */
 928        if (env->eflags & VM_MASK) {
 929            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
 930        }
 931        new_stack = 0;
 932        esp = env->regs[R_ESP];
 933    }
 934    esp &= ~0xfLL; /* align stack */
 935
 936    PUSHQ(esp, env->segs[R_SS].selector);
 937    PUSHQ(esp, env->regs[R_ESP]);
 938    PUSHQ(esp, cpu_compute_eflags(env));
 939    PUSHQ(esp, env->segs[R_CS].selector);
 940    PUSHQ(esp, old_eip);
 941    if (has_error_code) {
 942        PUSHQ(esp, error_code);
 943    }
 944
 945    /* interrupt gate clear IF mask */
 946    if ((type & 1) == 0) {
 947        env->eflags &= ~IF_MASK;
 948    }
 949    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
 950
 951    if (new_stack) {
 952        ss = 0 | dpl;
 953        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
 954    }
 955    env->regs[R_ESP] = esp;
 956
 957    selector = (selector & ~3) | dpl;
 958    cpu_x86_load_seg_cache(env, R_CS, selector,
 959                   get_seg_base(e1, e2),
 960                   get_seg_limit(e1, e2),
 961                   e2);
 962    env->eip = offset;
 963}
 964#endif
 965
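/*
 * SYSCALL/SYSRET selectors and entry points come from MSRs:
 *   STAR[47:32]  base CS selector for SYSCALL (SS is that value + 8)
 *   STAR[63:48]  base selector for SYSRET (CS and SS are derived from it)
 *   STAR[31:0]   legacy-mode SYSCALL EIP
 *   LSTAR/CSTAR  64-bit / compatibility-mode SYSCALL RIP
 *   SFMASK       (env->fmask) RFLAGS bits cleared on SYSCALL
 */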
 966#ifdef TARGET_X86_64
 967#if defined(CONFIG_USER_ONLY)
 968void helper_syscall(CPUX86State *env, int next_eip_addend)
 969{
 970    CPUState *cs = CPU(x86_env_get_cpu(env));
 971
 972    cs->exception_index = EXCP_SYSCALL;
 973    env->exception_next_eip = env->eip + next_eip_addend;
 974    cpu_loop_exit(cs);
 975}
 976#else
 977void helper_syscall(CPUX86State *env, int next_eip_addend)
 978{
 979    int selector;
 980
 981    if (!(env->efer & MSR_EFER_SCE)) {
 982        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
 983    }
 984    selector = (env->star >> 32) & 0xffff;
 985    if (env->hflags & HF_LMA_MASK) {
 986        int code64;
 987
 988        env->regs[R_ECX] = env->eip + next_eip_addend;
 989        env->regs[11] = cpu_compute_eflags(env);
 990
 991        code64 = env->hflags & HF_CS64_MASK;
 992
 993        env->eflags &= ~env->fmask;
 994        cpu_load_eflags(env, env->eflags, 0);
 995        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
 996                           0, 0xffffffff,
 997                               DESC_G_MASK | DESC_P_MASK |
 998                               DESC_S_MASK |
 999                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1000                               DESC_L_MASK);
1001        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1002                               0, 0xffffffff,
1003                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1004                               DESC_S_MASK |
1005                               DESC_W_MASK | DESC_A_MASK);
1006        if (code64) {
1007            env->eip = env->lstar;
1008        } else {
1009            env->eip = env->cstar;
1010        }
1011    } else {
1012        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1013
1014        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1015        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1016                           0, 0xffffffff,
1017                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1018                               DESC_S_MASK |
1019                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1020        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1021                               0, 0xffffffff,
1022                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1023                               DESC_S_MASK |
1024                               DESC_W_MASK | DESC_A_MASK);
1025        env->eip = (uint32_t)env->star;
1026    }
1027}
1028#endif
1029#endif
1030
1031#ifdef TARGET_X86_64
1032void helper_sysret(CPUX86State *env, int dflag)
1033{
1034    int cpl, selector;
1035
1036    if (!(env->efer & MSR_EFER_SCE)) {
1037        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1038    }
1039    cpl = env->hflags & HF_CPL_MASK;
1040    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1041        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1042    }
1043    selector = (env->star >> 48) & 0xffff;
1044    if (env->hflags & HF_LMA_MASK) {
1045        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1046                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1047                        NT_MASK);
1048        if (dflag == 2) {
1049            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1050                                   0, 0xffffffff,
1051                                   DESC_G_MASK | DESC_P_MASK |
1052                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1053                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1054                                   DESC_L_MASK);
1055            env->eip = env->regs[R_ECX];
1056        } else {
1057            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1058                                   0, 0xffffffff,
1059                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1060                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1061                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1062            env->eip = (uint32_t)env->regs[R_ECX];
1063        }
1064        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1065                               0, 0xffffffff,
1066                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1067                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1068                               DESC_W_MASK | DESC_A_MASK);
1069    } else {
1070        env->eflags |= IF_MASK;
1071        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1072                               0, 0xffffffff,
1073                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1074                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1075                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1076        env->eip = (uint32_t)env->regs[R_ECX];
1077        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1078                               0, 0xffffffff,
1079                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1080                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1081                               DESC_W_MASK | DESC_A_MASK);
1082    }
1083}
1084#endif
1085
1086/* real mode interrupt */
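/*
 * In real mode the IDT is a plain interrupt vector table: 4 bytes per
 * vector, holding the 16-bit handler offset followed by the 16-bit code
 * segment.  Delivery pushes FLAGS, CS and IP and clears IF/TF/AC/RF.
 */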
1087static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1088                              int error_code, unsigned int next_eip)
1089{
1090    SegmentCache *dt;
1091    target_ulong ptr, ssp;
1092    int selector;
1093    uint32_t offset, esp;
1094    uint32_t old_cs, old_eip;
1095
1096    /* real mode (simpler!) */
1097    dt = &env->idt;
1098    if (intno * 4 + 3 > dt->limit) {
1099        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1100    }
1101    ptr = dt->base + intno * 4;
1102    offset = cpu_lduw_kernel(env, ptr);
1103    selector = cpu_lduw_kernel(env, ptr + 2);
1104    esp = env->regs[R_ESP];
1105    ssp = env->segs[R_SS].base;
1106    if (is_int) {
1107        old_eip = next_eip;
1108    } else {
1109        old_eip = env->eip;
1110    }
1111    old_cs = env->segs[R_CS].selector;
1112    /* XXX: use SS segment size? */
1113    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1114    PUSHW(ssp, esp, 0xffff, old_cs);
1115    PUSHW(ssp, esp, 0xffff, old_eip);
1116
1117    /* update processor state */
1118    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1119    env->eip = offset;
1120    env->segs[R_CS].selector = selector;
1121    env->segs[R_CS].base = (selector << 4);
1122    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1123}
1124
1125#if defined(CONFIG_USER_ONLY)
1126/* fake user mode interrupt. is_int is TRUE if coming from the int
1127 * instruction. next_eip is the env->eip value AFTER the interrupt
1128 * instruction. It is only relevant if is_int is TRUE or if intno
1129 * is EXCP_SYSCALL.
1130 */
1131static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1132                              int error_code, target_ulong next_eip)
1133{
1134    if (is_int) {
1135        SegmentCache *dt;
1136        target_ulong ptr;
1137        int dpl, cpl, shift;
1138        uint32_t e2;
1139
1140        dt = &env->idt;
1141        if (env->hflags & HF_LMA_MASK) {
1142            shift = 4;
1143        } else {
1144            shift = 3;
1145        }
1146        ptr = dt->base + (intno << shift);
1147        e2 = cpu_ldl_kernel(env, ptr + 4);
1148
1149        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1150        cpl = env->hflags & HF_CPL_MASK;
1151        /* check privilege if software int */
1152        if (dpl < cpl) {
1153            raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1154        }
1155    }
1156
 1157    /* Since we emulate only user space, we cannot do more than exit
 1158       the emulation with the suitable exception and error code, so
 1159       just update EIP for software INT instructions and EXCP_SYSCALL. */
1160    if (is_int || intno == EXCP_SYSCALL) {
1161        env->eip = next_eip;
1162    }
1163}
1164
1165#else
1166
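/*
 * When the CPU is running under SVM (HF_SVMI_MASK), record the event being
 * delivered in the VMCB EVENTINJ field (vector, type and, outside real
 * mode, an optional error code) unless an injection is already pending.
 */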
1167static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1168                            int error_code, int is_hw, int rm)
1169{
1170    CPUState *cs = CPU(x86_env_get_cpu(env));
1171    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1172                                                          control.event_inj));
1173
1174    if (!(event_inj & SVM_EVTINJ_VALID)) {
1175        int type;
1176
1177        if (is_int) {
1178            type = SVM_EVTINJ_TYPE_SOFT;
1179        } else {
1180            type = SVM_EVTINJ_TYPE_EXEPT;
1181        }
1182        event_inj = intno | type | SVM_EVTINJ_VALID;
1183        if (!rm && exception_has_error_code(intno)) {
1184            event_inj |= SVM_EVTINJ_VALID_ERR;
1185            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1186                                             control.event_inj_err),
1187                     error_code);
1188        }
1189        x86_stl_phys(cs,
1190                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1191                 event_inj);
1192    }
1193}
1194#endif
1195
1196/*
 1197 * Begin execution of an interrupt. is_int is TRUE if coming from
1198 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1199 * instruction. It is only relevant if is_int is TRUE.
1200 */
1201static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1202                             int error_code, target_ulong next_eip, int is_hw)
1203{
1204    CPUX86State *env = &cpu->env;
1205
1206    if (qemu_loglevel_mask(CPU_LOG_INT)) {
1207        if ((env->cr[0] & CR0_PE_MASK)) {
1208            static int count;
1209
1210            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1211                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1212                     count, intno, error_code, is_int,
1213                     env->hflags & HF_CPL_MASK,
1214                     env->segs[R_CS].selector, env->eip,
1215                     (int)env->segs[R_CS].base + env->eip,
1216                     env->segs[R_SS].selector, env->regs[R_ESP]);
1217            if (intno == 0x0e) {
1218                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1219            } else {
1220                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1221            }
1222            qemu_log("\n");
1223            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1224#if 0
1225            {
1226                int i;
1227                target_ulong ptr;
1228
1229                qemu_log("       code=");
1230                ptr = env->segs[R_CS].base + env->eip;
1231                for (i = 0; i < 16; i++) {
1232                    qemu_log(" %02x", ldub(ptr + i));
1233                }
1234                qemu_log("\n");
1235            }
1236#endif
1237            count++;
1238        }
1239    }
1240    if (env->cr[0] & CR0_PE_MASK) {
1241#if !defined(CONFIG_USER_ONLY)
1242        if (env->hflags & HF_SVMI_MASK) {
1243            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1244        }
1245#endif
1246#ifdef TARGET_X86_64
1247        if (env->hflags & HF_LMA_MASK) {
1248            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1249        } else
1250#endif
1251        {
1252            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1253                                   is_hw);
1254        }
1255    } else {
1256#if !defined(CONFIG_USER_ONLY)
1257        if (env->hflags & HF_SVMI_MASK) {
1258            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1259        }
1260#endif
1261        do_interrupt_real(env, intno, is_int, error_code, next_eip);
1262    }
1263
1264#if !defined(CONFIG_USER_ONLY)
1265    if (env->hflags & HF_SVMI_MASK) {
1266        CPUState *cs = CPU(cpu);
1267        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1268                                      offsetof(struct vmcb,
1269                                               control.event_inj));
1270
1271        x86_stl_phys(cs,
1272                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1273                 event_inj & ~SVM_EVTINJ_VALID);
1274    }
1275#endif
1276}
1277
1278void x86_cpu_do_interrupt(CPUState *cs)
1279{
1280    X86CPU *cpu = X86_CPU(cs);
1281    CPUX86State *env = &cpu->env;
1282
1283#if defined(CONFIG_USER_ONLY)
1284    /* if user mode only, we simulate a fake exception
1285       which will be handled outside the cpu execution
1286       loop */
1287    do_interrupt_user(env, cs->exception_index,
1288                      env->exception_is_int,
1289                      env->error_code,
1290                      env->exception_next_eip);
1291    /* successfully delivered */
1292    env->old_exception = -1;
1293#else
1294    if (cs->exception_index >= EXCP_VMEXIT) {
1295        assert(env->old_exception == -1);
1296        do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
1297    } else {
1298        do_interrupt_all(cpu, cs->exception_index,
1299                         env->exception_is_int,
1300                         env->error_code,
1301                         env->exception_next_eip, 0);
1302        /* successfully delivered */
1303        env->old_exception = -1;
1304    }
1305#endif
1306}
1307
1308void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1309{
1310    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
1311}
1312
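/*
 * Interrupt requests are serviced one per call, in decreasing priority:
 * APIC poll, SIPI, SMI, NMI, MCE, external (PIC/APIC) interrupts and
 * finally SVM virtual interrupts.
 */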
1313bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1314{
1315    X86CPU *cpu = X86_CPU(cs);
1316    CPUX86State *env = &cpu->env;
1317    bool ret = false;
1318
1319#if !defined(CONFIG_USER_ONLY)
1320    if (interrupt_request & CPU_INTERRUPT_POLL) {
1321        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1322        apic_poll_irq(cpu->apic_state);
1323        /* Don't process multiple interrupt requests in a single call.
1324           This is required to make icount-driven execution deterministic. */
1325        return true;
1326    }
1327#endif
1328    if (interrupt_request & CPU_INTERRUPT_SIPI) {
1329        do_cpu_sipi(cpu);
1330        ret = true;
1331    } else if (env->hflags2 & HF2_GIF_MASK) {
1332        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1333            !(env->hflags & HF_SMM_MASK)) {
1334            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
1335            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1336            do_smm_enter(cpu);
1337            ret = true;
1338        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1339                   !(env->hflags2 & HF2_NMI_MASK)) {
1340            cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
1341            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1342            env->hflags2 |= HF2_NMI_MASK;
1343            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1344            ret = true;
1345        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1346            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1347            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1348            ret = true;
1349        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1350                   (((env->hflags2 & HF2_VINTR_MASK) &&
1351                     (env->hflags2 & HF2_HIF_MASK)) ||
1352                    (!(env->hflags2 & HF2_VINTR_MASK) &&
1353                     (env->eflags & IF_MASK &&
1354                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1355            int intno;
1356            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
1357            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1358                                       CPU_INTERRUPT_VIRQ);
1359            intno = cpu_get_pic_interrupt(env);
1360            qemu_log_mask(CPU_LOG_TB_IN_ASM,
1361                          "Servicing hardware INT=0x%02x\n", intno);
1362            do_interrupt_x86_hardirq(env, intno, 1);
1363            /* ensure that no TB jump will be modified as
1364               the program flow was changed */
1365            ret = true;
1366#if !defined(CONFIG_USER_ONLY)
1367        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1368                   (env->eflags & IF_MASK) &&
1369                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1370            int intno;
1371            /* FIXME: this should respect TPR */
1372            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
1373            intno = x86_ldl_phys(cs, env->vm_vmcb
1374                             + offsetof(struct vmcb, control.int_vector));
1375            qemu_log_mask(CPU_LOG_TB_IN_ASM,
1376                          "Servicing virtual hardware INT=0x%02x\n", intno);
1377            do_interrupt_x86_hardirq(env, intno, 1);
1378            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1379            ret = true;
1380#endif
1381        }
1382    }
1383
1384    return ret;
1385}
1386
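/*
 * In long mode, LDT and TSS descriptors are 16 bytes (hence entry_limit
 * 15 below): the third dword supplies base[63:32] and the fourth dword
 * must have a zero type field.
 */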
1387void helper_lldt(CPUX86State *env, int selector)
1388{
1389    SegmentCache *dt;
1390    uint32_t e1, e2;
1391    int index, entry_limit;
1392    target_ulong ptr;
1393
1394    selector &= 0xffff;
1395    if ((selector & 0xfffc) == 0) {
1396        /* XXX: NULL selector case: invalid LDT */
1397        env->ldt.base = 0;
1398        env->ldt.limit = 0;
1399    } else {
1400        if (selector & 0x4) {
1401            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1402        }
1403        dt = &env->gdt;
1404        index = selector & ~7;
1405#ifdef TARGET_X86_64
1406        if (env->hflags & HF_LMA_MASK) {
1407            entry_limit = 15;
1408        } else
1409#endif
1410        {
1411            entry_limit = 7;
1412        }
1413        if ((index + entry_limit) > dt->limit) {
1414            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1415        }
1416        ptr = dt->base + index;
1417        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1418        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1419        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1420            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1421        }
1422        if (!(e2 & DESC_P_MASK)) {
1423            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1424        }
1425#ifdef TARGET_X86_64
1426        if (env->hflags & HF_LMA_MASK) {
1427            uint32_t e3;
1428
1429            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1430            load_seg_cache_raw_dt(&env->ldt, e1, e2);
1431            env->ldt.base |= (target_ulong)e3 << 32;
1432        } else
1433#endif
1434        {
1435            load_seg_cache_raw_dt(&env->ldt, e1, e2);
1436        }
1437    }
1438    env->ldt.selector = selector;
1439}
1440
1441void helper_ltr(CPUX86State *env, int selector)
1442{
1443    SegmentCache *dt;
1444    uint32_t e1, e2;
1445    int index, type, entry_limit;
1446    target_ulong ptr;
1447
1448    selector &= 0xffff;
1449    if ((selector & 0xfffc) == 0) {
1450        /* NULL selector case: invalid TR */
1451        env->tr.base = 0;
1452        env->tr.limit = 0;
1453        env->tr.flags = 0;
1454    } else {
1455        if (selector & 0x4) {
1456            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1457        }
1458        dt = &env->gdt;
1459        index = selector & ~7;
1460#ifdef TARGET_X86_64
1461        if (env->hflags & HF_LMA_MASK) {
1462            entry_limit = 15;
1463        } else
1464#endif
1465        {
1466            entry_limit = 7;
1467        }
1468        if ((index + entry_limit) > dt->limit) {
1469            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1470        }
1471        ptr = dt->base + index;
1472        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1473        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1474        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1475        if ((e2 & DESC_S_MASK) ||
1476            (type != 1 && type != 9)) {
1477            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1478        }
1479        if (!(e2 & DESC_P_MASK)) {
1480            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1481        }
1482#ifdef TARGET_X86_64
1483        if (env->hflags & HF_LMA_MASK) {
1484            uint32_t e3, e4;
1485
1486            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1487            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1488            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1489                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1490            }
1491            load_seg_cache_raw_dt(&env->tr, e1, e2);
1492            env->tr.base |= (target_ulong)e3 << 32;
1493        } else
1494#endif
1495        {
1496            load_seg_cache_raw_dt(&env->tr, e1, e2);
1497        }
1498        e2 |= DESC_TSS_BUSY_MASK;
1499        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1500    }
1501    env->tr.selector = selector;
1502}
1503
1504/* Only works in protected mode outside VM86; seg_reg must not be R_CS. */
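    /* A null selector is accepted for every register except SS, where it is
       only legal in 64-bit code at CPL != 3.  SS must otherwise be a
       writable data segment with RPL == DPL == CPL; the other registers
       need a readable segment and, for data or non-conforming code, a DPL
       of at least max(CPL, RPL).  The accessed bit is set in the descriptor
       if it was clear. */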
1505void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1506{
1507    uint32_t e1, e2;
1508    int cpl, dpl, rpl;
1509    SegmentCache *dt;
1510    int index;
1511    target_ulong ptr;
1512
1513    selector &= 0xffff;
1514    cpl = env->hflags & HF_CPL_MASK;
1515    if ((selector & 0xfffc) == 0) {
1516        /* null selector case */
1517        if (seg_reg == R_SS
1518#ifdef TARGET_X86_64
1519            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1520#endif
1521            ) {
1522            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1523        }
1524        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1525    } else {
1526
1527        if (selector & 0x4) {
1528            dt = &env->ldt;
1529        } else {
1530            dt = &env->gdt;
1531        }
1532        index = selector & ~7;
1533        if ((index + 7) > dt->limit) {
1534            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1535        }
1536        ptr = dt->base + index;
1537        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1538        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1539
1540        if (!(e2 & DESC_S_MASK)) {
1541            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1542        }
1543        rpl = selector & 3;
1544        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1545        if (seg_reg == R_SS) {
1546            /* must be writable segment */
1547            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1548                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1549            }
1550            if (rpl != cpl || dpl != cpl) {
1551                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1552            }
1553        } else {
1554            /* must be readable segment */
1555            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1556                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1557            }
1558
1559            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1560                /* if not conforming code, test rights */
1561                if (dpl < cpl || dpl < rpl) {
1562                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1563                }
1564            }
1565        }
1566
1567        if (!(e2 & DESC_P_MASK)) {
1568            if (seg_reg == R_SS) {
1569                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1570            } else {
1571                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1572            }
1573        }
1574
1575        /* set the access bit if not already set */
1576        if (!(e2 & DESC_A_MASK)) {
1577            e2 |= DESC_A_MASK;
1578            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1579        }
1580
1581        cpu_x86_load_seg_cache(env, seg_reg, selector,
1582                       get_seg_base(e1, e2),
1583                       get_seg_limit(e1, e2),
1584                       e2);
1585#if 0
1586        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08x flags=%08x\n",
1587                 selector, (unsigned long)env->segs[seg_reg].base, env->segs[seg_reg].limit, env->segs[seg_reg].flags);
1588#endif
1589    }
1590}
1591
1592/* protected mode jump */
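    /* A direct code-segment target is checked against the conforming and
       non-conforming privilege rules and loaded with its RPL forced to CPL;
       a system-segment target is dispatched to a task switch (TSS or task
       gate) or redirected through a call gate, which cannot change the
       privilege level on a JMP. */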
1593void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1594                           target_ulong next_eip)
1595{
1596    int gate_cs, type;
1597    uint32_t e1, e2, cpl, dpl, rpl, limit;
1598
1599    if ((new_cs & 0xfffc) == 0) {
1600        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1601    }
1602    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1603        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1604    }
1605    cpl = env->hflags & HF_CPL_MASK;
1606    if (e2 & DESC_S_MASK) {
1607        if (!(e2 & DESC_CS_MASK)) {
1608            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1609        }
1610        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1611        if (e2 & DESC_C_MASK) {
1612            /* conforming code segment */
1613            if (dpl > cpl) {
1614                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1615            }
1616        } else {
1617            /* non-conforming code segment */
1618            rpl = new_cs & 3;
1619            if (rpl > cpl) {
1620                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1621            }
1622            if (dpl != cpl) {
1623                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1624            }
1625        }
1626        if (!(e2 & DESC_P_MASK)) {
1627            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1628        }
1629        limit = get_seg_limit(e1, e2);
1630        if (new_eip > limit &&
1631            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1632            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1633        }
1634        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1635                       get_seg_base(e1, e2), limit, e2);
1636        env->eip = new_eip;
1637    } else {
1638        /* jump to call or task gate */
1639        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1640        rpl = new_cs & 3;
1641        cpl = env->hflags & HF_CPL_MASK;
1642        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1643        switch (type) {
1644        case 1: /* 286 TSS */
1645        case 9: /* 386 TSS */
1646        case 5: /* task gate */
1647            if (dpl < cpl || dpl < rpl) {
1648                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1649            }
1650            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1651            break;
1652        case 4: /* 286 call gate */
1653        case 12: /* 386 call gate */
1654            if ((dpl < cpl) || (dpl < rpl)) {
1655                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1656            }
1657            if (!(e2 & DESC_P_MASK)) {
1658                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1659            }
1660            gate_cs = e1 >> 16;
1661            new_eip = (e1 & 0xffff);
1662            if (type == 12) {
1663                new_eip |= (e2 & 0xffff0000);
1664            }
1665            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1666                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1667            }
1668            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1669            /* must be code segment */
1670            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1671                 (DESC_S_MASK | DESC_CS_MASK))) {
1672                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1673            }
1674            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1675                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1676                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1677            }
1678            if (!(e2 & DESC_P_MASK)) {
1679                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1680            }
1681            limit = get_seg_limit(e1, e2);
1682            if (new_eip > limit) {
1683                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1684            }
1685            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1686                                   get_seg_base(e1, e2), limit, e2);
1687            env->eip = new_eip;
1688            break;
1689        default:
1690            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1691            break;
1692        }
1693    }
1694}
1695
1696/* real mode call */
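    /* Push the return CS:IP (16 or 32 bit according to 'shift') and reload
       CS with base = selector * 16. */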
1697void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1698                       int shift, int next_eip)
1699{
1700    int new_eip;
1701    uint32_t esp, esp_mask;
1702    target_ulong ssp;
1703
1704    new_eip = new_eip1;
1705    esp = env->regs[R_ESP];
1706    esp_mask = get_sp_mask(env->segs[R_SS].flags);
1707    ssp = env->segs[R_SS].base;
1708    if (shift) {
1709        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1710        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1711    } else {
1712        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1713        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1714    }
1715
1716    SET_ESP(esp, esp_mask);
1717    env->eip = new_eip;
1718    env->segs[R_CS].selector = new_cs;
1719    env->segs[R_CS].base = (new_cs << 4);
1720}
1721
1722/* protected mode call */
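    /* A direct call to a code segment pushes the return address on the
       current stack (64 bits wide when shift == 2).  A call through a call
       gate may switch to an inner-privilege stack taken from the TSS,
       copying 'param_count' parameters from the caller's stack; TSS and
       task-gate targets go through switch_tss_ra(). */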
1723void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1724                            int shift, target_ulong next_eip)
1725{
1726    int new_stack, i;
1727    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1728    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1729    uint32_t val, limit, old_sp_mask;
1730    target_ulong ssp, old_ssp;
1731
1732    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1733    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1734    if ((new_cs & 0xfffc) == 0) {
1735        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1736    }
1737    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1738        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1739    }
1740    cpl = env->hflags & HF_CPL_MASK;
1741    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1742    if (e2 & DESC_S_MASK) {
1743        if (!(e2 & DESC_CS_MASK)) {
1744            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1745        }
1746        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1747        if (e2 & DESC_C_MASK) {
1748            /* conforming code segment */
1749            if (dpl > cpl) {
1750                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1751            }
1752        } else {
1753            /* non-conforming code segment */
1754            rpl = new_cs & 3;
1755            if (rpl > cpl) {
1756                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1757            }
1758            if (dpl != cpl) {
1759                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1760            }
1761        }
1762        if (!(e2 & DESC_P_MASK)) {
1763            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1764        }
1765
1766#ifdef TARGET_X86_64
1767        /* XXX: check 16/32 bit cases in long mode */
1768        if (shift == 2) {
1769            target_ulong rsp;
1770
1771            /* 64 bit case */
1772            rsp = env->regs[R_ESP];
1773            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1774            PUSHQ_RA(rsp, next_eip, GETPC());
1775            /* from this point, not restartable */
1776            env->regs[R_ESP] = rsp;
1777            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1778                                   get_seg_base(e1, e2),
1779                                   get_seg_limit(e1, e2), e2);
1780            env->eip = new_eip;
1781        } else
1782#endif
1783        {
1784            sp = env->regs[R_ESP];
1785            sp_mask = get_sp_mask(env->segs[R_SS].flags);
1786            ssp = env->segs[R_SS].base;
1787            if (shift) {
1788                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1789                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1790            } else {
1791                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1792                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1793            }
1794
1795            limit = get_seg_limit(e1, e2);
1796            if (new_eip > limit) {
1797                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1798            }
1799            /* from this point, not restartable */
1800            SET_ESP(sp, sp_mask);
1801            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1802                                   get_seg_base(e1, e2), limit, e2);
1803            env->eip = new_eip;
1804        }
1805    } else {
1806        /* check gate type */
1807        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1808        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1809        rpl = new_cs & 3;
1810        switch (type) {
1811        case 1: /* available 286 TSS */
1812        case 9: /* available 386 TSS */
1813        case 5: /* task gate */
1814            if (dpl < cpl || dpl < rpl) {
1815                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1816            }
1817            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1818            return;
1819        case 4: /* 286 call gate */
1820        case 12: /* 386 call gate */
1821            break;
1822        default:
1823            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1824            break;
1825        }
1826        shift = type >> 3;
1827
1828        if (dpl < cpl || dpl < rpl) {
1829            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1830        }
1831        /* check valid bit */
1832        if (!(e2 & DESC_P_MASK)) {
1833            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1834        }
1835        selector = e1 >> 16;
1836        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1837        param_count = e2 & 0x1f;
1838        if ((selector & 0xfffc) == 0) {
1839            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1840        }
1841
1842        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1843            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1844        }
1845        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1846            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1847        }
1848        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1849        if (dpl > cpl) {
1850            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1851        }
1852        if (!(e2 & DESC_P_MASK)) {
1853            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1854        }
1855
1856        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1857            /* to inner privilege */
1858            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1859            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1860                      TARGET_FMT_lx "\n", ss, sp, param_count,
1861                      env->regs[R_ESP]);
1862            if ((ss & 0xfffc) == 0) {
1863                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1864            }
1865            if ((ss & 3) != dpl) {
1866                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1867            }
1868            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1869                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1870            }
1871            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1872            if (ss_dpl != dpl) {
1873                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1874            }
1875            if (!(ss_e2 & DESC_S_MASK) ||
1876                (ss_e2 & DESC_CS_MASK) ||
1877                !(ss_e2 & DESC_W_MASK)) {
1878                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1879            }
1880            if (!(ss_e2 & DESC_P_MASK)) {
1881                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1882            }
1883
1884            /* push_size = ((param_count * 2) + 8) << shift; */
1885
1886            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1887            old_ssp = env->segs[R_SS].base;
1888
1889            sp_mask = get_sp_mask(ss_e2);
1890            ssp = get_seg_base(ss_e1, ss_e2);
1891            if (shift) {
1892                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1893                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1894                for (i = param_count - 1; i >= 0; i--) {
1895                    val = cpu_ldl_kernel_ra(env, old_ssp +
1896                                            ((env->regs[R_ESP] + i * 4) &
1897                                             old_sp_mask), GETPC());
1898                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1899                }
1900            } else {
1901                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1902                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1903                for (i = param_count - 1; i >= 0; i--) {
1904                    val = cpu_lduw_kernel_ra(env, old_ssp +
1905                                             ((env->regs[R_ESP] + i * 2) &
1906                                              old_sp_mask), GETPC());
1907                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1908                }
1909            }
1910            new_stack = 1;
1911        } else {
1912            /* to same privilege */
1913            sp = env->regs[R_ESP];
1914            sp_mask = get_sp_mask(env->segs[R_SS].flags);
1915            ssp = env->segs[R_SS].base;
1916            /* push_size = (4 << shift); */
1917            new_stack = 0;
1918        }
1919
1920        if (shift) {
1921            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1922            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1923        } else {
1924            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1925            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1926        }
1927
1928        /* from this point, not restartable */
1929
1930        if (new_stack) {
1931            ss = (ss & ~3) | dpl;
1932            cpu_x86_load_seg_cache(env, R_SS, ss,
1933                                   ssp,
1934                                   get_seg_limit(ss_e1, ss_e2),
1935                                   ss_e2);
1936        }
1937
1938        selector = (selector & ~3) | dpl;
1939        cpu_x86_load_seg_cache(env, R_CS, selector,
1940                       get_seg_base(e1, e2),
1941                       get_seg_limit(e1, e2),
1942                       e2);
1943        SET_ESP(sp, sp_mask);
1944        env->eip = offset;
1945    }
1946}
1947
1948/* real and vm86 mode iret */
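    /* Pop IP, CS and FLAGS (16 or 32 bit).  In vm86 mode the IOPL field
       cannot be changed, so it is excluded from the restored flag mask. */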
1949void helper_iret_real(CPUX86State *env, int shift)
1950{
1951    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1952    target_ulong ssp;
1953    int eflags_mask;
1954
1955    sp_mask = 0xffff; /* XXX: use SS segment size? */
1956    sp = env->regs[R_ESP];
1957    ssp = env->segs[R_SS].base;
1958    if (shift == 1) {
1959        /* 32 bits */
1960        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
1961        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
1962        new_cs &= 0xffff;
1963        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1964    } else {
1965        /* 16 bits */
1966        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
1967        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
1968        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1969    }
1970    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
1971    env->segs[R_CS].selector = new_cs;
1972    env->segs[R_CS].base = (new_cs << 4);
1973    env->eip = new_eip;
1974    if (env->eflags & VM_MASK) {
1975        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1976            NT_MASK;
1977    } else {
1978        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1979            RF_MASK | NT_MASK;
1980    }
1981    if (shift == 0) {
1982        eflags_mask &= 0xffff;
1983    }
1984    cpu_load_eflags(env, new_eflags, eflags_mask);
1985    env->hflags2 &= ~HF2_NMI_MASK;
1986}
1987
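    /* On a return to an outer privilege level, data segment registers that
       are no longer accessible at the new CPL (DPL < CPL for data or
       non-conforming code segments) are nulled; see the FS/GS note below. */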
1988static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
1989{
1990    int dpl;
1991    uint32_t e2;
1992
1993    /* XXX: on x86_64, we do not want to nullify FS and GS because
1994       they may still contain a valid base. I would be interested to
1995       know how a real x86_64 CPU behaves */
1996    if ((seg_reg == R_FS || seg_reg == R_GS) &&
1997        (env->segs[seg_reg].selector & 0xfffc) == 0) {
1998        return;
1999    }
2000
2001    e2 = env->segs[seg_reg].flags;
2002    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2003    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2004        /* data or non-conforming code segment */
2005        if (dpl < cpl) {
2006            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2007        }
2008    }
2009}
2010
2011/* protected mode iret */
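    /* Common worker for far RET (is_iret == 0) and IRET (is_iret == 1): pop
       the return CS:EIP (plus EFLAGS for IRET), validate the code segment,
       then either stay at the same privilege level or pop a new SS:ESP for
       a return to an outer level and null the inaccessible data segments.
       A popped EFLAGS with VM set switches back to vm86 mode.  'addend'
       accounts for the immediate operand of a "ret imm16". */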
2012static inline void helper_ret_protected(CPUX86State *env, int shift,
2013                                        int is_iret, int addend,
2014                                        uintptr_t retaddr)
2015{
2016    uint32_t new_cs, new_eflags, new_ss;
2017    uint32_t new_es, new_ds, new_fs, new_gs;
2018    uint32_t e1, e2, ss_e1, ss_e2;
2019    int cpl, dpl, rpl, eflags_mask, iopl;
2020    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2021
2022#ifdef TARGET_X86_64
2023    if (shift == 2) {
2024        sp_mask = -1;
2025    } else
2026#endif
2027    {
2028        sp_mask = get_sp_mask(env->segs[R_SS].flags);
2029    }
2030    sp = env->regs[R_ESP];
2031    ssp = env->segs[R_SS].base;
2032    new_eflags = 0; /* avoid warning */
2033#ifdef TARGET_X86_64
2034    if (shift == 2) {
2035        POPQ_RA(sp, new_eip, retaddr);
2036        POPQ_RA(sp, new_cs, retaddr);
2037        new_cs &= 0xffff;
2038        if (is_iret) {
2039            POPQ_RA(sp, new_eflags, retaddr);
2040        }
2041    } else
2042#endif
2043    {
2044        if (shift == 1) {
2045            /* 32 bits */
2046            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2047            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2048            new_cs &= 0xffff;
2049            if (is_iret) {
2050                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2051                if (new_eflags & VM_MASK) {
2052                    goto return_to_vm86;
2053                }
2054            }
2055        } else {
2056            /* 16 bits */
2057            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2058            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2059            if (is_iret) {
2060                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2061            }
2062        }
2063    }
2064    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2065              new_cs, new_eip, shift, addend);
2066    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2067    if ((new_cs & 0xfffc) == 0) {
2068        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2069    }
2070    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2071        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2072    }
2073    if (!(e2 & DESC_S_MASK) ||
2074        !(e2 & DESC_CS_MASK)) {
2075        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2076    }
2077    cpl = env->hflags & HF_CPL_MASK;
2078    rpl = new_cs & 3;
2079    if (rpl < cpl) {
2080        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2081    }
2082    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2083    if (e2 & DESC_C_MASK) {
2084        if (dpl > rpl) {
2085            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2086        }
2087    } else {
2088        if (dpl != rpl) {
2089            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2090        }
2091    }
2092    if (!(e2 & DESC_P_MASK)) {
2093        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2094    }
2095
2096    sp += addend;
2097    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2098                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2099        /* return to same privilege level */
2100        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2101                       get_seg_base(e1, e2),
2102                       get_seg_limit(e1, e2),
2103                       e2);
2104    } else {
2105        /* return to different privilege level */
2106#ifdef TARGET_X86_64
2107        if (shift == 2) {
2108            POPQ_RA(sp, new_esp, retaddr);
2109            POPQ_RA(sp, new_ss, retaddr);
2110            new_ss &= 0xffff;
2111        } else
2112#endif
2113        {
2114            if (shift == 1) {
2115                /* 32 bits */
2116                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2117                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2118                new_ss &= 0xffff;
2119            } else {
2120                /* 16 bits */
2121                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2122                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2123            }
2124        }
2125        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2126                  new_ss, new_esp);
2127        if ((new_ss & 0xfffc) == 0) {
2128#ifdef TARGET_X86_64
2129            /* NULL ss is allowed in long mode if cpl != 3 */
2130            /* XXX: test CS64? */
2131            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2132                cpu_x86_load_seg_cache(env, R_SS, new_ss,
2133                                       0, 0xffffffff,
2134                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2135                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2136                                       DESC_W_MASK | DESC_A_MASK);
2137                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2138            } else
2139#endif
2140            {
2141                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2142            }
2143        } else {
2144            if ((new_ss & 3) != rpl) {
2145                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2146            }
2147            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2148                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2149            }
2150            if (!(ss_e2 & DESC_S_MASK) ||
2151                (ss_e2 & DESC_CS_MASK) ||
2152                !(ss_e2 & DESC_W_MASK)) {
2153                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2154            }
2155            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2156            if (dpl != rpl) {
2157                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2158            }
2159            if (!(ss_e2 & DESC_P_MASK)) {
2160                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2161            }
2162            cpu_x86_load_seg_cache(env, R_SS, new_ss,
2163                                   get_seg_base(ss_e1, ss_e2),
2164                                   get_seg_limit(ss_e1, ss_e2),
2165                                   ss_e2);
2166        }
2167
2168        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2169                       get_seg_base(e1, e2),
2170                       get_seg_limit(e1, e2),
2171                       e2);
2172        sp = new_esp;
2173#ifdef TARGET_X86_64
2174        if (env->hflags & HF_CS64_MASK) {
2175            sp_mask = -1;
2176        } else
2177#endif
2178        {
2179            sp_mask = get_sp_mask(ss_e2);
2180        }
2181
2182        /* validate data segments */
2183        validate_seg(env, R_ES, rpl);
2184        validate_seg(env, R_DS, rpl);
2185        validate_seg(env, R_FS, rpl);
2186        validate_seg(env, R_GS, rpl);
2187
2188        sp += addend;
2189    }
2190    SET_ESP(sp, sp_mask);
2191    env->eip = new_eip;
2192    if (is_iret) {
2193        /* NOTE: 'cpl' is the _old_ CPL */
2194        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2195        if (cpl == 0) {
2196            eflags_mask |= IOPL_MASK;
2197        }
2198        iopl = (env->eflags >> IOPL_SHIFT) & 3;
2199        if (cpl <= iopl) {
2200            eflags_mask |= IF_MASK;
2201        }
2202        if (shift == 0) {
2203            eflags_mask &= 0xffff;
2204        }
2205        cpu_load_eflags(env, new_eflags, eflags_mask);
2206    }
2207    return;
2208
2209 return_to_vm86:
2210    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2211    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2212    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2213    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2214    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2215    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2216
2217    /* modify processor state */
2218    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2219                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2220                    VIP_MASK);
2221    load_seg_vm(env, R_CS, new_cs & 0xffff);
2222    load_seg_vm(env, R_SS, new_ss & 0xffff);
2223    load_seg_vm(env, R_ES, new_es & 0xffff);
2224    load_seg_vm(env, R_DS, new_ds & 0xffff);
2225    load_seg_vm(env, R_FS, new_fs & 0xffff);
2226    load_seg_vm(env, R_GS, new_gs & 0xffff);
2227
2228    env->eip = new_eip & 0xffff;
2229    env->regs[R_ESP] = new_esp;
2230}
2231
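    /* When NT is set, IRET is a task return through the back link stored at
       offset 0 of the current TSS (not allowed in long mode); otherwise it
       goes through helper_ret_protected().  NMI blocking is cleared in both
       cases. */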
2232void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2233{
2234    int tss_selector, type;
2235    uint32_t e1, e2;
2236
2237    /* specific case for TSS */
2238    if (env->eflags & NT_MASK) {
2239#ifdef TARGET_X86_64
2240        if (env->hflags & HF_LMA_MASK) {
2241            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2242        }
2243#endif
2244        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2245        if (tss_selector & 4) {
2246            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2247        }
2248        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2249            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2250        }
2251        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2252        /* NOTE: mask 0x17 keeps the S bit and folds the busy 286 and 386
               TSS types (3 and 11) to the value 3, so one test checks both */
2253        if (type != 3) {
2254            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2255        }
2256        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2257    } else {
2258        helper_ret_protected(env, shift, 1, 0, GETPC());
2259    }
2260    env->hflags2 &= ~HF2_NMI_MASK;
2261}
2262
2263void helper_lret_protected(CPUX86State *env, int shift, int addend)
2264{
2265    helper_ret_protected(env, shift, 0, addend, GETPC());
2266}
2267
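    /* SYSENTER: #GP(0) if IA32_SYSENTER_CS is zero.  VM, IF and RF are
       cleared, flat CPL0 code and stack segments are derived from that MSR
       (the SS selector is CS + 8), and execution continues at SYSENTER_EIP
       with SYSENTER_ESP; in long mode the CS descriptor gets the L bit. */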
2268void helper_sysenter(CPUX86State *env)
2269{
2270    if (env->sysenter_cs == 0) {
2271        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2272    }
2273    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2274
2275#ifdef TARGET_X86_64
2276    if (env->hflags & HF_LMA_MASK) {
2277        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2278                               0, 0xffffffff,
2279                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2280                               DESC_S_MASK |
2281                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2282                               DESC_L_MASK);
2283    } else
2284#endif
2285    {
2286        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2287                               0, 0xffffffff,
2288                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2289                               DESC_S_MASK |
2290                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2291    }
2292    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2293                           0, 0xffffffff,
2294                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2295                           DESC_S_MASK |
2296                           DESC_W_MASK | DESC_A_MASK);
2297    env->regs[R_ESP] = env->sysenter_esp;
2298    env->eip = env->sysenter_eip;
2299}
2300
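    /* SYSEXIT: only legal at CPL0 with a non-zero IA32_SYSENTER_CS.  It
       returns to CPL3 with flat segments (CS + 16 / SS + 24, or CS + 32 /
       SS + 40 for a 64-bit return), EIP taken from EDX and ESP from ECX. */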
2301void helper_sysexit(CPUX86State *env, int dflag)
2302{
2303    int cpl;
2304
2305    cpl = env->hflags & HF_CPL_MASK;
2306    if (env->sysenter_cs == 0 || cpl != 0) {
2307        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2308    }
2309#ifdef TARGET_X86_64
2310    if (dflag == 2) {
2311        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2312                               3, 0, 0xffffffff,
2313                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2314                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2315                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2316                               DESC_L_MASK);
2317        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2318                               3, 0, 0xffffffff,
2319                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2320                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2321                               DESC_W_MASK | DESC_A_MASK);
2322    } else
2323#endif
2324    {
2325        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2326                               3, 0, 0xffffffff,
2327                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2328                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2329                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2330        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2331                               3, 0, 0xffffffff,
2332                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2333                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2334                               DESC_W_MASK | DESC_A_MASK);
2335    }
2336    env->regs[R_ESP] = env->regs[R_ECX];
2337    env->eip = env->regs[R_EDX];
2338}
2339
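    /* LSL: return the (possibly page-granular) segment limit and set ZF if
       the selector passes the descriptor and privilege checks; otherwise
       clear ZF and return 0.  Only TSS and LDT system types are accepted. */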
2340target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2341{
2342    unsigned int limit;
2343    uint32_t e1, e2, eflags, selector;
2344    int rpl, dpl, cpl, type;
2345
2346    selector = selector1 & 0xffff;
2347    eflags = cpu_cc_compute_all(env, CC_OP);
2348    if ((selector & 0xfffc) == 0) {
2349        goto fail;
2350    }
2351    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2352        goto fail;
2353    }
2354    rpl = selector & 3;
2355    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2356    cpl = env->hflags & HF_CPL_MASK;
2357    if (e2 & DESC_S_MASK) {
2358        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2359            /* conforming */
2360        } else {
2361            if (dpl < cpl || dpl < rpl) {
2362                goto fail;
2363            }
2364        }
2365    } else {
2366        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2367        switch (type) {
2368        case 1:
2369        case 2:
2370        case 3:
2371        case 9:
2372        case 11:
2373            break;
2374        default:
2375            goto fail;
2376        }
2377        if (dpl < cpl || dpl < rpl) {
2378        fail:
2379            CC_SRC = eflags & ~CC_Z;
2380            return 0;
2381        }
2382    }
2383    limit = get_seg_limit(e1, e2);
2384    CC_SRC = eflags | CC_Z;
2385    return limit;
2386}
2387
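    /* LAR: like LSL but returns the access rights bytes (e2 & 0x00f0ff00);
       call gates and task gates are also accepted among system types. */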
2388target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2389{
2390    uint32_t e1, e2, eflags, selector;
2391    int rpl, dpl, cpl, type;
2392
2393    selector = selector1 & 0xffff;
2394    eflags = cpu_cc_compute_all(env, CC_OP);
2395    if ((selector & 0xfffc) == 0) {
2396        goto fail;
2397    }
2398    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2399        goto fail;
2400    }
2401    rpl = selector & 3;
2402    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2403    cpl = env->hflags & HF_CPL_MASK;
2404    if (e2 & DESC_S_MASK) {
2405        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2406            /* conforming */
2407        } else {
2408            if (dpl < cpl || dpl < rpl) {
2409                goto fail;
2410            }
2411        }
2412    } else {
2413        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2414        switch (type) {
2415        case 1:
2416        case 2:
2417        case 3:
2418        case 4:
2419        case 5:
2420        case 9:
2421        case 11:
2422        case 12:
2423            break;
2424        default:
2425            goto fail;
2426        }
2427        if (dpl < cpl || dpl < rpl) {
2428        fail:
2429            CC_SRC = eflags & ~CC_Z;
2430            return 0;
2431        }
2432    }
2433    CC_SRC = eflags | CC_Z;
2434    return e2 & 0x00f0ff00;
2435}
2436
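    /* VERR: set ZF if the segment is readable at the current CPL/RPL. */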
2437void helper_verr(CPUX86State *env, target_ulong selector1)
2438{
2439    uint32_t e1, e2, eflags, selector;
2440    int rpl, dpl, cpl;
2441
2442    selector = selector1 & 0xffff;
2443    eflags = cpu_cc_compute_all(env, CC_OP);
2444    if ((selector & 0xfffc) == 0) {
2445        goto fail;
2446    }
2447    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2448        goto fail;
2449    }
2450    if (!(e2 & DESC_S_MASK)) {
2451        goto fail;
2452    }
2453    rpl = selector & 3;
2454    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2455    cpl = env->hflags & HF_CPL_MASK;
2456    if (e2 & DESC_CS_MASK) {
2457        if (!(e2 & DESC_R_MASK)) {
2458            goto fail;
2459        }
2460        if (!(e2 & DESC_C_MASK)) {
2461            if (dpl < cpl || dpl < rpl) {
2462                goto fail;
2463            }
2464        }
2465    } else {
2466        if (dpl < cpl || dpl < rpl) {
2467        fail:
2468            CC_SRC = eflags & ~CC_Z;
2469            return;
2470        }
2471    }
2472    CC_SRC = eflags | CC_Z;
2473}
2474
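    /* VERW: set ZF if the segment is a writable data segment accessible at
       the current CPL/RPL. */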
2475void helper_verw(CPUX86State *env, target_ulong selector1)
2476{
2477    uint32_t e1, e2, eflags, selector;
2478    int rpl, dpl, cpl;
2479
2480    selector = selector1 & 0xffff;
2481    eflags = cpu_cc_compute_all(env, CC_OP);
2482    if ((selector & 0xfffc) == 0) {
2483        goto fail;
2484    }
2485    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2486        goto fail;
2487    }
2488    if (!(e2 & DESC_S_MASK)) {
2489        goto fail;
2490    }
2491    rpl = selector & 3;
2492    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2493    cpl = env->hflags & HF_CPL_MASK;
2494    if (e2 & DESC_CS_MASK) {
2495        goto fail;
2496    } else {
2497        if (dpl < cpl || dpl < rpl) {
2498            goto fail;
2499        }
2500        if (!(e2 & DESC_W_MASK)) {
2501        fail:
2502            CC_SRC = eflags & ~CC_Z;
2503            return;
2504        }
2505    }
2506    CC_SRC = eflags | CC_Z;
2507}
2508
2509#if defined(CONFIG_USER_ONLY)
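    /* Only built for user-mode emulation: in real or vm86 mode a segment
       load just sets base = selector << 4; otherwise it goes through the
       protected mode helper above. */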
2510void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2511{
2512    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2513        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2514        selector &= 0xffff;
2515        cpu_x86_load_seg_cache(env, seg_reg, selector,
2516                               (selector << 4), 0xffff,
2517                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2518                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2519    } else {
2520        helper_load_seg(env, seg_reg, selector);
2521    }
2522}
2523#endif
2524
2525/* check if Port I/O is allowed in TSS */
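    /* The current TSS must be a present 32-bit TSS (type 9) with a limit of
       at least 103.  The 16-bit word at offset 0x66 holds the I/O bitmap
       base; every bit covering the accessed port range must be clear,
       otherwise #GP(0) is raised. */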
2526static inline void check_io(CPUX86State *env, int addr, int size,
2527                            uintptr_t retaddr)
2528{
2529    int io_offset, val, mask;
2530
2531    /* TSS must be a valid 32 bit one */
2532    if (!(env->tr.flags & DESC_P_MASK) ||
2533        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2534        env->tr.limit < 103) {
2535        goto fail;
2536    }
2537    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2538    io_offset += (addr >> 3);
2539    /* Note: the check needs two bytes */
2540    if ((io_offset + 1) > env->tr.limit) {
2541        goto fail;
2542    }
2543    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2544    val >>= (addr & 7);
2545    mask = (1 << size) - 1;
2546    /* all bits must be zero to allow the I/O */
2547    if ((val & mask) != 0) {
2548    fail:
2549        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2550    }
2551}
2552
2553void helper_check_iob(CPUX86State *env, uint32_t t0)
2554{
2555    check_io(env, t0, 1, GETPC());
2556}
2557
2558void helper_check_iow(CPUX86State *env, uint32_t t0)
2559{
2560    check_io(env, t0, 2, GETPC());
2561}
2562
2563void helper_check_iol(CPUX86State *env, uint32_t t0)
2564{
2565    check_io(env, t0, 4, GETPC());
2566}
2567