qemu/target/i386/seg_helper.c
/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif

/* return non-zero on error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector,
                               uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

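/* A segment descriptor is two 32-bit words: e1 holds limit[15:0] and
   base[15:0]; e2 holds base[23:16], the type/flag bits, limit[19:16]
   and base[31:24]. */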
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
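    /* The TSS stores one SS:ESP pair per privilege level 0-2: 8 bytes per
       pair from offset 4 in a 32-bit TSS, 4 bytes per pair from offset 2
       in a 16-bit TSS, hence the shift derived from the TSS type. */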
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* code segments must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

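/* Exceptions that push an error code: #DF, #TS, #NP, #SS, #GP, #PF and #AC. */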
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

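/* SET_ESP writes only the ESP bits selected by sp_mask; the 64-bit variant
   special-cases the 32-bit mask so that a doubleword stack pointer update
   zero-extends into RSP. */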
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
    {                                                            \
        sp -= 2;                                                 \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
    {                                                            \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                 \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
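    /* In the 64-bit TSS, RSP0-RSP2 live at offsets 4, 12 and 20 and
       IST1-IST7 at offsets 36..84, so levels 0-2 select an RSP and
       levels 4-10 select an IST entry. */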
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
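    /* A non-zero IST field in the gate always forces a switch to the
       corresponding IST stack; otherwise the RSP of the target privilege
       level is used only when the privilege level changes. */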
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    env->eip = offset;
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
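    /* SYSCALL loads CS from STAR[47:32] and derives SS as that selector
       plus 8; in long mode the return RIP goes to RCX and RFLAGS to R11. */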
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~(env->fmask | RF_MASK);
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
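    /* SYSRET takes the user CS base from STAR[63:48]: +0 for a 32-bit
       return, +16 for a 64-bit return, with SS always at STAR[63:48] + 8. */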
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
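    /* each real-mode IDT (IVT) entry is 4 bytes: 16-bit offset, then
       16-bit segment */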
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt. is_int is TRUE if coming from the int
 * instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE or if intno
 * is EXCP_SYSCALL.
 */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    if (is_int) {
        SegmentCache *dt;
        target_ulong ptr;
        int dpl, cpl, shift;
        uint32_t e2;

        dt = &env->idt;
        if (env->hflags & HF_LMA_MASK) {
            shift = 4;
        } else {
            shift = 3;
        }
        ptr = dt->base + (intno << shift);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        /* check privilege if software int */
        if (dpl < cpl) {
            raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
        }
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}

#else

static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    if (cs->exception_index >= EXCP_VMEXIT) {
        assert(env->old_exception == -1);
        do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
    } else {
        do_interrupt_all(cpu, cs->exception_index,
                         env->exception_is_int,
                         env->error_code,
                         env->exception_next_eip, 0);
        /* successfully delivered */
        env->old_exception = -1;
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}

bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_POLL:
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
#endif
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_VIRQ:
        /* FIXME: this should respect TPR */
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        break;
#endif
    }

    /* Ensure that no TB jump will be modified as the program flow was changed.  */
    return true;
}

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
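        /* in long mode, system descriptors such as the LDT descriptor are
           16 bytes, so the entry spans two GDT slots */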
1402#ifdef TARGET_X86_64
1403        if (env->hflags & HF_LMA_MASK) {
1404            entry_limit = 15;
1405        } else
1406#endif
1407        {
1408            entry_limit = 7;
1409        }
1410        if ((index + entry_limit) > dt->limit) {
1411            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1412        }
1413        ptr = dt->base + index;
1414        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1415        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1416        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1417            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1418        }
1419        if (!(e2 & DESC_P_MASK)) {
1420            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1421        }
1422#ifdef TARGET_X86_64
1423        if (env->hflags & HF_LMA_MASK) {
1424            uint32_t e3;
1425
1426            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1427            load_seg_cache_raw_dt(&env->ldt, e1, e2);
1428            env->ldt.base |= (target_ulong)e3 << 32;
1429        } else
1430#endif
1431        {
1432            load_seg_cache_raw_dt(&env->ldt, e1, e2);
1433        }
1434    }
1435    env->ldt.selector = selector;
1436}
1437
1438void helper_ltr(CPUX86State *env, int selector)
1439{
1440    SegmentCache *dt;
1441    uint32_t e1, e2;
1442    int index, type, entry_limit;
1443    target_ulong ptr;
1444
1445    selector &= 0xffff;
1446    if ((selector & 0xfffc) == 0) {
1447        /* NULL selector case: invalid TR */
1448        env->tr.base = 0;
1449        env->tr.limit = 0;
1450        env->tr.flags = 0;
1451    } else {
1452        if (selector & 0x4) {
1453            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1454        }
1455        dt = &env->gdt;
1456        index = selector & ~7;
1457#ifdef TARGET_X86_64
1458        if (env->hflags & HF_LMA_MASK) {
1459            entry_limit = 15;
1460        } else
1461#endif
1462        {
1463            entry_limit = 7;
1464        }
1465        if ((index + entry_limit) > dt->limit) {
1466            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1467        }
1468        ptr = dt->base + index;
1469        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1470        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1471        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1472        if ((e2 & DESC_S_MASK) ||
1473            (type != 1 && type != 9)) {
1474            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1475        }
1476        if (!(e2 & DESC_P_MASK)) {
1477            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1478        }
1479#ifdef TARGET_X86_64
1480        if (env->hflags & HF_LMA_MASK) {
1481            uint32_t e3, e4;
1482
1483            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1484            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1485            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1486                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1487            }
1488            load_seg_cache_raw_dt(&env->tr, e1, e2);
1489            env->tr.base |= (target_ulong)e3 << 32;
1490        } else
1491#endif
1492        {
1493            load_seg_cache_raw_dt(&env->tr, e1, e2);
1494        }
1495        e2 |= DESC_TSS_BUSY_MASK;
1496        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1497    }
1498    env->tr.selector = selector;
1499}
1500
1501/* Only valid in protected mode, outside VM86. seg_reg must be != R_CS. */
1502void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1503{
1504    uint32_t e1, e2;
1505    int cpl, dpl, rpl;
1506    SegmentCache *dt;
1507    int index;
1508    target_ulong ptr;
1509
1510    selector &= 0xffff;
1511    cpl = env->hflags & HF_CPL_MASK;
1512    if ((selector & 0xfffc) == 0) {
1513        /* null selector case */
1514        if (seg_reg == R_SS
1515#ifdef TARGET_X86_64
1516            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1517#endif
1518            ) {
1519            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1520        }
1521        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1522    } else {
1523
1524        if (selector & 0x4) {
1525            dt = &env->ldt;
1526        } else {
1527            dt = &env->gdt;
1528        }
1529        index = selector & ~7;
1530        if ((index + 7) > dt->limit) {
1531            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1532        }
1533        ptr = dt->base + index;
1534        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1535        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1536
1537        if (!(e2 & DESC_S_MASK)) {
1538            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1539        }
1540        rpl = selector & 3;
1541        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1542        if (seg_reg == R_SS) {
1543            /* must be writable segment */
1544            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1545                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1546            }
1547            if (rpl != cpl || dpl != cpl) {
1548                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1549            }
1550        } else {
1551            /* must be readable segment */
1552            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1553                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1554            }
1555
1556            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1557                /* if not conforming code, test rights */
1558                if (dpl < cpl || dpl < rpl) {
1559                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1560                }
1561            }
1562        }
1563
1564        if (!(e2 & DESC_P_MASK)) {
1565            if (seg_reg == R_SS) {
1566                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1567            } else {
1568                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1569            }
1570        }
1571
1572        /* set the access bit if not already set */
1573        if (!(e2 & DESC_A_MASK)) {
1574            e2 |= DESC_A_MASK;
1575            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1576        }
1577
1578        cpu_x86_load_seg_cache(env, seg_reg, selector,
1579                       get_seg_base(e1, e2),
1580                       get_seg_limit(e1, e2),
1581                       e2);
1582#if 0
1583        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08x flags=%08x\n",
1584                 selector, (unsigned long)env->segs[seg_reg].base,
                 env->segs[seg_reg].limit, env->segs[seg_reg].flags);
1585#endif
1586    }
1587}
1588
1589/* protected mode jump */
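/* The target may be a code segment, a TSS, a task gate or a call gate;
   for call gates the new CS:EIP is taken from the gate itself. */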
1590void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1591                           target_ulong next_eip)
1592{
1593    int gate_cs, type;
1594    uint32_t e1, e2, cpl, dpl, rpl, limit;
1595
1596    if ((new_cs & 0xfffc) == 0) {
1597        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1598    }
1599    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1600        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1601    }
1602    cpl = env->hflags & HF_CPL_MASK;
1603    if (e2 & DESC_S_MASK) {
1604        if (!(e2 & DESC_CS_MASK)) {
1605            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1606        }
1607        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1608        if (e2 & DESC_C_MASK) {
1609            /* conforming code segment */
1610            if (dpl > cpl) {
1611                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1612            }
1613        } else {
1614            /* non-conforming code segment */
1615            rpl = new_cs & 3;
1616            if (rpl > cpl) {
1617                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1618            }
1619            if (dpl != cpl) {
1620                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1621            }
1622        }
1623        if (!(e2 & DESC_P_MASK)) {
1624            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1625        }
1626        limit = get_seg_limit(e1, e2);
1627        if (new_eip > limit &&
1628            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1629            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1630        }
1631        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1632                       get_seg_base(e1, e2), limit, e2);
1633        env->eip = new_eip;
1634    } else {
1635        /* jump to call or task gate */
1636        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1637        rpl = new_cs & 3;
1638        cpl = env->hflags & HF_CPL_MASK;
1639        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1640
1641#ifdef TARGET_X86_64
1642        if (env->efer & MSR_EFER_LMA) {
1643            if (type != 12) {
1644                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1645            }
1646        }
1647#endif
1648        switch (type) {
1649        case 1: /* 286 TSS */
1650        case 9: /* 386 TSS */
1651        case 5: /* task gate */
1652            if (dpl < cpl || dpl < rpl) {
1653                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1654            }
1655            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1656            break;
1657        case 4: /* 286 call gate */
1658        case 12: /* 386 call gate */
1659            if ((dpl < cpl) || (dpl < rpl)) {
1660                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1661            }
1662            if (!(e2 & DESC_P_MASK)) {
1663                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1664            }
1665            gate_cs = e1 >> 16;
1666            new_eip = (e1 & 0xffff);
1667            if (type == 12) {
1668                new_eip |= (e2 & 0xffff0000);
1669            }
1670
1671#ifdef TARGET_X86_64
1672            if (env->efer & MSR_EFER_LMA) {
1673                /* load the upper 8 bytes of the 64-bit call gate */
1674                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1675                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1676                                           GETPC());
1677                }
1678                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1679                if (type != 0) {
1680                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1681                                           GETPC());
1682                }
1683                new_eip |= ((target_ulong)e1) << 32;
1684            }
1685#endif
1686
1687            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1688                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1689            }
1690            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1691            /* must be code segment */
1692            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1693                 (DESC_S_MASK | DESC_CS_MASK))) {
1694                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1695            }
1696            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1697                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1698                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1699            }
1700#ifdef TARGET_X86_64
1701            if (env->efer & MSR_EFER_LMA) {
1702                if (!(e2 & DESC_L_MASK)) {
1703                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1704                }
1705                if (e2 & DESC_B_MASK) {
1706                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1707                }
1708            }
1709#endif
1710            if (!(e2 & DESC_P_MASK)) {
1711                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1712            }
1713            limit = get_seg_limit(e1, e2);
1714            if (new_eip > limit &&
1715                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
1716                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1717            }
1718            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1719                                   get_seg_base(e1, e2), limit, e2);
1720            env->eip = new_eip;
1721            break;
1722        default:
1723            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1724            break;
1725        }
1726    }
1727}
1728
1729/* real and vm86 mode call */
1730void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1731                       int shift, int next_eip)
1732{
1733    int new_eip;
1734    uint32_t esp, esp_mask;
1735    target_ulong ssp;
1736
1737    new_eip = new_eip1;
1738    esp = env->regs[R_ESP];
1739    esp_mask = get_sp_mask(env->segs[R_SS].flags);
1740    ssp = env->segs[R_SS].base;
1741    if (shift) {
1742        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1743        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1744    } else {
1745        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1746        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1747    }
1748
1749    SET_ESP(esp, esp_mask);
1750    env->eip = new_eip;
1751    env->segs[R_CS].selector = new_cs;
1752    env->segs[R_CS].base = (new_cs << 4);
1753}
1754
1755/* protected mode call */
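/* Besides plain code segments, the target may be a TSS or task gate
   (dispatched to switch_tss) or a call gate, which can switch to an
   inner stack taken from the TSS and copy param_count words to it. */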
1756void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1757                            int shift, target_ulong next_eip)
1758{
1759    int new_stack, i;
1760    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
1761    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
1762    uint32_t val, limit, old_sp_mask;
1763    target_ulong ssp, old_ssp, offset, sp;
1764
1765    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
1766    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1767    if ((new_cs & 0xfffc) == 0) {
1768        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1769    }
1770    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1771        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1772    }
1773    cpl = env->hflags & HF_CPL_MASK;
1774    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1775    if (e2 & DESC_S_MASK) {
1776        if (!(e2 & DESC_CS_MASK)) {
1777            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1778        }
1779        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1780        if (e2 & DESC_C_MASK) {
1781            /* conforming code segment */
1782            if (dpl > cpl) {
1783                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1784            }
1785        } else {
1786            /* non-conforming code segment */
1787            rpl = new_cs & 3;
1788            if (rpl > cpl) {
1789                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1790            }
1791            if (dpl != cpl) {
1792                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1793            }
1794        }
1795        if (!(e2 & DESC_P_MASK)) {
1796            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1797        }
1798
1799#ifdef TARGET_X86_64
1800        /* XXX: check 16/32 bit cases in long mode */
1801        if (shift == 2) {
1802            target_ulong rsp;
1803
1804            /* 64 bit case */
1805            rsp = env->regs[R_ESP];
1806            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1807            PUSHQ_RA(rsp, next_eip, GETPC());
1808            /* from this point, not restartable */
1809            env->regs[R_ESP] = rsp;
1810            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1811                                   get_seg_base(e1, e2),
1812                                   get_seg_limit(e1, e2), e2);
1813            env->eip = new_eip;
1814        } else
1815#endif
1816        {
1817            sp = env->regs[R_ESP];
1818            sp_mask = get_sp_mask(env->segs[R_SS].flags);
1819            ssp = env->segs[R_SS].base;
1820            if (shift) {
1821                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1822                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1823            } else {
1824                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1825                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1826            }
1827
1828            limit = get_seg_limit(e1, e2);
1829            if (new_eip > limit) {
1830                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1831            }
1832            /* from this point, not restartable */
1833            SET_ESP(sp, sp_mask);
1834            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1835                                   get_seg_base(e1, e2), limit, e2);
1836            env->eip = new_eip;
1837        }
1838    } else {
1839        /* check gate type */
1840        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1841        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1842        rpl = new_cs & 3;
1843
1844#ifdef TARGET_X86_64
1845        if (env->efer & MSR_EFER_LMA) {
1846            if (type != 12) {
1847                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1848            }
1849        }
1850#endif
1851
1852        switch (type) {
1853        case 1: /* available 286 TSS */
1854        case 9: /* available 386 TSS */
1855        case 5: /* task gate */
1856            if (dpl < cpl || dpl < rpl) {
1857                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1858            }
1859            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1860            return;
1861        case 4: /* 286 call gate */
1862        case 12: /* 386 call gate */
1863            break;
1864        default:
1865            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1866            break;
1867        }
1868        shift = type >> 3;
1869
1870        if (dpl < cpl || dpl < rpl) {
1871            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1872        }
1873        /* check valid bit */
1874        if (!(e2 & DESC_P_MASK)) {
1875            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1876        }
1877        selector = e1 >> 16;
1878        param_count = e2 & 0x1f;
1879        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1880#ifdef TARGET_X86_64
1881        if (env->efer & MSR_EFER_LMA) {
1882            /* load the upper 8 bytes of the 64-bit call gate */
1883            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
1884                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1885                                       GETPC());
1886            }
1887            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1888            if (type != 0) {
1889                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
1890                                       GETPC());
1891            }
1892            offset |= ((target_ulong)e1) << 32;
1893        }
1894#endif
1895        if ((selector & 0xfffc) == 0) {
1896            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1897        }
1898
1899        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1900            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1901        }
1902        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1903            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1904        }
1905        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1906        if (dpl > cpl) {
1907            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1908        }
1909#ifdef TARGET_X86_64
1910        if (env->efer & MSR_EFER_LMA) {
1911            if (!(e2 & DESC_L_MASK)) {
1912                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1913            }
1914            if (e2 & DESC_B_MASK) {
1915                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1916            }
1917            shift++;
1918        }
1919#endif
1920        if (!(e2 & DESC_P_MASK)) {
1921            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1922        }
1923
1924        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1925            /* to inner privilege */
1926#ifdef TARGET_X86_64
1927            if (shift == 2) {
1928                sp = get_rsp_from_tss(env, dpl);
1929                ss = dpl;  /* SS = NULL selector with RPL = new CPL */
1930                new_stack = 1;
1931                sp_mask = 0;
1932                ssp = 0;  /* SS base is always zero in IA-32e mode */
1933                LOG_PCALL("new ss:rsp=%04x:" TARGET_FMT_lx " env->regs[R_ESP]="
1934                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
1935            } else
1936#endif
1937            {
1938                uint32_t sp32;
1939                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
1940                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1941                          TARGET_FMT_lx "\n", ss, sp32, param_count,
1942                          env->regs[R_ESP]);
1943                sp = sp32;
1944                if ((ss & 0xfffc) == 0) {
1945                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1946                }
1947                if ((ss & 3) != dpl) {
1948                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1949                }
1950                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1951                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1952                }
1953                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1954                if (ss_dpl != dpl) {
1955                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1956                }
1957                if (!(ss_e2 & DESC_S_MASK) ||
1958                    (ss_e2 & DESC_CS_MASK) ||
1959                    !(ss_e2 & DESC_W_MASK)) {
1960                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1961                }
1962                if (!(ss_e2 & DESC_P_MASK)) {
1963                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1964                }
1965
1966                sp_mask = get_sp_mask(ss_e2);
1967                ssp = get_seg_base(ss_e1, ss_e2);
1968            }
1969
1970            /* push_size = ((param_count * 2) + 8) << shift; */
1971
1972            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1973            old_ssp = env->segs[R_SS].base;
1974#ifdef TARGET_X86_64
1975            if (shift == 2) {
1976                /* XXX: verify if new stack address is canonical */
1977                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
1978                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
1979                /* parameters aren't supported for 64-bit call gates */
1980            } else
1981#endif
1982            if (shift == 1) {
1983                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1984                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1985                for (i = param_count - 1; i >= 0; i--) {
1986                    val = cpu_ldl_kernel_ra(env, old_ssp +
1987                                            ((env->regs[R_ESP] + i * 4) &
1988                                             old_sp_mask), GETPC());
1989                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1990                }
1991            } else {
1992                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1993                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1994                for (i = param_count - 1; i >= 0; i--) {
1995                    val = cpu_lduw_kernel_ra(env, old_ssp +
1996                                             ((env->regs[R_ESP] + i * 2) &
1997                                              old_sp_mask), GETPC());
1998                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1999                }
2000            }
2001            new_stack = 1;
2002        } else {
2003            /* to same privilege */
2004            sp = env->regs[R_ESP];
2005            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2006            ssp = env->segs[R_SS].base;
2007            /* push_size = (4 << shift); */
2008            new_stack = 0;
2009        }
2010
2011#ifdef TARGET_X86_64
2012        if (shift == 2) {
2013            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
2014            PUSHQ_RA(sp, next_eip, GETPC());
2015        } else
2016#endif
2017        if (shift == 1) {
2018            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
2019            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
2020        } else {
2021            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
2022            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
2023        }
2024
2025        /* from this point, not restartable */
2026
2027        if (new_stack) {
2028#ifdef TARGET_X86_64
2029            if (shift == 2) {
2030                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
2031            } else
2032#endif
2033            {
2034                ss = (ss & ~3) | dpl;
2035                cpu_x86_load_seg_cache(env, R_SS, ss,
2036                                       ssp,
2037                                       get_seg_limit(ss_e1, ss_e2),
2038                                       ss_e2);
2039            }
2040        }
2041
2042        selector = (selector & ~3) | dpl;
2043        cpu_x86_load_seg_cache(env, R_CS, selector,
2044                       get_seg_base(e1, e2),
2045                       get_seg_limit(e1, e2),
2046                       e2);
2047        SET_ESP(sp, sp_mask);
2048        env->eip = offset;
2049    }
2050}
2051
2052/* real and vm86 mode iret */
2053void helper_iret_real(CPUX86State *env, int shift)
2054{
2055    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2056    target_ulong ssp;
2057    int eflags_mask;
2058
2059    sp_mask = 0xffff; /* XXX: use SS segment size? */
2060    sp = env->regs[R_ESP];
2061    ssp = env->segs[R_SS].base;
2062    if (shift == 1) {
2063        /* 32 bits */
2064        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2065        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2066        new_cs &= 0xffff;
2067        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2068    } else {
2069        /* 16 bits */
2070        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2071        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2072        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2073    }
2074    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2075    env->segs[R_CS].selector = new_cs;
2076    env->segs[R_CS].base = (new_cs << 4);
2077    env->eip = new_eip;
2078    if (env->eflags & VM_MASK) {
2079        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2080            NT_MASK;
2081    } else {
2082        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2083            RF_MASK | NT_MASK;
2084    }
2085    if (shift == 0) {
2086        eflags_mask &= 0xffff;
2087    }
2088    cpu_load_eflags(env, new_eflags, eflags_mask);
2089    env->hflags2 &= ~HF2_NMI_MASK;
2090}
2091
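/* When returning to an outer privilege level, a data or non-conforming
   code segment register whose DPL is below the new CPL is nullified. */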
2092static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2093{
2094    int dpl;
2095    uint32_t e2;
2096
2097    /* XXX: on x86_64, we do not want to nullify FS and GS because
2098       they may still contain a valid base. I would be interested to
2099       know how a real x86_64 CPU behaves */
2100    if ((seg_reg == R_FS || seg_reg == R_GS) &&
2101        (env->segs[seg_reg].selector & 0xfffc) == 0) {
2102        return;
2103    }
2104
2105    e2 = env->segs[seg_reg].flags;
2106    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2107    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2108        /* data or non-conforming code segment */
2109        if (dpl < cpl) {
2110            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2111        }
2112    }
2113}
2114
2115/* protected mode lret and iret: common far-return code */
2116static inline void helper_ret_protected(CPUX86State *env, int shift,
2117                                        int is_iret, int addend,
2118                                        uintptr_t retaddr)
2119{
2120    uint32_t new_cs, new_eflags, new_ss;
2121    uint32_t new_es, new_ds, new_fs, new_gs;
2122    uint32_t e1, e2, ss_e1, ss_e2;
2123    int cpl, dpl, rpl, eflags_mask, iopl;
2124    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2125
2126#ifdef TARGET_X86_64
2127    if (shift == 2) {
2128        sp_mask = -1;
2129    } else
2130#endif
2131    {
2132        sp_mask = get_sp_mask(env->segs[R_SS].flags);
2133    }
2134    sp = env->regs[R_ESP];
2135    ssp = env->segs[R_SS].base;
2136    new_eflags = 0; /* avoid warning */
2137#ifdef TARGET_X86_64
2138    if (shift == 2) {
2139        POPQ_RA(sp, new_eip, retaddr);
2140        POPQ_RA(sp, new_cs, retaddr);
2141        new_cs &= 0xffff;
2142        if (is_iret) {
2143            POPQ_RA(sp, new_eflags, retaddr);
2144        }
2145    } else
2146#endif
2147    {
2148        if (shift == 1) {
2149            /* 32 bits */
2150            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2151            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2152            new_cs &= 0xffff;
2153            if (is_iret) {
2154                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2155                if (new_eflags & VM_MASK) {
2156                    goto return_to_vm86;
2157                }
2158            }
2159        } else {
2160            /* 16 bits */
2161            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2162            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2163            if (is_iret) {
2164                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2165            }
2166        }
2167    }
2168    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2169              new_cs, new_eip, shift, addend);
2170    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2171    if ((new_cs & 0xfffc) == 0) {
2172        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2173    }
2174    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2175        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2176    }
2177    if (!(e2 & DESC_S_MASK) ||
2178        !(e2 & DESC_CS_MASK)) {
2179        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2180    }
2181    cpl = env->hflags & HF_CPL_MASK;
2182    rpl = new_cs & 3;
2183    if (rpl < cpl) {
2184        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2185    }
2186    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2187    if (e2 & DESC_C_MASK) {
2188        if (dpl > rpl) {
2189            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2190        }
2191    } else {
2192        if (dpl != rpl) {
2193            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2194        }
2195    }
2196    if (!(e2 & DESC_P_MASK)) {
2197        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2198    }
2199
2200    sp += addend;
2201    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2202                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2203        /* return to same privilege level */
2204        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2205                       get_seg_base(e1, e2),
2206                       get_seg_limit(e1, e2),
2207                       e2);
2208    } else {
2209        /* return to different privilege level */
2210#ifdef TARGET_X86_64
2211        if (shift == 2) {
2212            POPQ_RA(sp, new_esp, retaddr);
2213            POPQ_RA(sp, new_ss, retaddr);
2214            new_ss &= 0xffff;
2215        } else
2216#endif
2217        {
2218            if (shift == 1) {
2219                /* 32 bits */
2220                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2221                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2222                new_ss &= 0xffff;
2223            } else {
2224                /* 16 bits */
2225                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2226                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2227            }
2228        }
2229        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2230                  new_ss, new_esp);
2231        if ((new_ss & 0xfffc) == 0) {
2232#ifdef TARGET_X86_64
2233            /* NULL ss is allowed in long mode if cpl != 3 */
2234            /* XXX: test CS64? */
2235            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2236                cpu_x86_load_seg_cache(env, R_SS, new_ss,
2237                                       0, 0xffffffff,
2238                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2239                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2240                                       DESC_W_MASK | DESC_A_MASK);
2241                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2242            } else
2243#endif
2244            {
2245                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2246            }
2247        } else {
2248            if ((new_ss & 3) != rpl) {
2249                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2250            }
2251            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2252                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2253            }
2254            if (!(ss_e2 & DESC_S_MASK) ||
2255                (ss_e2 & DESC_CS_MASK) ||
2256                !(ss_e2 & DESC_W_MASK)) {
2257                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2258            }
2259            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2260            if (dpl != rpl) {
2261                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2262            }
2263            if (!(ss_e2 & DESC_P_MASK)) {
2264                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2265            }
2266            cpu_x86_load_seg_cache(env, R_SS, new_ss,
2267                                   get_seg_base(ss_e1, ss_e2),
2268                                   get_seg_limit(ss_e1, ss_e2),
2269                                   ss_e2);
2270        }
2271
2272        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2273                       get_seg_base(e1, e2),
2274                       get_seg_limit(e1, e2),
2275                       e2);
2276        sp = new_esp;
2277#ifdef TARGET_X86_64
2278        if (env->hflags & HF_CS64_MASK) {
2279            sp_mask = -1;
2280        } else
2281#endif
2282        {
2283            sp_mask = get_sp_mask(ss_e2);
2284        }
2285
2286        /* validate data segments */
2287        validate_seg(env, R_ES, rpl);
2288        validate_seg(env, R_DS, rpl);
2289        validate_seg(env, R_FS, rpl);
2290        validate_seg(env, R_GS, rpl);
2291
2292        sp += addend;
2293    }
2294    SET_ESP(sp, sp_mask);
2295    env->eip = new_eip;
2296    if (is_iret) {
2297        /* NOTE: 'cpl' is the _old_ CPL */
2298        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2299        if (cpl == 0) {
2300            eflags_mask |= IOPL_MASK;
2301        }
2302        iopl = (env->eflags >> IOPL_SHIFT) & 3;
2303        if (cpl <= iopl) {
2304            eflags_mask |= IF_MASK;
2305        }
2306        if (shift == 0) {
2307            eflags_mask &= 0xffff;
2308        }
2309        cpu_load_eflags(env, new_eflags, eflags_mask);
2310    }
2311    return;
2312
2313 return_to_vm86:
2314    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2315    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2316    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2317    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2318    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2319    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2320
2321    /* modify processor state */
2322    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2323                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2324                    VIP_MASK);
2325    load_seg_vm(env, R_CS, new_cs & 0xffff);
2326    load_seg_vm(env, R_SS, new_ss & 0xffff);
2327    load_seg_vm(env, R_ES, new_es & 0xffff);
2328    load_seg_vm(env, R_DS, new_ds & 0xffff);
2329    load_seg_vm(env, R_FS, new_fs & 0xffff);
2330    load_seg_vm(env, R_GS, new_gs & 0xffff);
2331
2332    env->eip = new_eip & 0xffff;
2333    env->regs[R_ESP] = new_esp;
2334}
2335
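/* protected mode iret: if the NT flag is set, switch back to the task
   whose selector is stored in the back-link field of the current TSS,
   otherwise use the common far-return path. */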
2336void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2337{
2338    int tss_selector, type;
2339    uint32_t e1, e2;
2340
2341    /* specific case for TSS */
2342    if (env->eflags & NT_MASK) {
2343#ifdef TARGET_X86_64
2344        if (env->hflags & HF_LMA_MASK) {
2345            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2346        }
2347#endif
2348        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2349        if (tss_selector & 4) {
2350            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2351        }
2352        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2353            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2354        }
2355        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2356        /* NOTE: the 0x17 mask checks both the S bit and the busy TSS type */
2357        if (type != 3) {
2358            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2359        }
2360        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2361    } else {
2362        helper_ret_protected(env, shift, 1, 0, GETPC());
2363    }
2364    env->hflags2 &= ~HF2_NMI_MASK;
2365}
2366
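/* protected mode far return (lret / lret imm16) */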
2367void helper_lret_protected(CPUX86State *env, int shift, int addend)
2368{
2369    helper_ret_protected(env, shift, 0, addend, GETPC());
2370}
2371
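/* SYSENTER: fast system call entry. CS and SS are derived from the
   IA32_SYSENTER_CS MSR (SS = CS + 8), ESP/EIP from
   IA32_SYSENTER_ESP/EIP. */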
2372void helper_sysenter(CPUX86State *env)
2373{
2374    if (env->sysenter_cs == 0) {
2375        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2376    }
2377    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2378
2379#ifdef TARGET_X86_64
2380    if (env->hflags & HF_LMA_MASK) {
2381        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2382                               0, 0xffffffff,
2383                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2384                               DESC_S_MASK |
2385                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2386                               DESC_L_MASK);
2387    } else
2388#endif
2389    {
2390        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2391                               0, 0xffffffff,
2392                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2393                               DESC_S_MASK |
2394                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2395    }
2396    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2397                           0, 0xffffffff,
2398                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2399                           DESC_S_MASK |
2400                           DESC_W_MASK | DESC_A_MASK);
2401    env->regs[R_ESP] = env->sysenter_esp;
2402    env->eip = env->sysenter_eip;
2403}
2404
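/* SYSEXIT: fast return to CPL 3. CS/SS are derived from
   IA32_SYSENTER_CS (+16/+24 for a legacy return, +32/+40 for a 64-bit
   one), while ESP and EIP are taken from ECX and EDX. */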
2405void helper_sysexit(CPUX86State *env, int dflag)
2406{
2407    int cpl;
2408
2409    cpl = env->hflags & HF_CPL_MASK;
2410    if (env->sysenter_cs == 0 || cpl != 0) {
2411        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2412    }
2413#ifdef TARGET_X86_64
2414    if (dflag == 2) {
2415        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2416                               3, 0, 0xffffffff,
2417                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2418                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2419                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2420                               DESC_L_MASK);
2421        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2422                               3, 0, 0xffffffff,
2423                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2424                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2425                               DESC_W_MASK | DESC_A_MASK);
2426    } else
2427#endif
2428    {
2429        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2430                               3, 0, 0xffffffff,
2431                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2432                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2433                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2434        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2435                               3, 0, 0xffffffff,
2436                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2437                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2438                               DESC_W_MASK | DESC_A_MASK);
2439    }
2440    env->regs[R_ESP] = env->regs[R_ECX];
2441    env->eip = env->regs[R_EDX];
2442}
2443
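/* LSL: return the segment limit of the selected descriptor and set ZF,
   or clear ZF and return 0 if the descriptor is not accessible. */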
2444target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2445{
2446    unsigned int limit;
2447    uint32_t e1, e2, eflags, selector;
2448    int rpl, dpl, cpl, type;
2449
2450    selector = selector1 & 0xffff;
2451    eflags = cpu_cc_compute_all(env, CC_OP);
2452    if ((selector & 0xfffc) == 0) {
2453        goto fail;
2454    }
2455    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2456        goto fail;
2457    }
2458    rpl = selector & 3;
2459    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2460    cpl = env->hflags & HF_CPL_MASK;
2461    if (e2 & DESC_S_MASK) {
2462        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2463            /* conforming */
2464        } else {
2465            if (dpl < cpl || dpl < rpl) {
2466                goto fail;
2467            }
2468        }
2469    } else {
2470        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2471        switch (type) {
2472        case 1:
2473        case 2:
2474        case 3:
2475        case 9:
2476        case 11:
2477            break;
2478        default:
2479            goto fail;
2480        }
2481        if (dpl < cpl || dpl < rpl) {
2482        fail:
2483            CC_SRC = eflags & ~CC_Z;
2484            return 0;
2485        }
2486    }
2487    limit = get_seg_limit(e1, e2);
2488    CC_SRC = eflags | CC_Z;
2489    return limit;
2490}
2491
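/* LAR: return the access rights bytes of the selected descriptor and
   set ZF, or clear ZF and return 0 if the descriptor is not accessible. */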
2492target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2493{
2494    uint32_t e1, e2, eflags, selector;
2495    int rpl, dpl, cpl, type;
2496
2497    selector = selector1 & 0xffff;
2498    eflags = cpu_cc_compute_all(env, CC_OP);
2499    if ((selector & 0xfffc) == 0) {
2500        goto fail;
2501    }
2502    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2503        goto fail;
2504    }
2505    rpl = selector & 3;
2506    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2507    cpl = env->hflags & HF_CPL_MASK;
2508    if (e2 & DESC_S_MASK) {
2509        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2510            /* conforming */
2511        } else {
2512            if (dpl < cpl || dpl < rpl) {
2513                goto fail;
2514            }
2515        }
2516    } else {
2517        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2518        switch (type) {
2519        case 1:
2520        case 2:
2521        case 3:
2522        case 4:
2523        case 5:
2524        case 9:
2525        case 11:
2526        case 12:
2527            break;
2528        default:
2529            goto fail;
2530        }
2531        if (dpl < cpl || dpl < rpl) {
2532        fail:
2533            CC_SRC = eflags & ~CC_Z;
2534            return 0;
2535        }
2536    }
2537    CC_SRC = eflags | CC_Z;
2538    return e2 & 0x00f0ff00;
2539}
2540
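/* VERR: set ZF if the segment is readable at the current privilege level. */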
2541void helper_verr(CPUX86State *env, target_ulong selector1)
2542{
2543    uint32_t e1, e2, eflags, selector;
2544    int rpl, dpl, cpl;
2545
2546    selector = selector1 & 0xffff;
2547    eflags = cpu_cc_compute_all(env, CC_OP);
2548    if ((selector & 0xfffc) == 0) {
2549        goto fail;
2550    }
2551    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2552        goto fail;
2553    }
2554    if (!(e2 & DESC_S_MASK)) {
2555        goto fail;
2556    }
2557    rpl = selector & 3;
2558    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2559    cpl = env->hflags & HF_CPL_MASK;
2560    if (e2 & DESC_CS_MASK) {
2561        if (!(e2 & DESC_R_MASK)) {
2562            goto fail;
2563        }
2564        if (!(e2 & DESC_C_MASK)) {
2565            if (dpl < cpl || dpl < rpl) {
2566                goto fail;
2567            }
2568        }
2569    } else {
2570        if (dpl < cpl || dpl < rpl) {
2571        fail:
2572            CC_SRC = eflags & ~CC_Z;
2573            return;
2574        }
2575    }
2576    CC_SRC = eflags | CC_Z;
2577}
2578
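/* VERW: set ZF if the segment is writable at the current privilege level. */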
2579void helper_verw(CPUX86State *env, target_ulong selector1)
2580{
2581    uint32_t e1, e2, eflags, selector;
2582    int rpl, dpl, cpl;
2583
2584    selector = selector1 & 0xffff;
2585    eflags = cpu_cc_compute_all(env, CC_OP);
2586    if ((selector & 0xfffc) == 0) {
2587        goto fail;
2588    }
2589    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2590        goto fail;
2591    }
2592    if (!(e2 & DESC_S_MASK)) {
2593        goto fail;
2594    }
2595    rpl = selector & 3;
2596    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2597    cpl = env->hflags & HF_CPL_MASK;
2598    if (e2 & DESC_CS_MASK) {
2599        goto fail;
2600    } else {
2601        if (dpl < cpl || dpl < rpl) {
2602            goto fail;
2603        }
2604        if (!(e2 & DESC_W_MASK)) {
2605        fail:
2606            CC_SRC = eflags & ~CC_Z;
2607            return;
2608        }
2609    }
2610    CC_SRC = eflags | CC_Z;
2611}
2612
2613#if defined(CONFIG_USER_ONLY)
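/* user-mode emulation only: in real or vm86 mode load a flat 16-bit
   style segment (base = selector << 4), otherwise go through the full
   protected mode checks of helper_load_seg(). */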
2614void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2615{
2616    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2617        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2618        selector &= 0xffff;
2619        cpu_x86_load_seg_cache(env, seg_reg, selector,
2620                               (selector << 4), 0xffff,
2621                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2622                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2623    } else {
2624        helper_load_seg(env, seg_reg, selector);
2625    }
2626}
2627#endif
2628
2629/* check if Port I/O is allowed in TSS */
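/* The I/O permission bitmap starts at the 16-bit offset stored at byte
   0x66 of the TSS; each port is one bit, and every bit covering the
   access must be clear for the access to be allowed. */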
2630static inline void check_io(CPUX86State *env, int addr, int size,
2631                            uintptr_t retaddr)
2632{
2633    int io_offset, val, mask;
2634
2635    /* the TSS must be a valid 32-bit TSS */
2636    if (!(env->tr.flags & DESC_P_MASK) ||
2637        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2638        env->tr.limit < 103) {
2639        goto fail;
2640    }
2641    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2642    io_offset += (addr >> 3);
2643    /* Note: the check needs two bytes */
2644    if ((io_offset + 1) > env->tr.limit) {
2645        goto fail;
2646    }
2647    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2648    val >>= (addr & 7);
2649    mask = (1 << size) - 1;
2650    /* all bits must be zero to allow the I/O */
2651    if ((val & mask) != 0) {
2652    fail:
2653        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2654    }
2655}
2656
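/* byte, word and long (1/2/4 byte) variants of the TSS I/O permission check */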
2657void helper_check_iob(CPUX86State *env, uint32_t t0)
2658{
2659    check_io(env, t0, 1, GETPC());
2660}
2661
2662void helper_check_iow(CPUX86State *env, uint32_t t0)
2663{
2664    check_io(env, t0, 2, GETPC());
2665}
2666
2667void helper_check_iol(CPUX86State *env, uint32_t t0)
2668{
2669    check_io(env, t0, 4, GETPC());
2670}
2671