qemu/target/i386/helper.c
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/qapi-events-run-state.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/runstate.h"
#include "kvm/kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/hw_accel.h"
#include "monitor/monitor.h"
#endif

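/*
 * Recompute the MPX-related hflags from the current bound-config state:
 * BNDCFGU at CPL 3, IA32_BNDCFGS otherwise.  MPX is enabled only when
 * CR4.OSXSAVE, XCR0[BNDCSR] and the config's enable bit are all set;
 * BNDPRESERVE is mirrored into hflags2.
 */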
void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}

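/*
 * Decode family/model from CPUID.1:EAX (env->cpuid_version): family is
 * bits 11:8, model is the extended model (bits 19:16) concatenated with
 * bits 7:4.  The extended family field is not taken into account here.
 */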
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/*
 * Broadcast MCA signal for processor version 06H_EH (family 6, model 14)
 * and above.
 */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

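/*
 * Open or close the A20 gate.  A change invalidates every MMU mapping,
 * so any translated code is unlinked and the whole TLB flushed before
 * bit 20 of a20_mask is updated.
 */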
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

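/*
 * Install a new CR0 value.  The TLB is flushed when PG, WP or PE
 * change; long mode is entered or left when PG toggles while EFER.LME
 * or EFER.LMA is set; CR0.ET is forced to 1 and the PE/ADDSEG and
 * MP/EM/TS hflags are recomputed.
 */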
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = env_archcpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK)) {
            return;
        }
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                        "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(env_cpu(env));
    }
}

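/*
 * Install a new CR4 value: flush the TLB when paging-related bits
 * change, silently clear bits whose feature is absent from CPUID, and
 * recompute the OSFXSR/SMAP and MPX hflags.
 */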
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(env_cpu(env));
    }

    /* Clear bits we're going to recompute.  */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
        new_cr4 &= ~CR4_PKS_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
}

#if !defined(CONFIG_USER_ONLY)
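/*
 * Debug-only software page walk: translate a virtual address to a
 * physical one by walking the 4/5-level long-mode, PAE or legacy
 * 2-level tables by hand, honouring the A20 mask and large pages.
 * Unlike the real MMU it performs no permission checks and sets no
 * accessed/dirty bits; returns -1 if the address is not mapped.
 */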
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    *attrs = cpu_get_mem_attrs(env);

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void emit_guest_memory_failure(MemoryFailureAction action, bool ar,
                                      bool recursive)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = recursive};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_GUEST, action,
                                   &mff);
}

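/*
 * Per-CPU worker for MCE injection, run via run_on_cpu().  An SRAO
 * event is dropped while another MCE is in flight unless unconditional
 * injection was requested.  Uncorrected errors either raise
 * CPU_INTERRUPT_MCE or, when CR4.MCE is clear or an MCE is already in
 * progress, request a guest reset (triple fault); corrected errors
 * only update the bank registers.
 */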
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
    g_autofree char *msg = NULL;
    bool need_reset = false;
    bool recursive;
    bool ar = !!(params->status & MCI_STATUS_AR);

    cpu_synchronize_state(cs);
    recursive = !!(cenv->mcg_status & MCG_STATUS_MCIP);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO) && !ar && recursive) {
        emit_guest_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, ar, recursive);
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        if (!(cenv->cr[4] & CR4_MCE_MASK)) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: MCE capability is not enabled, "
                                  "raising triple fault", cs->cpu_index);
        } else if (recursive) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: Previous MCE still in progress, "
                                  "raising triple fault", cs->cpu_index);
        }

        if (need_reset) {
            emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar,
                                      recursive);
            monitor_printf(params->mon, "%s", msg);
            qemu_log_mask(CPU_LOG_RESET, "%s\n", msg);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }

        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }

    emit_guest_memory_failure(MEMORY_FAILURE_ACTION_INJECT, ar, recursive);
}

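/*
 * Monitor entry point for MCE injection: validate the machine-check
 * capability, bank number and status word, inject on the target CPU
 * and, with MCE_INJECT_BROADCAST, replicate a fixed uncorrected event
 * into bank 1 of every other CPU.
 */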
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}

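/*
 * Report a TPR access.  Hardware accelerators record the access type
 * and raise CPU_INTERRUPT_TPR so it is handled on the next interrupt
 * window; under TCG the guest state is restored first and the APIC is
 * notified directly.
 */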
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (kvm_enabled() || whpx_enabled() || nvmm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else if (tcg_enabled()) {
        cpu_restore_state(cs, cs->mem_io_pc, false);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

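/*
 * Fetch and decode a segment descriptor from the GDT or LDT using
 * debug (non-faulting) memory accesses.  Returns 1 and fills in
 * base/limit/flags on success, 0 if the selector is outside the table
 * limit or the descriptor cannot be read.
 */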
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    CPUState *cs = env_cpu(env);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr + 4, (uint8_t *)&e2, sizeof(e2), 0) != 0) {
        return 0;
    }

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        *limit = (*limit << 12) | 0xfff;
    }
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
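/*
 * Emulate the INIT signal: reset the CPU while preserving a pending
 * SIPI and the start_init_save..end_init_save slice of the state, let
 * KVM reinitialize the vCPU if it is in use, then reset the local APIC.
 */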
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

#ifndef CONFIG_USER_ONLY

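/*
 * Load a new EFER value and keep the derived LMA/SVME hflags in sync.
 */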
void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

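/*
 * Physical memory accessors.  They fetch the current memory transaction
 * attributes from the CPU state so that accesses are routed to the
 * correct address space (e.g. SMRAM while in SMM).
 */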
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldub(as, addr, attrs, NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_lduw(as, addr, attrs, NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldl(as, addr, attrs, NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldq(as, addr, attrs, NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stb(as, addr, val, attrs, NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl_notdirty(as, addr, val, attrs, NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stw(as, addr, val, attrs, NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl(as, addr, val, attrs, NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stq(as, addr, val, attrs, NULL);
}
#endif