qemu/target/i386/excp_helper.c
/*
 *  x86 exception helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "exec/helper-proto.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}
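
/*
 * Worked example (a sketch of the rules above): exceptions 0 (#DE) and
 * 10-13 (#TS, #NP, #SS, #GP) are contributory.  A contributory fault
 * raised while a #PF is being delivered is promoted to #DF:
 *
 *     int err = 0;
 *     env->old_exception = EXCP0E_PAGE;                  // #PF in progress
 *     int v = check_exception(env, EXCP0D_GPF, &err, 0); // #GP hits
 *     // now v == EXCP08_DBLE and err == 0
 */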

/*
 * Signal an interrupt or exception. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * env->eip value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
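
/*
 * Typical use (a sketch, not code from this file): a helper that detects
 * a privilege violation reports #GP through these shortcuts, with GETPC()
 * letting the TCG unwinder recover the faulting guest state:
 *
 *     if ((env->hflags & HF_CPL_MASK) != 0) {
 *         raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
 *     }
 */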

#if defined(CONFIG_USER_ONLY)
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    return 1;
}

#else

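/*
 * Translate a guest-physical address to a host-physical one by walking
 * the nested page tables (SVM NPT).  If nested paging is not active,
 * gphys is returned unchanged.  *prot, when non-NULL, is narrowed to
 * the permissions granted by the nested tables; prot == NULL marks a
 * guest page-table access made during the outer walk.  On a nested
 * fault this exits to the L1 hypervisor via cpu_vmexit() and does not
 * return.
 */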
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;

            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }
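
        /*
         * Worked example of the PSE-36 reconstruction above (a sketch):
         * with pde = 0xff4021e3 (present, PSE, bit 13 set), the shift
         * (pde & 0x1fe000) << 19 contributes 0x100000000, so the 4MB
         * frame lives at physical 0x1ff400000 while the low PDE flag
         * bits stay intact for the accessed/dirty updates below.
         */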

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        /* prot is NULL for page-table accesses made during the walk */
        if (prot) {
            *prot &= ~PAGE_EXEC;
        }
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        if (prot) {
            *prot &= ~PAGE_WRITE;
        }
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

 do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
 do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
 do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Outside of long mode we can only address 32 bits */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }
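
            /*
             * Example (a sketch): with 4-level paging (la57 == 0) the
             * sign-extension check accepts canonical addresses such as
             * 0x00007fffffffffff (sext == 0) and 0xffff800000000000
             * (sext == -1), while 0x0000800000000000 gives sext == 1
             * and raises #GP.
             */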

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }
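
    /*
     * Protection-key example (a sketch): for a user page tagged with
     * key 3 (pte bits 62:59) and pkru == 0x80, pkru >> 6 == 2, so
     * pkru_ad == 0 and pkru_wd == 2: reads are allowed, but a write
     * (is_write1 == 1) clears PAGE_WRITE and faults with
     * PG_ERROR_PK_MASK set when user mode or CR0.WP applies.
     */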

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }
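
    /*
     * In other words (a sketch of the sequence): a read of a clean page
     * installs a read-only TLB entry; the first write then misses the
     * TLB, re-enters this function with is_write == 1, sets PG_DIRTY_MASK
     * above, and only then maps the page writable.
     */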

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even for 4MB pages, we map only one 4KB page in the TLB to
       avoid filling it too quickly */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
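
/*
 * Caller's view (a sketch, modeled on the tlb_fill path): a return of 1
 * means a fault was recorded in cs->exception_index/env->error_code and
 * the caller raises it; 0 means the TLB entry was installed:
 *
 *     int ret = x86_cpu_handle_mmu_fault(cs, addr, size, access_type,
 *                                        mmu_idx);
 *     if (ret) {
 *         raise_exception_err_ra(env, cs->exception_index,
 *                                env->error_code, retaddr);
 *     }
 */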
#endif