qemu/target/arm/ptw.c
   1/*
   2 * ARM page table walking.
   3 *
   4 * This code is licensed under the GNU GPL v2 or later.
   5 *
   6 * SPDX-License-Identifier: GPL-2.0-or-later
   7 */
   8
   9#include "qemu/osdep.h"
  10#include "qemu/log.h"
  11#include "qemu/range.h"
  12#include "qemu/main-loop.h"
  13#include "exec/exec-all.h"
  14#include "cpu.h"
  15#include "internals.h"
  16#include "idau.h"
  17
  18
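/*
 * Parameter block for the page table walkers below.  As a rough
 * orientation (the code is authoritative): the in_* fields are filled in
 * by the caller before the walk (the regime being walked, the MMU index
 * used to load the descriptors themselves, the security state, and
 * whether this is a debug access that must not disturb the softmmu TLB);
 * the out_* fields are filled in by S1_ptw_translate() and describe the
 * descriptor's address (virtual and physical), whether the page holding
 * it is directly addressable and writable from the host, and the
 * security state and endianness with which it must be loaded.
 */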
  19typedef struct S1Translate {
  20    ARMMMUIdx in_mmu_idx;
  21    ARMMMUIdx in_ptw_idx;
  22    bool in_secure;
  23    bool in_debug;
  24    bool out_secure;
  25    bool out_rw;
  26    bool out_be;
  27    hwaddr out_virt;
  28    hwaddr out_phys;
  29    void *out_host;
  30} S1Translate;
  31
  32static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
  33                               uint64_t address,
  34                               MMUAccessType access_type, bool s1_is_el0,
  35                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
  36    __attribute__((nonnull));
  37
  38static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
  39                                      target_ulong address,
  40                                      MMUAccessType access_type,
  41                                      GetPhysAddrResult *result,
  42                                      ARMMMUFaultInfo *fi)
  43    __attribute__((nonnull));
  44
  45/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
  46static const uint8_t pamax_map[] = {
  47    [0] = 32,
  48    [1] = 36,
  49    [2] = 40,
  50    [3] = 42,
  51    [4] = 44,
  52    [5] = 48,
  53    [6] = 52,
  54};
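/* For example, a PARANGE (or IPS) field value of 5 selects a 48-bit range. */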
  55
  56/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
  57unsigned int arm_pamax(ARMCPU *cpu)
  58{
  59    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
  60        unsigned int parange =
  61            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
  62
  63        /*
  64         * id_aa64mmfr0 is a read-only register so values outside of the
  65         * supported mappings can be considered an implementation error.
  66         */
  67        assert(parange < ARRAY_SIZE(pamax_map));
  68        return pamax_map[parange];
  69    }
  70
  71    /*
  72     * In machvirt_init, we call arm_pamax on a cpu that is not fully
  73     * initialized, so we can't rely on the propagation done in realize.
  74     */
  75    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
  76        arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
  77        /* v7 with LPAE */
  78        return 40;
  79    }
  80    /* Anything else */
  81    return 32;
  82}
  83
  84/*
  85 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
  86 */
  87ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
  88{
  89    switch (mmu_idx) {
  90    case ARMMMUIdx_E10_0:
  91        return ARMMMUIdx_Stage1_E0;
  92    case ARMMMUIdx_E10_1:
  93        return ARMMMUIdx_Stage1_E1;
  94    case ARMMMUIdx_E10_1_PAN:
  95        return ARMMMUIdx_Stage1_E1_PAN;
  96    default:
  97        return mmu_idx;
  98    }
  99}
 100
 101ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
 102{
 103    return stage_1_mmu_idx(arm_mmu_idx(env));
 104}
 105
 106static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
 107{
 108    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
 109}
 110
 111/* Return the TTBR associated with this translation regime */
 112static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
 113{
 114    if (mmu_idx == ARMMMUIdx_Stage2) {
 115        return env->cp15.vttbr_el2;
 116    }
 117    if (mmu_idx == ARMMMUIdx_Stage2_S) {
 118        return env->cp15.vsttbr_el2;
 119    }
 120    if (ttbrn == 0) {
 121        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
 122    } else {
 123        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
 124    }
 125}
 126
 127/* Return true if the specified stage of address translation is disabled */
 128static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
 129                                        bool is_secure)
 130{
 131    uint64_t hcr_el2;
 132
 133    if (arm_feature(env, ARM_FEATURE_M)) {
 134        switch (env->v7m.mpu_ctrl[is_secure] &
 135                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
 136        case R_V7M_MPU_CTRL_ENABLE_MASK:
 137            /* Enabled, but not for HardFault and NMI */
 138            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
 139        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
 140            /* Enabled for all cases */
 141            return false;
 142        case 0:
 143        default:
 144            /*
 145             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
 146             * we warned about that in armv7m_nvic.c when the guest set it.
 147             */
 148            return true;
 149        }
 150    }
 151
 152    hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);
 153
 154    switch (mmu_idx) {
 155    case ARMMMUIdx_Stage2:
 156    case ARMMMUIdx_Stage2_S:
 157        /* HCR.DC means HCR.VM behaves as 1 */
 158        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
 159
 160    case ARMMMUIdx_E10_0:
 161    case ARMMMUIdx_E10_1:
 162    case ARMMMUIdx_E10_1_PAN:
 163        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
 164        if (hcr_el2 & HCR_TGE) {
 165            return true;
 166        }
 167        break;
 168
 169    case ARMMMUIdx_Stage1_E0:
 170    case ARMMMUIdx_Stage1_E1:
 171    case ARMMMUIdx_Stage1_E1_PAN:
 172        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
 173        if (hcr_el2 & HCR_DC) {
 174            return true;
 175        }
 176        break;
 177
 178    case ARMMMUIdx_E20_0:
 179    case ARMMMUIdx_E20_2:
 180    case ARMMMUIdx_E20_2_PAN:
 181    case ARMMMUIdx_E2:
 182    case ARMMMUIdx_E3:
 183        break;
 184
 185    case ARMMMUIdx_Phys_NS:
 186    case ARMMMUIdx_Phys_S:
 187        /* No translation for physical address spaces. */
 188        return true;
 189
 190    default:
 191        g_assert_not_reached();
 192    }
 193
 194    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
 195}
 196
 197static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
 198{
 199    /*
 200     * For an S1 page table walk, the stage 1 attributes are always
 201     * some form of "this is Normal memory". The combined S1+S2
 202     * attributes are therefore only Device if stage 2 specifies Device.
 203     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
 204     * ie when cacheattrs.attrs bits [3:2] are 0b00.
 205     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
 206     * when cacheattrs.attrs bit [2] is 0.
 207     */
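    /*
     * For example, with FWB == 0 an attrs value of 0b0001 (Device-nGnRE)
     * has bits [3:2] == 0b00 and so is reported as Device here, while any
     * Normal memory encoding is not.
     */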
 208    if (hcr & HCR_FWB) {
 209        return (attrs & 0x4) == 0;
 210    } else {
 211        return (attrs & 0xc) == 0;
 212    }
 213}
 214
 215/* Translate a S1 pagetable walk through S2 if needed.  */
 216static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
 217                             hwaddr addr, ARMMMUFaultInfo *fi)
 218{
 219    bool is_secure = ptw->in_secure;
 220    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
 221    ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
 222    uint8_t pte_attrs;
 223    bool pte_secure;
 224
 225    ptw->out_virt = addr;
 226
 227    if (unlikely(ptw->in_debug)) {
 228        /*
 229         * From gdbstub, do not use softmmu so that we don't modify the
 230         * state of the cpu at all, including softmmu tlb contents.
 231         */
 232        if (regime_is_stage2(s2_mmu_idx)) {
 233            S1Translate s2ptw = {
 234                .in_mmu_idx = s2_mmu_idx,
 235                .in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS,
 236                .in_secure = is_secure,
 237                .in_debug = true,
 238            };
 239            GetPhysAddrResult s2 = { };
 240
 241            if (!get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
 242                                    false, &s2, fi)) {
 243                goto fail;
 244            }
 245            ptw->out_phys = s2.f.phys_addr;
 246            pte_attrs = s2.cacheattrs.attrs;
 247            pte_secure = s2.f.attrs.secure;
 248        } else {
 249            /* Regime is physical. */
 250            ptw->out_phys = addr;
 251            pte_attrs = 0;
 252            pte_secure = is_secure;
 253        }
 254        ptw->out_host = NULL;
 255        ptw->out_rw = false;
 256    } else {
 257        CPUTLBEntryFull *full;
 258        int flags;
 259
 260        env->tlb_fi = fi;
 261        flags = probe_access_full(env, addr, MMU_DATA_LOAD,
 262                                  arm_to_core_mmu_idx(s2_mmu_idx),
 263                                  true, &ptw->out_host, &full, 0);
 264        env->tlb_fi = NULL;
 265
 266        if (unlikely(flags & TLB_INVALID_MASK)) {
 267            goto fail;
 268        }
 269        ptw->out_phys = full->phys_addr;
 270        ptw->out_rw = full->prot & PAGE_WRITE;
 271        pte_attrs = full->pte_attrs;
 272        pte_secure = full->attrs.secure;
 273    }
 274
 275    if (regime_is_stage2(s2_mmu_idx)) {
 276        uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
 277
 278        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
 279            /*
 280             * PTW set and S1 walk touched S2 Device memory:
 281             * generate Permission fault.
 282             */
 283            fi->type = ARMFault_Permission;
 284            fi->s2addr = addr;
 285            fi->stage2 = true;
 286            fi->s1ptw = true;
 287            fi->s1ns = !is_secure;
 288            return false;
 289        }
 290    }
 291
 292    /* Check if page table walk is to secure or non-secure PA space. */
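    /*
     * For example, a Secure walk whose descriptor was fetched from the
     * Secure IPA space is downgraded to the Non-secure PA space when
     * VSTCR_EL2.SW is set; VTCR_EL2.NSW plays the same role for
     * descriptors fetched from the Non-secure IPA space.
     */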
 293    ptw->out_secure = (is_secure
 294                       && !(pte_secure
 295                            ? env->cp15.vstcr_el2 & VSTCR_SW
 296                            : env->cp15.vtcr_el2 & VTCR_NSW));
 297    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
 298    return true;
 299
 300 fail:
 301    assert(fi->type != ARMFault_None);
 302    fi->s2addr = addr;
 303    fi->stage2 = true;
 304    fi->s1ptw = true;
 305    fi->s1ns = !is_secure;
 306    return false;
 307}
 308
 309/* All loads done in the course of a page table walk go through here. */
 310static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
 311                            ARMMMUFaultInfo *fi)
 312{
 313    CPUState *cs = env_cpu(env);
 314    void *host = ptw->out_host;
 315    uint32_t data;
 316
 317    if (likely(host)) {
 318        /* Page tables are in RAM, and we have the host address. */
 319        data = qatomic_read((uint32_t *)host);
 320        if (ptw->out_be) {
 321            data = be32_to_cpu(data);
 322        } else {
 323            data = le32_to_cpu(data);
 324        }
 325    } else {
 326        /* Page tables are in MMIO. */
 327        MemTxAttrs attrs = { .secure = ptw->out_secure };
 328        AddressSpace *as = arm_addressspace(cs, attrs);
 329        MemTxResult result = MEMTX_OK;
 330
 331        if (ptw->out_be) {
 332            data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
 333        } else {
 334            data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
 335        }
 336        if (unlikely(result != MEMTX_OK)) {
 337            fi->type = ARMFault_SyncExternalOnWalk;
 338            fi->ea = arm_extabort_type(result);
 339            return 0;
 340        }
 341    }
 342    return data;
 343}
 344
 345static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
 346                            ARMMMUFaultInfo *fi)
 347{
 348    CPUState *cs = env_cpu(env);
 349    void *host = ptw->out_host;
 350    uint64_t data;
 351
 352    if (likely(host)) {
 353        /* Page tables are in RAM, and we have the host address. */
 354#ifdef CONFIG_ATOMIC64
 355        data = qatomic_read__nocheck((uint64_t *)host);
 356        if (ptw->out_be) {
 357            data = be64_to_cpu(data);
 358        } else {
 359            data = le64_to_cpu(data);
 360        }
 361#else
 362        if (ptw->out_be) {
 363            data = ldq_be_p(host);
 364        } else {
 365            data = ldq_le_p(host);
 366        }
 367#endif
 368    } else {
 369        /* Page tables are in MMIO. */
 370        MemTxAttrs attrs = { .secure = ptw->out_secure };
 371        AddressSpace *as = arm_addressspace(cs, attrs);
 372        MemTxResult result = MEMTX_OK;
 373
 374        if (ptw->out_be) {
 375            data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
 376        } else {
 377            data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
 378        }
 379        if (unlikely(result != MEMTX_OK)) {
 380            fi->type = ARMFault_SyncExternalOnWalk;
 381            fi->ea = arm_extabort_type(result);
 382            return 0;
 383        }
 384    }
 385    return data;
 386}
 387
 388static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
 389                             uint64_t new_val, S1Translate *ptw,
 390                             ARMMMUFaultInfo *fi)
 391{
 392    uint64_t cur_val;
 393    void *host = ptw->out_host;
 394
 395    if (unlikely(!host)) {
 396        fi->type = ARMFault_UnsuppAtomicUpdate;
 397        fi->s1ptw = true;
 398        return 0;
 399    }
 400
 401    /*
 402     * Raising a stage2 Protection fault for an atomic update to a read-only
 403     * page is delayed until it is certain that there is a change to make.
 404     */
 405    if (unlikely(!ptw->out_rw)) {
 406        int flags;
 407        void *discard;
 408
 409        env->tlb_fi = fi;
 410        flags = probe_access_flags(env, ptw->out_virt, MMU_DATA_STORE,
 411                                   arm_to_core_mmu_idx(ptw->in_ptw_idx),
 412                                   true, &discard, 0);
 413        env->tlb_fi = NULL;
 414
 415        if (unlikely(flags & TLB_INVALID_MASK)) {
 416            assert(fi->type != ARMFault_None);
 417            fi->s2addr = ptw->out_virt;
 418            fi->stage2 = true;
 419            fi->s1ptw = true;
 420            fi->s1ns = !ptw->in_secure;
 421            return 0;
 422        }
 423
 424        /* In case CAS mismatches and we loop, remember writability. */
 425        ptw->out_rw = true;
 426    }
 427
 428#ifdef CONFIG_ATOMIC64
 429    if (ptw->out_be) {
 430        old_val = cpu_to_be64(old_val);
 431        new_val = cpu_to_be64(new_val);
 432        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
 433        cur_val = be64_to_cpu(cur_val);
 434    } else {
 435        old_val = cpu_to_le64(old_val);
 436        new_val = cpu_to_le64(new_val);
 437        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
 438        cur_val = le64_to_cpu(cur_val);
 439    }
 440#else
 441    /*
 442     * We can't support the full 64-bit atomic cmpxchg on the host.
 443     * Because this is only used for FEAT_HAFDBS, which is only for AA64,
 444     * we know that TCG_OVERSIZED_GUEST is set, which means that we are
 445     * running in round-robin mode and could only race with dma i/o.
 446     */
 447#ifndef TCG_OVERSIZED_GUEST
 448# error "Unexpected configuration"
 449#endif
 450    bool locked = qemu_mutex_iothread_locked();
 451    if (!locked) {
  452        qemu_mutex_lock_iothread();
 453    }
 454    if (ptw->out_be) {
 455        cur_val = ldq_be_p(host);
 456        if (cur_val == old_val) {
 457            stq_be_p(host, new_val);
 458        }
 459    } else {
 460        cur_val = ldq_le_p(host);
 461        if (cur_val == old_val) {
 462            stq_le_p(host, new_val);
 463        }
 464    }
 465    if (!locked) {
 466        qemu_mutex_unlock_iothread();
 467    }
 468#endif
 469
 470    return cur_val;
 471}
 472
 473static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
 474                                     uint32_t *table, uint32_t address)
 475{
 476    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
 477    uint64_t tcr = regime_tcr(env, mmu_idx);
 478    int maskshift = extract32(tcr, 0, 3);
 479    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
 480    uint32_t base_mask;
 481
 482    if (address & mask) {
 483        if (tcr & TTBCR_PD1) {
 484            /* Translation table walk disabled for TTBR1 */
 485            return false;
 486        }
 487        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
 488    } else {
 489        if (tcr & TTBCR_PD0) {
 490            /* Translation table walk disabled for TTBR0 */
 491            return false;
 492        }
 493        base_mask = ~((uint32_t)0x3fffu >> maskshift);
 494        *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
 495    }
 496    *table |= (address >> 18) & 0x3ffc;
 497    return true;
 498}
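/*
 * Worked example for the function above: with TTBCR.N == 2 the boundary
 * mask is 0xc0000000, so VAs at or above 0x40000000 are translated via
 * TTBR1 while lower VAs use a 4KB (rather than 16KB) first-level table
 * from TTBR0; in both cases VA[31:20] selects the level 1 descriptor.
 */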
 499
 500/*
 501 * Translate section/page access permissions to page R/W protection flags
 502 * @env:         CPUARMState
 503 * @mmu_idx:     MMU index indicating required translation regime
 504 * @ap:          The 3-bit access permissions (AP[2:0])
 505 * @domain_prot: The 2-bit domain access permissions
 506 * @is_user: TRUE if accessing from PL0
 507 */
 508static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx,
 509                         int ap, int domain_prot, bool is_user)
 510{
 511    if (domain_prot == 3) {
 512        return PAGE_READ | PAGE_WRITE;
 513    }
 514
 515    switch (ap) {
 516    case 0:
 517        if (arm_feature(env, ARM_FEATURE_V7)) {
 518            return 0;
 519        }
 520        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
 521        case SCTLR_S:
 522            return is_user ? 0 : PAGE_READ;
 523        case SCTLR_R:
 524            return PAGE_READ;
 525        default:
 526            return 0;
 527        }
 528    case 1:
 529        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
 530    case 2:
 531        if (is_user) {
 532            return PAGE_READ;
 533        } else {
 534            return PAGE_READ | PAGE_WRITE;
 535        }
 536    case 3:
 537        return PAGE_READ | PAGE_WRITE;
 538    case 4: /* Reserved.  */
 539        return 0;
 540    case 5:
 541        return is_user ? 0 : PAGE_READ;
 542    case 6:
 543        return PAGE_READ;
 544    case 7:
 545        if (!arm_feature(env, ARM_FEATURE_V6K)) {
 546            return 0;
 547        }
 548        return PAGE_READ;
 549    default:
 550        g_assert_not_reached();
 551    }
 552}
 553
 554/*
 555 * Translate section/page access permissions to page R/W protection flags
 556 * @env:         CPUARMState
 557 * @mmu_idx:     MMU index indicating required translation regime
 558 * @ap:          The 3-bit access permissions (AP[2:0])
 559 * @domain_prot: The 2-bit domain access permissions
 560 */
 561static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
 562                         int ap, int domain_prot)
 563{
  564    return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
  565                                 regime_is_user(env, mmu_idx));
 566}
 567
 568/*
 569 * Translate section/page access permissions to page R/W protection flags.
 570 * @ap:      The 2-bit simple AP (AP[2:1])
 571 * @is_user: TRUE if accessing from PL0
 572 */
 573static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
 574{
 575    switch (ap) {
 576    case 0:
 577        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
 578    case 1:
 579        return PAGE_READ | PAGE_WRITE;
 580    case 2:
 581        return is_user ? 0 : PAGE_READ;
 582    case 3:
 583        return PAGE_READ;
 584    default:
 585        g_assert_not_reached();
 586    }
 587}
 588
 589static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
 590{
 591    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
 592}
 593
 594static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
 595                             uint32_t address, MMUAccessType access_type,
 596                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 597{
 598    int level = 1;
 599    uint32_t table;
 600    uint32_t desc;
 601    int type;
 602    int ap;
 603    int domain = 0;
 604    int domain_prot;
 605    hwaddr phys_addr;
 606    uint32_t dacr;
 607
 608    /* Pagetable walk.  */
 609    /* Lookup l1 descriptor.  */
 610    if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
 611        /* Section translation fault if page walk is disabled by PD0 or PD1 */
 612        fi->type = ARMFault_Translation;
 613        goto do_fault;
 614    }
 615    if (!S1_ptw_translate(env, ptw, table, fi)) {
 616        goto do_fault;
 617    }
 618    desc = arm_ldl_ptw(env, ptw, fi);
 619    if (fi->type != ARMFault_None) {
 620        goto do_fault;
 621    }
 622    type = (desc & 3);
 623    domain = (desc >> 5) & 0x0f;
 624    if (regime_el(env, ptw->in_mmu_idx) == 1) {
 625        dacr = env->cp15.dacr_ns;
 626    } else {
 627        dacr = env->cp15.dacr_s;
 628    }
 629    domain_prot = (dacr >> (domain * 2)) & 3;
 630    if (type == 0) {
 631        /* Section translation fault.  */
 632        fi->type = ARMFault_Translation;
 633        goto do_fault;
 634    }
 635    if (type != 2) {
 636        level = 2;
 637    }
 638    if (domain_prot == 0 || domain_prot == 2) {
 639        fi->type = ARMFault_Domain;
 640        goto do_fault;
 641    }
 642    if (type == 2) {
 643        /* 1Mb section.  */
 644        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
 645        ap = (desc >> 10) & 3;
 646        result->f.lg_page_size = 20; /* 1MB */
 647    } else {
 648        /* Lookup l2 entry.  */
 649        if (type == 1) {
 650            /* Coarse pagetable.  */
 651            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
 652        } else {
 653            /* Fine pagetable.  */
 654            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
 655        }
 656        if (!S1_ptw_translate(env, ptw, table, fi)) {
 657            goto do_fault;
 658        }
 659        desc = arm_ldl_ptw(env, ptw, fi);
 660        if (fi->type != ARMFault_None) {
 661            goto do_fault;
 662        }
 663        switch (desc & 3) {
 664        case 0: /* Page translation fault.  */
 665            fi->type = ARMFault_Translation;
 666            goto do_fault;
 667        case 1: /* 64k page.  */
 668            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
 669            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
 670            result->f.lg_page_size = 16;
 671            break;
 672        case 2: /* 4k page.  */
 673            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
 674            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
 675            result->f.lg_page_size = 12;
 676            break;
 677        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
 678            if (type == 1) {
 679                /* ARMv6/XScale extended small page format */
 680                if (arm_feature(env, ARM_FEATURE_XSCALE)
 681                    || arm_feature(env, ARM_FEATURE_V6)) {
 682                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
 683                    result->f.lg_page_size = 12;
 684                } else {
 685                    /*
 686                     * UNPREDICTABLE in ARMv5; we choose to take a
 687                     * page translation fault.
 688                     */
 689                    fi->type = ARMFault_Translation;
 690                    goto do_fault;
 691                }
 692            } else {
 693                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
 694                result->f.lg_page_size = 10;
 695            }
 696            ap = (desc >> 4) & 3;
 697            break;
 698        default:
 699            /* Never happens, but compiler isn't smart enough to tell.  */
 700            g_assert_not_reached();
 701        }
 702    }
 703    result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
 704    result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
 705    if (!(result->f.prot & (1 << access_type))) {
 706        /* Access permission fault.  */
 707        fi->type = ARMFault_Permission;
 708        goto do_fault;
 709    }
 710    result->f.phys_addr = phys_addr;
 711    return false;
 712do_fault:
 713    fi->domain = domain;
 714    fi->level = level;
 715    return true;
 716}
 717
 718static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
 719                             uint32_t address, MMUAccessType access_type,
 720                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 721{
 722    ARMCPU *cpu = env_archcpu(env);
 723    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
 724    int level = 1;
 725    uint32_t table;
 726    uint32_t desc;
 727    uint32_t xn;
 728    uint32_t pxn = 0;
 729    int type;
 730    int ap;
 731    int domain = 0;
 732    int domain_prot;
 733    hwaddr phys_addr;
 734    uint32_t dacr;
 735    bool ns;
 736    int user_prot;
 737
 738    /* Pagetable walk.  */
 739    /* Lookup l1 descriptor.  */
 740    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
 741        /* Section translation fault if page walk is disabled by PD0 or PD1 */
 742        fi->type = ARMFault_Translation;
 743        goto do_fault;
 744    }
 745    if (!S1_ptw_translate(env, ptw, table, fi)) {
 746        goto do_fault;
 747    }
 748    desc = arm_ldl_ptw(env, ptw, fi);
 749    if (fi->type != ARMFault_None) {
 750        goto do_fault;
 751    }
 752    type = (desc & 3);
 753    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
 754        /* Section translation fault, or attempt to use the encoding
 755         * which is Reserved on implementations without PXN.
 756         */
 757        fi->type = ARMFault_Translation;
 758        goto do_fault;
 759    }
 760    if ((type == 1) || !(desc & (1 << 18))) {
 761        /* Page or Section.  */
 762        domain = (desc >> 5) & 0x0f;
 763    }
 764    if (regime_el(env, mmu_idx) == 1) {
 765        dacr = env->cp15.dacr_ns;
 766    } else {
 767        dacr = env->cp15.dacr_s;
 768    }
 769    if (type == 1) {
 770        level = 2;
 771    }
 772    domain_prot = (dacr >> (domain * 2)) & 3;
 773    if (domain_prot == 0 || domain_prot == 2) {
 774        /* Section or Page domain fault */
 775        fi->type = ARMFault_Domain;
 776        goto do_fault;
 777    }
 778    if (type != 1) {
 779        if (desc & (1 << 18)) {
 780            /* Supersection.  */
 781            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
 782            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
 783            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
 784            result->f.lg_page_size = 24;  /* 16MB */
 785        } else {
 786            /* Section.  */
 787            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
 788            result->f.lg_page_size = 20;  /* 1MB */
 789        }
 790        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
 791        xn = desc & (1 << 4);
 792        pxn = desc & 1;
 793        ns = extract32(desc, 19, 1);
 794    } else {
 795        if (cpu_isar_feature(aa32_pxn, cpu)) {
 796            pxn = (desc >> 2) & 1;
 797        }
 798        ns = extract32(desc, 3, 1);
 799        /* Lookup l2 entry.  */
 800        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
 801        if (!S1_ptw_translate(env, ptw, table, fi)) {
 802            goto do_fault;
 803        }
 804        desc = arm_ldl_ptw(env, ptw, fi);
 805        if (fi->type != ARMFault_None) {
 806            goto do_fault;
 807        }
 808        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
 809        switch (desc & 3) {
 810        case 0: /* Page translation fault.  */
 811            fi->type = ARMFault_Translation;
 812            goto do_fault;
 813        case 1: /* 64k page.  */
 814            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
 815            xn = desc & (1 << 15);
 816            result->f.lg_page_size = 16;
 817            break;
 818        case 2: case 3: /* 4k page.  */
 819            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
 820            xn = desc & 1;
 821            result->f.lg_page_size = 12;
 822            break;
 823        default:
 824            /* Never happens, but compiler isn't smart enough to tell.  */
 825            g_assert_not_reached();
 826        }
 827    }
 828    if (domain_prot == 3) {
 829        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
 830    } else {
 831        if (pxn && !regime_is_user(env, mmu_idx)) {
 832            xn = 1;
 833        }
 834        if (xn && access_type == MMU_INST_FETCH) {
 835            fi->type = ARMFault_Permission;
 836            goto do_fault;
 837        }
 838
 839        if (arm_feature(env, ARM_FEATURE_V6K) &&
 840                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
 841            /* The simplified model uses AP[0] as an access control bit.  */
 842            if ((ap & 1) == 0) {
 843                /* Access flag fault.  */
 844                fi->type = ARMFault_AccessFlag;
 845                goto do_fault;
 846            }
 847            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
 848            user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
 849        } else {
 850            result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
 851            user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
 852        }
 853        if (result->f.prot && !xn) {
 854            result->f.prot |= PAGE_EXEC;
 855        }
 856        if (!(result->f.prot & (1 << access_type))) {
 857            /* Access permission fault.  */
 858            fi->type = ARMFault_Permission;
 859            goto do_fault;
 860        }
 861        if (regime_is_pan(env, mmu_idx) &&
 862            !regime_is_user(env, mmu_idx) &&
 863            user_prot &&
 864            access_type != MMU_INST_FETCH) {
 865            /* Privileged Access Never fault */
 866            fi->type = ARMFault_Permission;
 867            goto do_fault;
 868        }
 869    }
 870    if (ns) {
 871        /* The NS bit will (as required by the architecture) have no effect if
 872         * the CPU doesn't support TZ or this is a non-secure translation
 873         * regime, because the attribute will already be non-secure.
 874         */
 875        result->f.attrs.secure = false;
 876    }
 877    result->f.phys_addr = phys_addr;
 878    return false;
 879do_fault:
 880    fi->domain = domain;
 881    fi->level = level;
 882    return true;
 883}
 884
 885/*
 886 * Translate S2 section/page access permissions to protection flags
 887 * @env:     CPUARMState
 888 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 889 * @xn:      XN (execute-never) bits
 890 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 891 */
 892static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
 893{
 894    int prot = 0;
 895
 896    if (s2ap & 1) {
 897        prot |= PAGE_READ;
 898    }
 899    if (s2ap & 2) {
 900        prot |= PAGE_WRITE;
 901    }
 902
 903    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
 904        switch (xn) {
 905        case 0:
 906            prot |= PAGE_EXEC;
 907            break;
 908        case 1:
 909            if (s1_is_el0) {
 910                prot |= PAGE_EXEC;
 911            }
 912            break;
 913        case 2:
 914            break;
 915        case 3:
 916            if (!s1_is_el0) {
 917                prot |= PAGE_EXEC;
 918            }
 919            break;
 920        default:
 921            g_assert_not_reached();
 922        }
 923    } else {
 924        if (!extract32(xn, 1, 1)) {
 925            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
 926                prot |= PAGE_EXEC;
 927            }
 928        }
 929    }
 930    return prot;
 931}
 932
 933/*
 934 * Translate section/page access permissions to protection flags
 935 * @env:     CPUARMState
 936 * @mmu_idx: MMU index indicating required translation regime
 937 * @is_aa64: TRUE if AArch64
 938 * @ap:      The 2-bit simple AP (AP[2:1])
 939 * @ns:      NS (non-secure) bit
 940 * @xn:      XN (execute-never) bit
 941 * @pxn:     PXN (privileged execute-never) bit
 942 */
 943static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
 944                      int ap, int ns, int xn, int pxn)
 945{
 946    bool is_user = regime_is_user(env, mmu_idx);
 947    int prot_rw, user_rw;
 948    bool have_wxn;
 949    int wxn = 0;
 950
 951    assert(!regime_is_stage2(mmu_idx));
 952
 953    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
 954    if (is_user) {
 955        prot_rw = user_rw;
 956    } else {
 957        if (user_rw && regime_is_pan(env, mmu_idx)) {
 958            /* PAN forbids data accesses but doesn't affect insn fetch */
 959            prot_rw = 0;
 960        } else {
 961            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
 962        }
 963    }
 964
 965    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
 966        return prot_rw;
 967    }
 968
 969    /* TODO have_wxn should be replaced with
 970     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
 971     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
 972     * compatible processors have EL2, which is required for [U]WXN.
 973     */
 974    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
 975
 976    if (have_wxn) {
 977        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
 978    }
 979
 980    if (is_aa64) {
 981        if (regime_has_2_ranges(mmu_idx) && !is_user) {
 982            xn = pxn || (user_rw & PAGE_WRITE);
 983        }
 984    } else if (arm_feature(env, ARM_FEATURE_V7)) {
 985        switch (regime_el(env, mmu_idx)) {
 986        case 1:
 987        case 3:
 988            if (is_user) {
 989                xn = xn || !(user_rw & PAGE_READ);
 990            } else {
 991                int uwxn = 0;
 992                if (have_wxn) {
 993                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
 994                }
 995                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
 996                     (uwxn && (user_rw & PAGE_WRITE));
 997            }
 998            break;
 999        case 2:
1000            break;
1001        }
1002    } else {
1003        xn = wxn = 0;
1004    }
1005
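    /*
     * For example, with SCTLR.WXN set, any mapping that is writable at the
     * current privilege level is treated as execute-never below.
     */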
1006    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
1007        return prot_rw;
1008    }
1009    return prot_rw | PAGE_EXEC;
1010}
1011
1012static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
1013                                          ARMMMUIdx mmu_idx)
1014{
1015    uint64_t tcr = regime_tcr(env, mmu_idx);
1016    uint32_t el = regime_el(env, mmu_idx);
1017    int select, tsz;
1018    bool epd, hpd;
1019
1020    assert(mmu_idx != ARMMMUIdx_Stage2_S);
1021
1022    if (mmu_idx == ARMMMUIdx_Stage2) {
1023        /* VTCR */
1024        bool sext = extract32(tcr, 4, 1);
1025        bool sign = extract32(tcr, 3, 1);
1026
1027        /*
1028         * If the sign-extend bit is not the same as t0sz[3], the result
1029         * is unpredictable. Flag this as a guest error.
1030         */
1031        if (sign != sext) {
1032            qemu_log_mask(LOG_GUEST_ERROR,
1033                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
1034        }
1035        tsz = sextract32(tcr, 0, 4) + 8;
1036        select = 0;
1037        hpd = false;
1038        epd = false;
1039    } else if (el == 2) {
1040        /* HTCR */
1041        tsz = extract32(tcr, 0, 3);
1042        select = 0;
1043        hpd = extract64(tcr, 24, 1);
1044        epd = false;
1045    } else {
1046        int t0sz = extract32(tcr, 0, 3);
1047        int t1sz = extract32(tcr, 16, 3);
1048
1049        if (t1sz == 0) {
1050            select = va > (0xffffffffu >> t0sz);
1051        } else {
1052            /* Note that we will detect errors later.  */
1053            select = va >= ~(0xffffffffu >> t1sz);
1054        }
1055        if (!select) {
1056            tsz = t0sz;
1057            epd = extract32(tcr, 7, 1);
1058            hpd = extract64(tcr, 41, 1);
1059        } else {
1060            tsz = t1sz;
1061            epd = extract32(tcr, 23, 1);
1062            hpd = extract64(tcr, 42, 1);
1063        }
1064        /* For aarch32, hpd0 is not enabled without t2e as well.  */
1065        hpd &= extract32(tcr, 6, 1);
1066    }
1067
1068    return (ARMVAParameters) {
1069        .tsz = tsz,
1070        .select = select,
1071        .epd = epd,
1072        .hpd = hpd,
1073    };
1074}
1075
1076/*
1077 * check_s2_mmu_setup
1078 * @cpu:        ARMCPU
1079 * @is_aa64:    True if the translation regime is in AArch64 state
 1080 * @level:      Suggested starting level
1081 * @inputsize:  Bitsize of IPAs
1082 * @stride:     Page-table stride (See the ARM ARM)
1083 *
1084 * Returns true if the suggested S2 translation parameters are OK and
1085 * false otherwise.
1086 */
1087static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
1088                               int inputsize, int stride, int outputsize)
1089{
1090    const int grainsize = stride + 3;
1091    int startsizecheck;
1092
1093    /*
1094     * Negative levels are usually not allowed...
1095     * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
1096     * begins with level -1.  Note that previous feature tests will have
1097     * eliminated this combination if it is not enabled.
1098     */
1099    if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
1100        return false;
1101    }
1102
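    /*
     * E.g. a 4KB granule (stride 9, grainsize 12) with inputsize 40 and a
     * suggested starting level of 1 gives 40 - (2 * 9 + 12) = 10, which
     * lies within the permitted [1, stride + 4] range.
     */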
1103    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
1104    if (startsizecheck < 1 || startsizecheck > stride + 4) {
1105        return false;
1106    }
1107
1108    if (is_aa64) {
1109        switch (stride) {
1110        case 13: /* 64KB Pages.  */
1111            if (level == 0 || (level == 1 && outputsize <= 42)) {
1112                return false;
1113            }
1114            break;
1115        case 11: /* 16KB Pages.  */
1116            if (level == 0 || (level == 1 && outputsize <= 40)) {
1117                return false;
1118            }
1119            break;
1120        case 9: /* 4KB Pages.  */
1121            if (level == 0 && outputsize <= 42) {
1122                return false;
1123            }
1124            break;
1125        default:
1126            g_assert_not_reached();
1127        }
1128
1129        /* Inputsize checks.  */
1130        if (inputsize > outputsize &&
1131            (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
1132            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
1133            return false;
1134        }
1135    } else {
1136        /* AArch32 only supports 4KB pages. Assert on that.  */
1137        assert(stride == 9);
1138
1139        if (level == 0) {
1140            return false;
1141        }
1142    }
1143    return true;
1144}
1145
1146/**
1147 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
1148 *
 1149 * Returns false if the translation was successful. Otherwise, @result
 1150 * may not be filled in, and the populated @fi value provides information
 1151 * on why the translation aborted, in the format of a long-format DFSR/IFSR
 1152 * fault register, with the following caveat: the WnR bit is never set
 1153 * (the caller must do this).
1154 *
1155 * @env: CPUARMState
1156 * @ptw: Current and next stage parameters for the walk.
1157 * @address: virtual address to get physical address for
1158 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 1159 * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2 or ARMMMUIdx_Stage2_S
 1160 *             (so this is a stage 2 page table walk),
 1161 *             must be true if this is stage 2 of a stage 1+2
 1162 *             walk for an EL0 access. If @ptw->in_mmu_idx is anything else,
 1163 *             @s1_is_el0 is ignored.
 1164 * @result: set on translation success.
1165 * @fi: set to fault info if the translation fails
1166 */
1167static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
1168                               uint64_t address,
1169                               MMUAccessType access_type, bool s1_is_el0,
1170                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1171{
1172    ARMCPU *cpu = env_archcpu(env);
1173    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
1174    bool is_secure = ptw->in_secure;
1175    int32_t level;
1176    ARMVAParameters param;
1177    uint64_t ttbr;
1178    hwaddr descaddr, indexmask, indexmask_grainsize;
1179    uint32_t tableattrs;
1180    target_ulong page_size;
1181    uint64_t attrs;
1182    int32_t stride;
1183    int addrsize, inputsize, outputsize;
1184    uint64_t tcr = regime_tcr(env, mmu_idx);
1185    int ap, ns, xn, pxn;
1186    uint32_t el = regime_el(env, mmu_idx);
1187    uint64_t descaddrmask;
1188    bool aarch64 = arm_el_is_aa64(env, el);
1189    uint64_t descriptor, new_descriptor;
1190    bool nstable;
1191
1192    /* TODO: This code does not support shareability levels. */
1193    if (aarch64) {
1194        int ps;
1195
1196        param = aa64_va_parameters(env, address, mmu_idx,
1197                                   access_type != MMU_INST_FETCH);
1198        level = 0;
1199
1200        /*
1201         * If TxSZ is programmed to a value larger than the maximum,
1202         * or smaller than the effective minimum, it is IMPLEMENTATION
1203         * DEFINED whether we behave as if the field were programmed
1204         * within bounds, or if a level 0 Translation fault is generated.
1205         *
1206         * With FEAT_LVA, fault on less than minimum becomes required,
1207         * so our choice is to always raise the fault.
1208         */
1209        if (param.tsz_oob) {
1210            goto do_translation_fault;
1211        }
1212
1213        addrsize = 64 - 8 * param.tbi;
1214        inputsize = 64 - param.tsz;
1215
1216        /*
1217         * Bound PS by PARANGE to find the effective output address size.
1218         * ID_AA64MMFR0 is a read-only register so values outside of the
1219         * supported mappings can be considered an implementation error.
1220         */
1221        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1222        ps = MIN(ps, param.ps);
1223        assert(ps < ARRAY_SIZE(pamax_map));
1224        outputsize = pamax_map[ps];
1225
1226        /*
1227         * With LPA2, the effective output address (OA) size is at most 48 bits
1228         * unless TCR.DS == 1
1229         */
1230        if (!param.ds && param.gran != Gran64K) {
1231            outputsize = MIN(outputsize, 48);
1232        }
1233    } else {
1234        param = aa32_va_parameters(env, address, mmu_idx);
1235        level = 1;
1236        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
1237        inputsize = addrsize - param.tsz;
1238        outputsize = 40;
1239    }
1240
1241    /*
1242     * We determined the region when collecting the parameters, but we
1243     * have not yet validated that the address is valid for the region.
1244     * Extract the top bits and verify that they all match select.
1245     *
1246     * For aa32, if inputsize == addrsize, then we have selected the
1247     * region by exclusion in aa32_va_parameters and there is no more
1248     * validation to do here.
1249     */
1250    if (inputsize < addrsize) {
1251        target_ulong top_bits = sextract64(address, inputsize,
1252                                           addrsize - inputsize);
1253        if (-top_bits != param.select) {
1254            /* The gap between the two regions is a Translation fault */
1255            goto do_translation_fault;
1256        }
1257    }
1258
1259    stride = arm_granule_bits(param.gran) - 3;
1260
1261    /*
1262     * Note that QEMU ignores shareability and cacheability attributes,
1263     * so we don't need to do anything with the SH, ORGN, IRGN fields
1264     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
1265     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
1266     * implement any ASID-like capability so we can ignore it (instead
1267     * we will always flush the TLB any time the ASID is changed).
1268     */
1269    ttbr = regime_ttbr(env, mmu_idx, param.select);
1270
1271    /*
1272     * Here we should have set up all the parameters for the translation:
1273     * inputsize, ttbr, epd, stride, tbi
1274     */
1275
1276    if (param.epd) {
1277        /*
1278         * Translation table walk disabled => Translation fault on TLB miss
1279         * Note: This is always 0 on 64-bit EL2 and EL3.
1280         */
1281        goto do_translation_fault;
1282    }
1283
1284    if (!regime_is_stage2(mmu_idx)) {
1285        /*
1286         * The starting level depends on the virtual address size (which can
1287         * be up to 48 bits) and the translation granule size. It indicates
1288         * the number of strides (stride bits at a time) needed to
1289         * consume the bits of the input address. In the pseudocode this is:
1290         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
1291         * where their 'inputsize' is our 'inputsize', 'grainsize' is
1292         * our 'stride + 3' and 'stride' is our 'stride'.
1293         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
1294         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
1295         * = 4 - (inputsize - 4) / stride;
1296         */
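        /*
         * Worked example: a 4KB granule (stride 9) with a 48-bit input
         * address gives level = 4 - (48 - 4) / 9 = 0, i.e. a four-level
         * walk starting at level 0.
         */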
1297        level = 4 - (inputsize - 4) / stride;
1298    } else {
1299        /*
1300         * For stage 2 translations the starting level is specified by the
1301         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
1302         */
1303        uint32_t sl0 = extract32(tcr, 6, 2);
1304        uint32_t sl2 = extract64(tcr, 33, 1);
1305        int32_t startlevel;
1306        bool ok;
1307
1308        /* SL2 is RES0 unless DS=1 & 4kb granule. */
1309        if (param.ds && stride == 9 && sl2) {
1310            if (sl0 != 0) {
1311                level = 0;
1312                goto do_translation_fault;
1313            }
1314            startlevel = -1;
1315        } else if (!aarch64 || stride == 9) {
1316            /* AArch32 or 4KB pages */
1317            startlevel = 2 - sl0;
1318
1319            if (cpu_isar_feature(aa64_st, cpu)) {
1320                startlevel &= 3;
1321            }
1322        } else {
1323            /* 16KB or 64KB pages */
1324            startlevel = 3 - sl0;
1325        }
1326
1327        /* Check that the starting level is valid. */
1328        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
1329                                inputsize, stride, outputsize);
1330        if (!ok) {
1331            goto do_translation_fault;
1332        }
1333        level = startlevel;
1334    }
1335
1336    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
1337    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
1338
1339    /* Now we can extract the actual base address from the TTBR */
1340    descaddr = extract64(ttbr, 0, 48);
1341
1342    /*
1343     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
1344     *
1345     * Otherwise, if the base address is out of range, raise AddressSizeFault.
1346     * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
1347     * but we've just cleared the bits above 47, so simplify the test.
1348     */
1349    if (outputsize > 48) {
1350        descaddr |= extract64(ttbr, 2, 4) << 48;
1351    } else if (descaddr >> outputsize) {
1352        level = 0;
1353        fi->type = ARMFault_AddressSize;
1354        goto do_fault;
1355    }
1356
1357    /*
1358     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
1359     * and also to mask out CnP (bit 0) which could validly be non-zero.
1360     */
1361    descaddr &= ~indexmask;
1362
1363    /*
1364     * For AArch32, the address field in the descriptor goes up to bit 39
1365     * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
1366     * or an AddressSize fault is raised.  So for v8 we extract those SBZ
1367     * bits as part of the address, which will be checked via outputsize.
1368     * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
1369     * the highest bits of a 52-bit output are placed elsewhere.
1370     */
1371    if (param.ds) {
1372        descaddrmask = MAKE_64BIT_MASK(0, 50);
1373    } else if (arm_feature(env, ARM_FEATURE_V8)) {
1374        descaddrmask = MAKE_64BIT_MASK(0, 48);
1375    } else {
1376        descaddrmask = MAKE_64BIT_MASK(0, 40);
1377    }
1378    descaddrmask &= ~indexmask_grainsize;
1379
1380    /*
1381     * Secure accesses start with the page table in secure memory and
1382     * can be downgraded to non-secure at any step. Non-secure accesses
1383     * remain non-secure. We implement this by just ORing in the NSTable/NS
1384     * bits at each step.
1385     */
1386    tableattrs = is_secure ? 0 : (1 << 4);
1387
1388 next_level:
1389    descaddr |= (address >> (stride * (4 - level))) & indexmask;
1390    descaddr &= ~7ULL;
1391    nstable = extract32(tableattrs, 4, 1);
1392    if (nstable) {
1393        /*
1394         * Stage2_S -> Stage2 or Phys_S -> Phys_NS
1395         * Assert that the non-secure idx are even, and relative order.
1396         */
1397        QEMU_BUILD_BUG_ON((ARMMMUIdx_Phys_NS & 1) != 0);
1398        QEMU_BUILD_BUG_ON((ARMMMUIdx_Stage2 & 1) != 0);
1399        QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_NS + 1 != ARMMMUIdx_Phys_S);
1400        QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2 + 1 != ARMMMUIdx_Stage2_S);
1401        ptw->in_ptw_idx &= ~1;
1402        ptw->in_secure = false;
1403    }
1404    if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
1405        goto do_fault;
1406    }
1407    descriptor = arm_ldq_ptw(env, ptw, fi);
1408    if (fi->type != ARMFault_None) {
1409        goto do_fault;
1410    }
1411    new_descriptor = descriptor;
1412
1413 restart_atomic_update:
1414    if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) {
1415        /* Invalid, or the Reserved level 3 encoding */
1416        goto do_translation_fault;
1417    }
1418
1419    descaddr = descriptor & descaddrmask;
1420
1421    /*
1422     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
1423     * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
1424     * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
1425     * raise AddressSizeFault.
1426     */
1427    if (outputsize > 48) {
1428        if (param.ds) {
1429            descaddr |= extract64(descriptor, 8, 2) << 50;
1430        } else {
1431            descaddr |= extract64(descriptor, 12, 4) << 48;
1432        }
1433    } else if (descaddr >> outputsize) {
1434        fi->type = ARMFault_AddressSize;
1435        goto do_fault;
1436    }
1437
1438    if ((descriptor & 2) && (level < 3)) {
1439        /*
1440         * Table entry. The top five bits are attributes which may
1441         * propagate down through lower levels of the table (and
1442         * which are all arranged so that 0 means "no effect", so
1443         * we can gather them up by ORing in the bits at each level).
1444         */
1445        tableattrs |= extract64(descriptor, 59, 5);
1446        level++;
1447        indexmask = indexmask_grainsize;
1448        goto next_level;
1449    }
1450
1451    /*
1452     * Block entry at level 1 or 2, or page entry at level 3.
1453     * These are basically the same thing, although the number
1454     * of bits we pull in from the vaddr varies. Note that although
1455     * descaddrmask masks enough of the low bits of the descriptor
1456     * to give a correct page or table address, the address field
1457     * in a block descriptor is smaller; so we need to explicitly
1458     * clear the lower bits here before ORing in the low vaddr bits.
1459     *
1460     * Afterward, descaddr is the final physical address.
1461     */
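    /*
     * For example, with a 4KB granule (stride 9) a level 2 block spans
     * 1 << (9 * 2 + 3) bytes (2MB) and a level 3 page spans 4KB.
     */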
1462    page_size = (1ULL << ((stride * (4 - level)) + 3));
1463    descaddr &= ~(hwaddr)(page_size - 1);
1464    descaddr |= (address & (page_size - 1));
1465
1466    if (likely(!ptw->in_debug)) {
1467        /*
1468         * Access flag.
1469         * If HA is enabled, prepare to update the descriptor below.
1470         * Otherwise, pass the access fault on to software.
1471         */
1472        if (!(descriptor & (1 << 10))) {
1473            if (param.ha) {
1474                new_descriptor |= 1 << 10; /* AF */
1475            } else {
1476                fi->type = ARMFault_AccessFlag;
1477                goto do_fault;
1478            }
1479        }
1480
1481        /*
1482         * Dirty Bit.
1483         * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP
1484         * bit for writeback. The actual write protection test may still be
1485         * overridden by tableattrs, to be merged below.
1486         */
1487        if (param.hd
1488            && extract64(descriptor, 51, 1)  /* DBM */
1489            && access_type == MMU_DATA_STORE) {
1490            if (regime_is_stage2(mmu_idx)) {
1491                new_descriptor |= 1ull << 7;    /* set S2AP[1] */
1492            } else {
1493                new_descriptor &= ~(1ull << 7); /* clear AP[2] */
1494            }
1495        }
1496    }
1497
1498    /*
1499     * Extract attributes from the (modified) descriptor, and apply
1500     * table descriptors. Stage 2 table descriptors do not include
1501     * any attribute fields. HPD disables all the table attributes
1502     * except NSTable.
1503     */
1504    attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
1505    if (!regime_is_stage2(mmu_idx)) {
1506        attrs |= nstable << 5; /* NS */
1507        if (!param.hpd) {
1508            attrs |= extract64(tableattrs, 0, 2) << 53;     /* XN, PXN */
1509            /*
1510             * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
1511             * means "force PL1 access only", which means forcing AP[1] to 0.
1512             */
1513            attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
1514            attrs |= extract32(tableattrs, 3, 1) << 7;    /* APT[1] => AP[2] */
1515        }
1516    }
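    /*
     * For instance, if any table descriptor on the walk had APTable[0]
     * set, AP[1] has just been forced to 0 above, so the leaf is reported
     * as privileged-access-only regardless of its own AP bits.
     */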
1517
1518    ap = extract32(attrs, 6, 2);
1519    if (regime_is_stage2(mmu_idx)) {
1520        ns = mmu_idx == ARMMMUIdx_Stage2;
1521        xn = extract64(attrs, 53, 2);
1522        result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
1523    } else {
1524        ns = extract32(attrs, 5, 1);
1525        xn = extract64(attrs, 54, 1);
1526        pxn = extract64(attrs, 53, 1);
1527        result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
1528    }
1529
1530    if (!(result->f.prot & (1 << access_type))) {
1531        fi->type = ARMFault_Permission;
1532        goto do_fault;
1533    }
1534
1535    /* If FEAT_HAFDBS has made changes, update the PTE. */
1536    if (new_descriptor != descriptor) {
1537        new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
1538        if (fi->type != ARMFault_None) {
1539            goto do_fault;
1540        }
1541        /*
1542         * I_YZSVV says that if the in-memory descriptor has changed,
1543         * then we must use the information in that new value
1544         * (which might include a different output address, different
1545         * attributes, or generate a fault).
1546         * Restart the handling of the descriptor value from scratch.
1547         */
1548        if (new_descriptor != descriptor) {
1549            descriptor = new_descriptor;
1550            goto restart_atomic_update;
1551        }
1552    }
1553
1554    if (ns) {
1555        /*
1556         * The NS bit will (as required by the architecture) have no effect if
1557         * the CPU doesn't support TZ or this is a non-secure translation
1558         * regime, because the attribute will already be non-secure.
1559         */
1560        result->f.attrs.secure = false;
1561    }
1562
1563    /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB.  */
1564    if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
1565        result->f.guarded = extract64(attrs, 50, 1); /* GP */
1566    }
1567
1568    if (regime_is_stage2(mmu_idx)) {
1569        result->cacheattrs.is_s2_format = true;
1570        result->cacheattrs.attrs = extract32(attrs, 2, 4);
1571    } else {
1572        /* Index into MAIR registers for cache attributes */
1573        uint8_t attrindx = extract32(attrs, 2, 3);
1574        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
1575        assert(attrindx <= 7);
1576        result->cacheattrs.is_s2_format = false;
1577        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
1578    }
1579
1580    /*
1581     * For FEAT_LPA2 and effective DS, the SH field in the attributes
1582     * was re-purposed for output address bits.  The SH attribute in
1583     * that case comes from TCR_ELx, which we extracted earlier.
1584     */
1585    if (param.ds) {
1586        result->cacheattrs.shareability = param.sh;
1587    } else {
1588        result->cacheattrs.shareability = extract32(attrs, 8, 2);
1589    }
1590
1591    result->f.phys_addr = descaddr;
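        /* page_size is a power of two, so ctz64() gives its log2 */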
1592    result->f.lg_page_size = ctz64(page_size);
1593    return false;
1594
1595 do_translation_fault:
1596    fi->type = ARMFault_Translation;
1597 do_fault:
1598    fi->level = level;
1599    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
1600    fi->stage2 = fi->s1ptw || regime_is_stage2(mmu_idx);
1601    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
1602    return true;
1603}
1604
1605static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
1606                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1607                                 bool is_secure, GetPhysAddrResult *result,
1608                                 ARMMMUFaultInfo *fi)
1609{
1610    int n;
1611    uint32_t mask;
1612    uint32_t base;
1613    bool is_user = regime_is_user(env, mmu_idx);
1614
1615    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
1616        /* MPU disabled.  */
1617        result->f.phys_addr = address;
1618        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1619        return false;
1620    }
1621
1622    result->f.phys_addr = address;
1623    for (n = 7; n >= 0; n--) {
1624        base = env->cp15.c6_region[n];
1625        if ((base & 1) == 0) {
1626            continue;
1627        }
1628        mask = 1 << ((base >> 1) & 0x1f);
1629        /* Keep this shift separate from the above to avoid an
1630         * (undefined) << 32.  */
1631        mask = (mask << 1) - 1;
1632        if (((base ^ address) & ~mask) == 0) {
1633            break;
1634        }
1635    }
1636    if (n < 0) {
1637        fi->type = ARMFault_Background;
1638        return true;
1639    }
1640
1641    if (access_type == MMU_INST_FETCH) {
1642        mask = env->cp15.pmsav5_insn_ap;
1643    } else {
1644        mask = env->cp15.pmsav5_data_ap;
1645    }
1646    mask = (mask >> (n * 4)) & 0xf;
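        /*
         * Decode the 4-bit AP field for this region: 0 = no access,
         * 1 = privileged RW only, 2 = privileged RW / user RO,
         * 3 = RW for all, 5 = privileged RO only, 6 = RO for all;
         * other values are permission faults.
         */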
1647    switch (mask) {
1648    case 0:
1649        fi->type = ARMFault_Permission;
1650        fi->level = 1;
1651        return true;
1652    case 1:
1653        if (is_user) {
1654            fi->type = ARMFault_Permission;
1655            fi->level = 1;
1656            return true;
1657        }
1658        result->f.prot = PAGE_READ | PAGE_WRITE;
1659        break;
1660    case 2:
1661        result->f.prot = PAGE_READ;
1662        if (!is_user) {
1663            result->f.prot |= PAGE_WRITE;
1664        }
1665        break;
1666    case 3:
1667        result->f.prot = PAGE_READ | PAGE_WRITE;
1668        break;
1669    case 5:
1670        if (is_user) {
1671            fi->type = ARMFault_Permission;
1672            fi->level = 1;
1673            return true;
1674        }
1675        result->f.prot = PAGE_READ;
1676        break;
1677    case 6:
1678        result->f.prot = PAGE_READ;
1679        break;
1680    default:
1681        /* Bad permission.  */
1682        fi->type = ARMFault_Permission;
1683        fi->level = 1;
1684        return true;
1685    }
1686    result->f.prot |= PAGE_EXEC;
1687    return false;
1688}
1689
1690static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
1691                                         int32_t address, uint8_t *prot)
1692{
1693    if (!arm_feature(env, ARM_FEATURE_M)) {
1694        *prot = PAGE_READ | PAGE_WRITE;
1695        switch (address) {
1696        case 0xF0000000 ... 0xFFFFFFFF:
1697            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
1698                /* hivecs: execution from here is OK */
1699                *prot |= PAGE_EXEC;
1700            }
1701            break;
1702        case 0x00000000 ... 0x7FFFFFFF:
1703            *prot |= PAGE_EXEC;
1704            break;
1705        }
1706    } else {
1707        /* Default system address map for M profile cores.
1708         * The architecture specifies which regions are execute-never;
1709         * at the MPU level no other checks are defined.
1710         */
1711        switch (address) {
1712        case 0x00000000 ... 0x1fffffff: /* ROM */
1713        case 0x20000000 ... 0x3fffffff: /* SRAM */
1714        case 0x60000000 ... 0x7fffffff: /* RAM */
1715        case 0x80000000 ... 0x9fffffff: /* RAM */
1716            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1717            break;
1718        case 0x40000000 ... 0x5fffffff: /* Peripheral */
1719        case 0xa0000000 ... 0xbfffffff: /* Device */
1720        case 0xc0000000 ... 0xdfffffff: /* Device */
1721        case 0xe0000000 ... 0xffffffff: /* System */
1722            *prot = PAGE_READ | PAGE_WRITE;
1723            break;
1724        default:
1725            g_assert_not_reached();
1726        }
1727    }
1728}
1729
1730static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
1731{
1732    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
1733    return arm_feature(env, ARM_FEATURE_M) &&
1734        extract32(address, 20, 12) == 0xe00;
1735}
1736
1737static bool m_is_system_region(CPUARMState *env, uint32_t address)
1738{
1739    /*
1740     * True if address is in the M profile system region
1741     * 0xe0000000 - 0xffffffff
1742     */
1743    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
1744}
1745
1746static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1747                                         bool is_secure, bool is_user)
1748{
1749    /*
1750     * Return true if we should use the default memory map as a
1751     * "background" region if there are no hits against any MPU regions.
1752     */
1753    CPUARMState *env = &cpu->env;
1754
1755    if (is_user) {
1756        return false;
1757    }
1758
1759    if (arm_feature(env, ARM_FEATURE_M)) {
1760        return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
1761    } else {
1762        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
1763    }
1764}
1765
1766static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
1767                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1768                                 bool secure, GetPhysAddrResult *result,
1769                                 ARMMMUFaultInfo *fi)
1770{
1771    ARMCPU *cpu = env_archcpu(env);
1772    int n;
1773    bool is_user = regime_is_user(env, mmu_idx);
1774
1775    result->f.phys_addr = address;
1776    result->f.lg_page_size = TARGET_PAGE_BITS;
1777    result->f.prot = 0;
1778
1779    if (regime_translation_disabled(env, mmu_idx, secure) ||
1780        m_is_ppb_region(env, address)) {
1781        /*
1782         * MPU disabled or M profile PPB access: use default memory map.
1783         * The other case which uses the default memory map in the
1784         * v7M ARM ARM pseudocode is exception vector reads from the vector
1785         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
1786         * which always does a direct read using address_space_ldl(), rather
1787         * than going via this function, so we don't need to check that here.
1788         */
1789        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
1790    } else { /* MPU enabled */
1791        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1792            /* region search */
1793            uint32_t base = env->pmsav7.drbar[n];
1794            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
1795            uint32_t rmask;
1796            bool srdis = false;
1797
1798            if (!(env->pmsav7.drsr[n] & 0x1)) {
1799                continue;
1800            }
1801
1802            if (!rsize) {
1803                qemu_log_mask(LOG_GUEST_ERROR,
1804                              "DRSR[%d]: Rsize field cannot be 0\n", n);
1805                continue;
1806            }
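                /* DRSR.Rsize encodes a region size of 2^(Rsize + 1) bytes */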
1807            rsize++;
1808            rmask = (1ull << rsize) - 1;
1809
1810            if (base & rmask) {
1811                qemu_log_mask(LOG_GUEST_ERROR,
1812                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
1813                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
1814                              n, base, rmask);
1815                continue;
1816            }
1817
1818            if (address < base || address > base + rmask) {
1819                /*
1820                 * Address not in this region. We must check whether the
1821                 * region covers addresses in the same page as our address.
1822                 * In that case we must not report a size that covers the
1823                 * whole page for a subsequent hit against a different MPU
1824                 * region or the background region, because it would result in
1825                 * incorrect TLB hits for subsequent accesses to addresses that
1826                 * are in this MPU region.
1827                 */
1828                if (ranges_overlap(base, rmask,
1829                                   address & TARGET_PAGE_MASK,
1830                                   TARGET_PAGE_SIZE)) {
1831                    result->f.lg_page_size = 0;
1832                }
1833                continue;
1834            }
1835
1836            /* Region matched */
1837
1838            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
1839                int i, snd;
1840                uint32_t srdis_mask;
1841
1842                rsize -= 3; /* sub region size (power of 2) */
1843                snd = ((address - base) >> rsize) & 0x7;
1844                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
1845
1846                srdis_mask = srdis ? 0x3 : 0x0;
1847                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
1848                    /*
1849                     * This will check in groups of 2, 4 and then 8, whether
1850                     * the subregion bits are consistent. rsize is incremented
1851                     * back up to give the region size, considering consistent
1852                     * adjacent subregions as one region. Stop testing if rsize
1853                     * is already big enough for an entire QEMU page.
1854                     */
1855                    int snd_rounded = snd & ~(i - 1);
1856                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
1857                                                     snd_rounded + 8, i);
1858                    if (srdis_mask ^ srdis_multi) {
1859                        break;
1860                    }
1861                    srdis_mask = (srdis_mask << i) | srdis_mask;
1862                    rsize++;
1863                }
1864            }
1865            if (srdis) {
1866                continue;
1867            }
1868            if (rsize < TARGET_PAGE_BITS) {
1869                result->f.lg_page_size = rsize;
1870            }
1871            break;
1872        }
1873
1874        if (n == -1) { /* no hits */
1875            if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1876                /* background fault */
1877                fi->type = ARMFault_Background;
1878                return true;
1879            }
1880            get_phys_addr_pmsav7_default(env, mmu_idx, address,
1881                                         &result->f.prot);
1882        } else { /* an MPU hit! */
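            /* In DRACR, AP is bits [10:8] and XN is bit [12] */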
1883            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
1884            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
1885
1886            if (m_is_system_region(env, address)) {
1887                /* System space is always execute never */
1888                xn = 1;
1889            }
1890
1891            if (is_user) { /* User mode AP bit decoding */
1892                switch (ap) {
1893                case 0:
1894                case 1:
1895                case 5:
1896                    break; /* no access */
1897                case 3:
1898                    result->f.prot |= PAGE_WRITE;
1899                    /* fall through */
1900                case 2:
1901                case 6:
1902                    result->f.prot |= PAGE_READ | PAGE_EXEC;
1903                    break;
1904                case 7:
1905                    /* for v7M, same as 6; for R profile a reserved value */
1906                    if (arm_feature(env, ARM_FEATURE_M)) {
1907                        result->f.prot |= PAGE_READ | PAGE_EXEC;
1908                        break;
1909                    }
1910                    /* fall through */
1911                default:
1912                    qemu_log_mask(LOG_GUEST_ERROR,
1913                                  "DRACR[%d]: Bad value for AP bits: 0x%"
1914                                  PRIx32 "\n", n, ap);
1915                }
1916            } else { /* Priv. mode AP bits decoding */
1917                switch (ap) {
1918                case 0:
1919                    break; /* no access */
1920                case 1:
1921                case 2:
1922                case 3:
1923                    result->f.prot |= PAGE_WRITE;
1924                    /* fall through */
1925                case 5:
1926                case 6:
1927                    result->f.prot |= PAGE_READ | PAGE_EXEC;
1928                    break;
1929                case 7:
1930                    /* for v7M, same as 6; for R profile a reserved value */
1931                    if (arm_feature(env, ARM_FEATURE_M)) {
1932                        result->f.prot |= PAGE_READ | PAGE_EXEC;
1933                        break;
1934                    }
1935                    /* fall through */
1936                default:
1937                    qemu_log_mask(LOG_GUEST_ERROR,
1938                                  "DRACR[%d]: Bad value for AP bits: 0x%"
1939                                  PRIx32 "\n", n, ap);
1940                }
1941            }
1942
1943            /* execute never */
1944            if (xn) {
1945                result->f.prot &= ~PAGE_EXEC;
1946            }
1947        }
1948    }
1949
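    /*
     * fi is only examined by the caller if we return true (a fault);
     * MMUAccessType values line up with the PAGE_READ/WRITE/EXEC bits,
     * so (1 << access_type) selects the permission being checked.
     */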
1950    fi->type = ARMFault_Permission;
1951    fi->level = 1;
1952    return !(result->f.prot & (1 << access_type));
1953}
1954
1955bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1956                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
1957                       bool secure, GetPhysAddrResult *result,
1958                       ARMMMUFaultInfo *fi, uint32_t *mregion)
1959{
1960    /*
1961     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
1962     * that a full phys-to-virt translation does).
1963     * mregion is (if not NULL) set to the region number which matched,
1964     * or -1 if no region number is returned (MPU off, address did not
1965     * hit a region, address hit in multiple regions).
1966     * If the region hit doesn't cover the entire TARGET_PAGE the address
1967     * is within, then we set the result lg_page_size to 0 to force the
1968     * memory system to use a subpage.
1969     */
1970    ARMCPU *cpu = env_archcpu(env);
1971    bool is_user = regime_is_user(env, mmu_idx);
1972    int n;
1973    int matchregion = -1;
1974    bool hit = false;
1975    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1976    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1977
1978    result->f.lg_page_size = TARGET_PAGE_BITS;
1979    result->f.phys_addr = address;
1980    result->f.prot = 0;
1981    if (mregion) {
1982        *mregion = -1;
1983    }
1984
1985    /*
1986     * Unlike the ARM ARM pseudocode, we don't need to check whether this
1987     * was an exception vector read from the vector table (which is always
1988     * done using the default system address map), because those accesses
1989     * are done in arm_v7m_load_vector(), which always does a direct
1990     * read using address_space_ldl(), rather than going via this function.
1991     */
1992    if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
1993        hit = true;
1994    } else if (m_is_ppb_region(env, address)) {
1995        hit = true;
1996    } else {
1997        if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1998            hit = true;
1999        }
2000
2001        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
2002            /* region search */
2003            /*
2004             * Note that the base address is bits [31:5] from the register
2005             * with bits [4:0] all zeroes, but the limit address is bits
2006             * [31:5] from the register with bits [4:0] all ones.
2007             */
2008            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
2009            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
2010
2011            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
2012                /* Region disabled */
2013                continue;
2014            }
2015
2016            if (address < base || address > limit) {
2017                /*
2018                 * Address not in this region. We must check whether the
2019                 * region covers addresses in the same page as our address.
2020                 * In that case we must not report a size that covers the
2021                 * whole page for a subsequent hit against a different MPU
2022                 * region or the background region, because it would result in
2023                 * incorrect TLB hits for subsequent accesses to addresses that
2024                 * are in this MPU region.
2025                 */
2026                if (limit >= base &&
2027                    ranges_overlap(base, limit - base + 1,
2028                                   addr_page_base,
2029                                   TARGET_PAGE_SIZE)) {
2030                    result->f.lg_page_size = 0;
2031                }
2032                continue;
2033            }
2034
2035            if (base > addr_page_base || limit < addr_page_limit) {
2036                result->f.lg_page_size = 0;
2037            }
2038
2039            if (matchregion != -1) {
2040                /*
2041                 * Multiple regions match -- always a failure (unlike
2042                 * PMSAv7 where highest-numbered-region wins)
2043                 */
2044                fi->type = ARMFault_Permission;
2045                fi->level = 1;
2046                return true;
2047            }
2048
2049            matchregion = n;
2050            hit = true;
2051        }
2052    }
2053
2054    if (!hit) {
2055        /* background fault */
2056        fi->type = ARMFault_Background;
2057        return true;
2058    }
2059
2060    if (matchregion == -1) {
2061        /* hit using the background region */
2062        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
2063    } else {
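        /*
         * In RBAR, AP is bits [2:1] and XN is bit [0]; for v8.1-M,
         * PXN is RLAR bit [4].
         */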
2064        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
2065        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
2066        bool pxn = false;
2067
2068        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
2069            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
2070        }
2071
2072        if (m_is_system_region(env, address)) {
2073            /* System space is always execute never */
2074            xn = 1;
2075        }
2076
2077        result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
2078        if (result->f.prot && !xn && !(pxn && !is_user)) {
2079            result->f.prot |= PAGE_EXEC;
2080        }
2081        /*
2082         * We don't need to look the attribute up in the MAIR0/MAIR1
2083         * registers because that only tells us about cacheability.
2084         */
2085        if (mregion) {
2086            *mregion = matchregion;
2087        }
2088    }
2089
2090    fi->type = ARMFault_Permission;
2091    fi->level = 1;
2092    return !(result->f.prot & (1 << access_type));
2093}
2094
2095static bool v8m_is_sau_exempt(CPUARMState *env,
2096                              uint32_t address, MMUAccessType access_type)
2097{
2098    /*
2099     * The architecture specifies that certain address ranges are
2100     * exempt from v8M SAU/IDAU checks.
2101     */
2102    return
2103        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
2104        (address >= 0xe0000000 && address <= 0xe0002fff) ||
2105        (address >= 0xe000e000 && address <= 0xe000efff) ||
2106        (address >= 0xe002e000 && address <= 0xe002efff) ||
2107        (address >= 0xe0040000 && address <= 0xe0041fff) ||
2108        (address >= 0xe00ff000 && address <= 0xe00fffff);
2109}
2110
2111void v8m_security_lookup(CPUARMState *env, uint32_t address,
2112                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
2113                         bool is_secure, V8M_SAttributes *sattrs)
2114{
2115    /*
2116     * Look up the security attributes for this address. Compare the
2117     * pseudocode SecurityCheck() function.
2118     * We assume the caller has zero-initialized *sattrs.
2119     */
2120    ARMCPU *cpu = env_archcpu(env);
2121    int r;
2122    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
2123    int idau_region = IREGION_NOTVALID;
2124    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
2125    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
2126
2127    if (cpu->idau) {
2128        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
2129        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
2130
2131        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
2132                   &idau_nsc);
2133    }
2134
2135    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
2136        /* 0xf0000000..0xffffffff is always S for insn fetches */
2137        return;
2138    }
2139
2140    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
2141        sattrs->ns = !is_secure;
2142        return;
2143    }
2144
2145    if (idau_region != IREGION_NOTVALID) {
2146        sattrs->irvalid = true;
2147        sattrs->iregion = idau_region;
2148    }
2149
2150    switch (env->sau.ctrl & 3) {
2151    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
2152        break;
2153    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
2154        sattrs->ns = true;
2155        break;
2156    default: /* SAU.ENABLE == 1 */
2157        for (r = 0; r < cpu->sau_sregion; r++) {
2158            if (env->sau.rlar[r] & 1) {
2159                uint32_t base = env->sau.rbar[r] & ~0x1f;
2160                uint32_t limit = env->sau.rlar[r] | 0x1f;
2161
2162                if (base <= address && limit >= address) {
2163                    if (base > addr_page_base || limit < addr_page_limit) {
2164                        sattrs->subpage = true;
2165                    }
2166                    if (sattrs->srvalid) {
2167                        /*
2168                         * If we hit in more than one region then we must report
2169                         * as Secure, not NS-Callable, with no valid region
2170                         * number info.
2171                         */
2172                        sattrs->ns = false;
2173                        sattrs->nsc = false;
2174                        sattrs->sregion = 0;
2175                        sattrs->srvalid = false;
2176                        break;
2177                    } else {
2178                        if (env->sau.rlar[r] & 2) {
2179                            sattrs->nsc = true;
2180                        } else {
2181                            sattrs->ns = true;
2182                        }
2183                        sattrs->srvalid = true;
2184                        sattrs->sregion = r;
2185                    }
2186                } else {
2187                    /*
2188                     * Address not in this region. We must check whether the
2189                     * region covers addresses in the same page as our address.
2190                     * In that case we must not report a size that covers the
2191                     * whole page for a subsequent hit against a different MPU
2192                     * region or the background region, because it would result
2193                     * in incorrect TLB hits for subsequent accesses to
2194                     * addresses that are in this MPU region.
2195                     */
2196                    if (limit >= base &&
2197                        ranges_overlap(base, limit - base + 1,
2198                                       addr_page_base,
2199                                       TARGET_PAGE_SIZE)) {
2200                        sattrs->subpage = true;
2201                    }
2202                }
2203            }
2204        }
2205        break;
2206    }
2207
2208    /*
2209     * The IDAU will override the SAU lookup results if it specifies
2210     * higher security than the SAU does.
2211     */
2212    if (!idau_ns) {
2213        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
2214            sattrs->ns = false;
2215            sattrs->nsc = idau_nsc;
2216        }
2217    }
2218}
2219
2220static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
2221                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
2222                                 bool secure, GetPhysAddrResult *result,
2223                                 ARMMMUFaultInfo *fi)
2224{
2225    V8M_SAttributes sattrs = {};
2226    bool ret;
2227
2228    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2229        v8m_security_lookup(env, address, access_type, mmu_idx,
2230                            secure, &sattrs);
2231        if (access_type == MMU_INST_FETCH) {
2232            /*
2233             * Instruction fetches always use the MMU bank and the
2234             * transaction attribute determined by the fetch address,
2235             * regardless of CPU state. This is painful for QEMU
2236             * to handle, because it would mean we need to encode
2237             * into the mmu_idx not just the (user, negpri) information
2238             * for the current security state but also that for the
2239             * other security state, which would balloon the number
2240             * of mmu_idx values needed alarmingly.
2241             * Fortunately we can avoid this because it's not actually
2242             * possible to arbitrarily execute code from memory with
2243             * the wrong security attribute: it will always generate
2244             * an exception of some kind or another, apart from the
2245             * special case of an NS CPU executing an SG instruction
2246             * in S&NSC memory. So we always just fail the translation
2247             * here and sort things out in the exception handler
2248             * (including possibly emulating an SG instruction).
2249             */
2250            if (sattrs.ns != !secure) {
2251                if (sattrs.nsc) {
2252                    fi->type = ARMFault_QEMU_NSCExec;
2253                } else {
2254                    fi->type = ARMFault_QEMU_SFault;
2255                }
2256                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2257                result->f.phys_addr = address;
2258                result->f.prot = 0;
2259                return true;
2260            }
2261        } else {
2262            /*
2263             * For data accesses we always use the MMU bank indicated
2264             * by the current CPU state, but the security attributes
2265             * might downgrade a secure access to nonsecure.
2266             */
2267            if (sattrs.ns) {
2268                result->f.attrs.secure = false;
2269            } else if (!secure) {
2270                /*
2271                 * NS access to S memory must fault.
2272                 * Architecturally we should first check whether the
2273                 * MPU information for this address indicates that we
2274                 * are doing an unaligned access to Device memory, which
2275                 * should generate a UsageFault instead. QEMU does not
2276                 * currently check for that kind of unaligned access though.
2277                 * If we added it we would need to do so as a special case
2278                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2279                 */
2280                fi->type = ARMFault_QEMU_SFault;
2281                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2282                result->f.phys_addr = address;
2283                result->f.prot = 0;
2284                return true;
2285            }
2286        }
2287    }
2288
2289    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
2290                            result, fi, NULL);
2291    if (sattrs.subpage) {
2292        result->f.lg_page_size = 0;
2293    }
2294    return ret;
2295}
2296
2297/*
2298 * Translate from the 4-bit stage 2 representation of
2299 * memory attributes (without cache-allocation hints) to
2300 * the 8-bit representation of the stage 1 MAIR registers
2301 * (which includes allocation hints).
2302 *
2303 * ref: shared/translation/attrs/S2AttrDecode()
2304 *      .../S2ConvertAttrsHints()
2305 */
2306static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
2307{
2308    uint8_t hiattr = extract32(s2attrs, 2, 2);
2309    uint8_t loattr = extract32(s2attrs, 0, 2);
2310    uint8_t hihint = 0, lohint = 0;
2311
2312    if (hiattr != 0) { /* normal memory */
2313        if (hcr & HCR_CD) { /* cache disabled */
2314            hiattr = loattr = 1; /* non-cacheable */
2315        } else {
2316            if (hiattr != 1) { /* Write-through or write-back */
2317                hihint = 3; /* RW allocate */
2318            }
2319            if (loattr != 1) { /* Write-through or write-back */
2320                lohint = 3; /* RW allocate */
2321            }
2322        }
2323    }
2324
2325    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
2326}
2327
2328/*
2329 * Combine either inner or outer cacheability attributes for normal
2330 * memory, according to table D4-42 and pseudocode procedure
2331 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
2332 *
2333 * NB: only stage 1 includes allocation hints (RW bits), leading to
2334 * some asymmetry.
2335 */
2336static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2337{
2338    if (s1 == 4 || s2 == 4) {
2339        /* non-cacheable has precedence */
2340        return 4;
2341    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2342        /* stage 1 write-through takes precedence */
2343        return s1;
2344    } else if (extract32(s2, 2, 2) == 2) {
2345        /* stage 2 write-through takes precedence, but the allocation hint
2346         * is still taken from stage 1
2347         */
2348        return (2 << 2) | extract32(s1, 0, 2);
2349    } else { /* write-back */
2350        return s1;
2351    }
2352}
2353
2354/*
2355 * Combine the memory type and cacheability attributes of
2356 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
2357 * combined attributes in MAIR_EL1 format.
2358 */
2359static uint8_t combined_attrs_nofwb(uint64_t hcr,
2360                                    ARMCacheAttrs s1, ARMCacheAttrs s2)
2361{
2362    uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
2363
2364    s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
2365
2366    s1lo = extract32(s1.attrs, 0, 4);
2367    s2lo = extract32(s2_mair_attrs, 0, 4);
2368    s1hi = extract32(s1.attrs, 4, 4);
2369    s2hi = extract32(s2_mair_attrs, 4, 4);
2370
2371    /* Combine memory type and cacheability attributes */
2372    if (s1hi == 0 || s2hi == 0) {
2373        /* Device has precedence over normal */
2374        if (s1lo == 0 || s2lo == 0) {
2375            /* nGnRnE has precedence over anything */
2376            ret_attrs = 0;
2377        } else if (s1lo == 4 || s2lo == 4) {
2378            /* non-Reordering has precedence over Reordering */
2379            ret_attrs = 4;  /* nGnRE */
2380        } else if (s1lo == 8 || s2lo == 8) {
2381            /* non-Gathering has precedence over Gathering */
2382            ret_attrs = 8;  /* nGRE */
2383        } else {
2384            ret_attrs = 0xc; /* GRE */
2385        }
2386    } else { /* Normal memory */
2387        /* Outer/inner cacheability combine independently */
2388        ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
2389                  | combine_cacheattr_nibble(s1lo, s2lo);
2390    }
2391    return ret_attrs;
2392}
2393
2394static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2395{
2396    /*
2397     * Given the 4 bits specifying the outer or inner cacheability
2398     * in MAIR format, return a value specifying Normal Write-Back,
2399     * with the allocation and transient hints taken from the input
2400     * if the input specified some kind of cacheable attribute.
2401     */
2402    if (attr == 0 || attr == 4) {
2403        /*
2404         * 0 == an UNPREDICTABLE encoding
2405         * 4 == Non-cacheable
2406         * Either way, force Write-Back RW allocate non-transient
2407         */
2408        return 0xf;
2409    }
2410    /* Change WriteThrough to WriteBack, keep allocation and transient hints */
2411    return attr | 4;
2412}
2413
2414/*
2415 * Combine the memory type and cacheability attributes of
2416 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2417 * combined attributes in MAIR_EL1 format.
2418 */
2419static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
2420{
2421    switch (s2.attrs) {
2422    case 7:
2423        /* Use stage 1 attributes */
2424        return s1.attrs;
2425    case 6:
2426        /*
2427         * Force Normal Write-Back. Note that if S1 is Normal cacheable
2428         * then we take the allocation hints from it; otherwise it is
2429         * RW allocate, non-transient.
2430         */
2431        if ((s1.attrs & 0xf0) == 0) {
2432            /* S1 is Device */
2433            return 0xff;
2434        }
2435        /* Need to check the Inner and Outer nibbles separately */
2436        return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
2437            force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
2438    case 5:
2439        /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
2440        if ((s1.attrs & 0xf0) == 0) {
2441            return s1.attrs;
2442        }
2443        return 0x44;
2444    case 0 ... 3:
2445        /* Force Device, of subtype specified by S2 */
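            /*
             * MAIR Device encodings are 0b0000dd00; shift the 2-bit
             * subtype into bits [3:2].
             */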
2446        return s2.attrs << 2;
2447    default:
2448        /*
2449         * RESERVED values (including RES0 descriptor bit [5] being nonzero);
2450         * arbitrarily force Device.
2451         */
2452        return 0;
2453    }
2454}
2455
2456/*
2457 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
2458 * and CombineS1S2Desc()
2459 *
2460 * @env:     CPUARMState
2461 * @s1:      Attributes from stage 1 walk
2462 * @s2:      Attributes from stage 2 walk
2463 */
2464static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
2465                                        ARMCacheAttrs s1, ARMCacheAttrs s2)
2466{
2467    ARMCacheAttrs ret;
2468    bool tagged = false;
2469
2470    assert(s2.is_s2_format && !s1.is_s2_format);
2471    ret.is_s2_format = false;
2472
2473    if (s1.attrs == 0xf0) {
2474        tagged = true;
2475        s1.attrs = 0xff;
2476    }
2477
2478    /* Combine shareability attributes (table D4-43) */
2479    if (s1.shareability == 2 || s2.shareability == 2) {
2480        /* if either are outer-shareable, the result is outer-shareable */
2481        ret.shareability = 2;
2482    } else if (s1.shareability == 3 || s2.shareability == 3) {
2483        /* if either are inner-shareable, the result is inner-shareable */
2484        ret.shareability = 3;
2485    } else {
2486        /* both non-shareable */
2487        ret.shareability = 0;
2488    }
2489
2490    /* Combine memory type and cacheability attributes */
2491    if (hcr & HCR_FWB) {
2492        ret.attrs = combined_attrs_fwb(s1, s2);
2493    } else {
2494        ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
2495    }
2496
2497    /*
2498     * Any location for which the resultant memory type is any
2499     * type of Device memory is always treated as Outer Shareable.
2500     * Any location for which the resultant memory type is Normal
2501     * Inner Non-cacheable, Outer Non-cacheable is always treated
2502     * as Outer Shareable.
2503     * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
2504     */
2505    if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
2506        ret.shareability = 2;
2507    }
2508
2509    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
2510    if (tagged && ret.attrs == 0xff) {
2511        ret.attrs = 0xf0;
2512    }
2513
2514    return ret;
2515}
2516
2517/*
2518 * MMU disabled.  S1 addresses within aa64 translation regimes are
2519 * still checked for bounds -- see AArch64.S1DisabledOutput().
2520 */
2521static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
2522                                   MMUAccessType access_type,
2523                                   ARMMMUIdx mmu_idx, bool is_secure,
2524                                   GetPhysAddrResult *result,
2525                                   ARMMMUFaultInfo *fi)
2526{
2527    uint8_t memattr = 0x00;    /* Device nGnRnE */
2528    uint8_t shareability = 0;  /* non-shareable */
2529    int r_el;
2530
2531    switch (mmu_idx) {
2532    case ARMMMUIdx_Stage2:
2533    case ARMMMUIdx_Stage2_S:
2534    case ARMMMUIdx_Phys_NS:
2535    case ARMMMUIdx_Phys_S:
2536        break;
2537
2538    default:
2539        r_el = regime_el(env, mmu_idx);
2540        if (arm_el_is_aa64(env, r_el)) {
2541            int pamax = arm_pamax(env_archcpu(env));
2542            uint64_t tcr = env->cp15.tcr_el[r_el];
2543            int addrtop, tbi;
2544
2545            tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
2546            if (access_type == MMU_INST_FETCH) {
2547                tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
2548            }
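            /*
             * TBI is reported per address-space half; VA bit 55
             * selects which half (and so which TBI bit) applies.
             */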
2549            tbi = (tbi >> extract64(address, 55, 1)) & 1;
2550            addrtop = (tbi ? 55 : 63);
2551
2552            if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
2553                fi->type = ARMFault_AddressSize;
2554                fi->level = 0;
2555                fi->stage2 = false;
2556                return true;
2557            }
2558
2559            /*
2560             * When TBI is disabled, we've just validated that all of the
2561             * bits above PAMax are zero, so logically we only need to
2562             * clear the top byte for TBI.  But it's clearer to follow
2563             * the pseudocode set of addrdesc.paddress.
2564             */
2565            address = extract64(address, 0, 52);
2566        }
2567
2568        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
2569        if (r_el == 1) {
2570            uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2571            if (hcr & HCR_DC) {
2572                if (hcr & HCR_DCT) {
2573                    memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
2574                } else {
2575                    memattr = 0xff;  /* Normal, WB, RWA */
2576                }
2577            }
2578        }
2579        if (memattr == 0 && access_type == MMU_INST_FETCH) {
2580            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
2581                memattr = 0xee;  /* Normal, WT, RA, NT */
2582            } else {
2583                memattr = 0x44;  /* Normal, Non-cacheable */
2584            }
2585            shareability = 2; /* outer shareable */
2586        }
2587        result->cacheattrs.is_s2_format = false;
2588        break;
2589    }
2590
2591    result->f.phys_addr = address;
2592    result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2593    result->f.lg_page_size = TARGET_PAGE_BITS;
2594    result->cacheattrs.shareability = shareability;
2595    result->cacheattrs.attrs = memattr;
2596    return false;
2597}
2598
2599static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
2600                                   target_ulong address,
2601                                   MMUAccessType access_type,
2602                                   GetPhysAddrResult *result,
2603                                   ARMMMUFaultInfo *fi)
2604{
2605    hwaddr ipa;
2606    int s1_prot, s1_lgpgsz;
2607    bool is_secure = ptw->in_secure;
2608    bool ret, ipa_secure, s2walk_secure;
2609    ARMCacheAttrs cacheattrs1;
2610    bool is_el0;
2611    uint64_t hcr;
2612
2613    ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
2614
2615    /* If S1 fails, return early.  */
2616    if (ret) {
2617        return ret;
2618    }
2619
2620    ipa = result->f.phys_addr;
2621    ipa_secure = result->f.attrs.secure;
2622    if (is_secure) {
2623        /* Select TCR based on the NS bit from the S1 walk. */
2624        s2walk_secure = !(ipa_secure
2625                          ? env->cp15.vstcr_el2 & VSTCR_SW
2626                          : env->cp15.vtcr_el2 & VTCR_NSW);
2627    } else {
2628        assert(!ipa_secure);
2629        s2walk_secure = false;
2630    }
2631
2632    is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
2633    ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
2634    ptw->in_ptw_idx = s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
2635    ptw->in_secure = s2walk_secure;
2636
2637    /*
2638     * S1 is done, now do S2 translation.
2639     * Save the stage1 results so that we may merge prot and cacheattrs later.
2640     */
2641    s1_prot = result->f.prot;
2642    s1_lgpgsz = result->f.lg_page_size;
2643    cacheattrs1 = result->cacheattrs;
2644    memset(result, 0, sizeof(*result));
2645
2646    ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi);
2647    fi->s2addr = ipa;
2648
2649    /* Combine the S1 and S2 perms.  */
2650    result->f.prot &= s1_prot;
2651
2652    /* If S2 fails, return early.  */
2653    if (ret) {
2654        return ret;
2655    }
2656
2657    /*
2658     * Use the maximum of the S1 & S2 page size, so that invalidation
2659     * of pages > TARGET_PAGE_SIZE works correctly.
2660     */
2661    if (result->f.lg_page_size < s1_lgpgsz) {
2662        result->f.lg_page_size = s1_lgpgsz;
2663    }
2664
2665    /* Combine the S1 and S2 cache attributes. */
2666    hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2667    if (hcr & HCR_DC) {
2668        /*
2669         * HCR.DC forces the first stage attributes to
2670         *  Normal Non-Shareable,
2671         *  Inner Write-Back Read-Allocate Write-Allocate,
2672         *  Outer Write-Back Read-Allocate Write-Allocate.
2673         * Do not overwrite Tagged within attrs.
2674         */
2675        if (cacheattrs1.attrs != 0xf0) {
2676            cacheattrs1.attrs = 0xff;
2677        }
2678        cacheattrs1.shareability = 0;
2679    }
2680    result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
2681                                            result->cacheattrs);
2682
2683    /*
2684     * Check if IPA translates to secure or non-secure PA space.
2685     * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
2686     */
2687    result->f.attrs.secure =
2688        (is_secure
2689         && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
2690         && (ipa_secure
2691             || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
2692
2693    return false;
2694}
2695
2696static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
2697                                      target_ulong address,
2698                                      MMUAccessType access_type,
2699                                      GetPhysAddrResult *result,
2700                                      ARMMMUFaultInfo *fi)
2701{
2702    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
2703    bool is_secure = ptw->in_secure;
2704    ARMMMUIdx s1_mmu_idx;
2705
2706    /*
2707     * The page table entries may downgrade secure to non-secure, but
2708     * cannot upgrade a non-secure translation regime's attributes
2709     * to secure.
2710     */
2711    result->f.attrs.secure = is_secure;
2712
2713    switch (mmu_idx) {
2714    case ARMMMUIdx_Phys_S:
2715    case ARMMMUIdx_Phys_NS:
2716        /* Checking Phys early avoids special casing later vs regime_el. */
2717        return get_phys_addr_disabled(env, address, access_type, mmu_idx,
2718                                      is_secure, result, fi);
2719
2720    case ARMMMUIdx_Stage1_E0:
2721    case ARMMMUIdx_Stage1_E1:
2722    case ARMMMUIdx_Stage1_E1_PAN:
2723        /* First stage lookup uses second stage for ptw. */
2724        ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
2725        break;
2726
2727    case ARMMMUIdx_E10_0:
2728        s1_mmu_idx = ARMMMUIdx_Stage1_E0;
2729        goto do_twostage;
2730    case ARMMMUIdx_E10_1:
2731        s1_mmu_idx = ARMMMUIdx_Stage1_E1;
2732        goto do_twostage;
2733    case ARMMMUIdx_E10_1_PAN:
2734        s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
2735    do_twostage:
2736        /*
2737         * Call ourselves recursively to do the stage 1 and then stage 2
2738     * translations if mmu_idx is a two-stage regime, and EL2 is present.
2739         * Otherwise, a stage1+stage2 translation is just stage 1.
2740         */
2741        ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
2742        if (arm_feature(env, ARM_FEATURE_EL2) &&
2743            !regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
2744            return get_phys_addr_twostage(env, ptw, address, access_type,
2745                                          result, fi);
2746        }
2747        /* fall through */
2748
2749    default:
2750        /* Single stage and second stage use physical for ptw. */
2751        ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
2752        break;
2753    }
2754
2755    result->f.attrs.user = regime_is_user(env, mmu_idx);
2756
2757    /*
2758     * Fast Context Switch Extension. This doesn't exist at all in v8.
2759     * In v7 and earlier it affects all stage 1 translations.
2760     */
2761    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
2762        && !arm_feature(env, ARM_FEATURE_V8)) {
2763        if (regime_el(env, mmu_idx) == 3) {
2764            address += env->cp15.fcseidr_s;
2765        } else {
2766            address += env->cp15.fcseidr_ns;
2767        }
2768    }
2769
2770    if (arm_feature(env, ARM_FEATURE_PMSA)) {
2771        bool ret;
2772        result->f.lg_page_size = TARGET_PAGE_BITS;
2773
2774        if (arm_feature(env, ARM_FEATURE_V8)) {
2775            /* PMSAv8 */
2776            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
2777                                       is_secure, result, fi);
2778        } else if (arm_feature(env, ARM_FEATURE_V7)) {
2779            /* PMSAv7 */
2780            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
2781                                       is_secure, result, fi);
2782        } else {
2783            /* Pre-v7 MPU */
2784            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
2785                                       is_secure, result, fi);
2786        }
2787        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
2788                      " mmu_idx %u -> %s (prot %c%c%c)\n",
2789                      access_type == MMU_DATA_LOAD ? "reading" :
2790                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
2791                      (uint32_t)address, mmu_idx,
2792                      ret ? "Miss" : "Hit",
2793                      result->f.prot & PAGE_READ ? 'r' : '-',
2794                      result->f.prot & PAGE_WRITE ? 'w' : '-',
2795                      result->f.prot & PAGE_EXEC ? 'x' : '-');
2796
2797        return ret;
2798    }
2799
2800    /* Definitely a real MMU, not an MPU */
2801
2802    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
2803        return get_phys_addr_disabled(env, address, access_type, mmu_idx,
2804                                      is_secure, result, fi);
2805    }
2806
2807    if (regime_using_lpae_format(env, mmu_idx)) {
2808        return get_phys_addr_lpae(env, ptw, address, access_type, false,
2809                                  result, fi);
2810    } else if (arm_feature(env, ARM_FEATURE_V7) ||
2811               regime_sctlr(env, mmu_idx) & SCTLR_XP) {
2812        return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
2813    } else {
2814        return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
2815    }
2816}
2817
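    /*
     * As get_phys_addr(), but with the security state supplied by the
     * caller rather than derived from the current CPU state.
     */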
2818bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
2819                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
2820                               bool is_secure, GetPhysAddrResult *result,
2821                               ARMMMUFaultInfo *fi)
2822{
2823    S1Translate ptw = {
2824        .in_mmu_idx = mmu_idx,
2825        .in_secure = is_secure,
2826    };
2827    return get_phys_addr_with_struct(env, &ptw, address, access_type,
2828                                     result, fi);
2829}
2830
2831bool get_phys_addr(CPUARMState *env, target_ulong address,
2832                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
2833                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
2834{
2835    bool is_secure;
2836
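    /*
     * Derive the security state from the translation regime: regimes
     * usable from either state follow the CPU's current Secure state,
     * while Secure-only and Non-secure-only regimes are fixed.
     */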
2837    switch (mmu_idx) {
2838    case ARMMMUIdx_E10_0:
2839    case ARMMMUIdx_E10_1:
2840    case ARMMMUIdx_E10_1_PAN:
2841    case ARMMMUIdx_E20_0:
2842    case ARMMMUIdx_E20_2:
2843    case ARMMMUIdx_E20_2_PAN:
2844    case ARMMMUIdx_Stage1_E0:
2845    case ARMMMUIdx_Stage1_E1:
2846    case ARMMMUIdx_Stage1_E1_PAN:
2847    case ARMMMUIdx_E2:
2848        is_secure = arm_is_secure_below_el3(env);
2849        break;
2850    case ARMMMUIdx_Stage2:
2851    case ARMMMUIdx_Phys_NS:
2852    case ARMMMUIdx_MPrivNegPri:
2853    case ARMMMUIdx_MUserNegPri:
2854    case ARMMMUIdx_MPriv:
2855    case ARMMMUIdx_MUser:
2856        is_secure = false;
2857        break;
2858    case ARMMMUIdx_E3:
2859    case ARMMMUIdx_Stage2_S:
2860    case ARMMMUIdx_Phys_S:
2861    case ARMMMUIdx_MSPrivNegPri:
2862    case ARMMMUIdx_MSUserNegPri:
2863    case ARMMMUIdx_MSPriv:
2864    case ARMMMUIdx_MSUser:
2865        is_secure = true;
2866        break;
2867    default:
2868        g_assert_not_reached();
2869    }
2870    return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
2871                                     is_secure, result, fi);
2872}
2873
2874hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
2875                                         MemTxAttrs *attrs)
2876{
2877    ARMCPU *cpu = ARM_CPU(cs);
2878    CPUARMState *env = &cpu->env;
2879    S1Translate ptw = {
2880        .in_mmu_idx = arm_mmu_idx(env),
2881        .in_secure = arm_is_secure(env),
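        /* A debug walk must not fill the TLB or disturb guest state */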
2882        .in_debug = true,
2883    };
2884    GetPhysAddrResult res = {};
2885    ARMMMUFaultInfo fi = {};
2886    bool ret;
2887
2888    ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
2889    *attrs = res.f.attrs;
2890
2891    if (ret) {
2892        return -1;
2893    }
2894    return res.f.phys_addr;
2895}
2896