qemu/target/arm/ptw.c
   1/*
   2 * ARM page table walking.
   3 *
   4 * This code is licensed under the GNU GPL v2 or later.
   5 *
   6 * SPDX-License-Identifier: GPL-2.0-or-later
   7 */
   8
   9#include "qemu/osdep.h"
  10#include "qemu/log.h"
  11#include "qemu/range.h"
  12#include "cpu.h"
  13#include "internals.h"
  14#include "idau.h"
  15
  16
  17static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
  18                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
  19                               bool s1_is_el0, hwaddr *phys_ptr,
  20                               MemTxAttrs *txattrs, int *prot,
  21                               target_ulong *page_size_ptr,
  22                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
  23    __attribute__((nonnull));
  24
  25/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
  26static const uint8_t pamax_map[] = {
  27    [0] = 32,
  28    [1] = 36,
  29    [2] = 40,
  30    [3] = 42,
  31    [4] = 44,
  32    [5] = 48,
  33    [6] = 52,
  34};
  35
  36/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
  37unsigned int arm_pamax(ARMCPU *cpu)
  38{
  39    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
  40        unsigned int parange =
  41            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
  42
  43        /*
  44         * id_aa64mmfr0 is a read-only register so values outside of the
  45         * supported mappings can be considered an implementation error.
  46         */
  47        assert(parange < ARRAY_SIZE(pamax_map));
  48        return pamax_map[parange];
  49    }
  50
  51    /*
  52     * In machvirt_init, we call arm_pamax on a cpu that is not fully
  53     * initialized, so we can't rely on the propagation done in realize.
  54     */
  55    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
  56        arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
  57        /* v7 with LPAE */
  58        return 40;
  59    }
  60    /* Anything else */
  61    return 32;
  62}
  63
  64/*
  65 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
  66 */
  67ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
  68{
  69    switch (mmu_idx) {
  70    case ARMMMUIdx_SE10_0:
  71        return ARMMMUIdx_Stage1_SE0;
  72    case ARMMMUIdx_SE10_1:
  73        return ARMMMUIdx_Stage1_SE1;
  74    case ARMMMUIdx_SE10_1_PAN:
  75        return ARMMMUIdx_Stage1_SE1_PAN;
  76    case ARMMMUIdx_E10_0:
  77        return ARMMMUIdx_Stage1_E0;
  78    case ARMMMUIdx_E10_1:
  79        return ARMMMUIdx_Stage1_E1;
  80    case ARMMMUIdx_E10_1_PAN:
  81        return ARMMMUIdx_Stage1_E1_PAN;
  82    default:
  83        return mmu_idx;
  84    }
  85}
  86
  87ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
  88{
  89    return stage_1_mmu_idx(arm_mmu_idx(env));
  90}
  91
  92static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
  93{
  94    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
  95}
  96
  97static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
  98{
  99    switch (mmu_idx) {
 100    case ARMMMUIdx_SE10_0:
 101    case ARMMMUIdx_E20_0:
 102    case ARMMMUIdx_SE20_0:
 103    case ARMMMUIdx_Stage1_E0:
 104    case ARMMMUIdx_Stage1_SE0:
 105    case ARMMMUIdx_MUser:
 106    case ARMMMUIdx_MSUser:
 107    case ARMMMUIdx_MUserNegPri:
 108    case ARMMMUIdx_MSUserNegPri:
 109        return true;
 110    default:
 111        return false;
 112    case ARMMMUIdx_E10_0:
 113    case ARMMMUIdx_E10_1:
 114    case ARMMMUIdx_E10_1_PAN:
 115        g_assert_not_reached();
 116    }
 117}
 118
 119/* Return the TTBR associated with this translation regime */
 120static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
 121{
 122    if (mmu_idx == ARMMMUIdx_Stage2) {
 123        return env->cp15.vttbr_el2;
 124    }
 125    if (mmu_idx == ARMMMUIdx_Stage2_S) {
 126        return env->cp15.vsttbr_el2;
 127    }
 128    if (ttbrn == 0) {
 129        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
 130    } else {
 131        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
 132    }
 133}
 134
 135/* Return true if the specified stage of address translation is disabled */
 136static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx)
 137{
 138    uint64_t hcr_el2;
 139
 140    if (arm_feature(env, ARM_FEATURE_M)) {
 141        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
 142                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
 143        case R_V7M_MPU_CTRL_ENABLE_MASK:
 144            /* Enabled, but not for HardFault and NMI */
 145            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
 146        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
 147            /* Enabled for all cases */
 148            return false;
 149        case 0:
 150        default:
 151            /*
 152             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
 153             * we warned about that in armv7m_nvic.c when the guest set it.
 154             */
 155            return true;
 156        }
 157    }
 158
 159    hcr_el2 = arm_hcr_el2_eff(env);
 160
 161    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
 162        /* HCR.DC means HCR.VM behaves as 1 */
 163        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
 164    }
 165
 166    if (hcr_el2 & HCR_TGE) {
 167        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
 168        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
 169            return true;
 170        }
 171    }
 172
 173    if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
 174        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
 175        return true;
 176    }
 177
 178    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
 179}
 180
 181static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
 182{
 183    /*
 184     * For an S1 page table walk, the stage 1 attributes are always
 185     * some form of "this is Normal memory". The combined S1+S2
 186     * attributes are therefore only Device if stage 2 specifies Device.
 187     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
 188     * ie when cacheattrs.attrs bits [3:2] are 0b00.
 189     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
 190     * when cacheattrs.attrs bit [2] is 0.
 191     */
 192    assert(cacheattrs.is_s2_format);
 193    if (arm_hcr_el2_eff(env) & HCR_FWB) {
 194        return (cacheattrs.attrs & 0x4) == 0;
 195    } else {
 196        return (cacheattrs.attrs & 0xc) == 0;
 197    }
 198}
 199
 200/* Translate a S1 pagetable walk through S2 if needed.  */
 201static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
 202                               hwaddr addr, bool *is_secure,
 203                               ARMMMUFaultInfo *fi)
 204{
 205    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
 206        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
 207        target_ulong s2size;
 208        hwaddr s2pa;
 209        int s2prot;
 210        int ret;
 211        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
 212                                          : ARMMMUIdx_Stage2;
 213        ARMCacheAttrs cacheattrs = {};
 214        MemTxAttrs txattrs = {};
 215
 216        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
 217                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
 218                                 &cacheattrs);
 219        if (ret) {
 220            assert(fi->type != ARMFault_None);
 221            fi->s2addr = addr;
 222            fi->stage2 = true;
 223            fi->s1ptw = true;
 224            fi->s1ns = !*is_secure;
 225            return ~0;
 226        }
 227        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
 228            ptw_attrs_are_device(env, cacheattrs)) {
 229            /*
 230             * PTW set and S1 walk touched S2 Device memory:
 231             * generate Permission fault.
 232             */
 233            fi->type = ARMFault_Permission;
 234            fi->s2addr = addr;
 235            fi->stage2 = true;
 236            fi->s1ptw = true;
 237            fi->s1ns = !*is_secure;
 238            return ~0;
 239        }
 240
 241        if (arm_is_secure_below_el3(env)) {
 242            /* Check if page table walk is to secure or non-secure PA space. */
 243            if (*is_secure) {
 244                *is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
 245            } else {
 246                *is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
 247            }
 248        } else {
 249            assert(!*is_secure);
 250        }
 251
 252        addr = s2pa;
 253    }
 254    return addr;
 255}
 256
 257/* All loads done in the course of a page table walk go through here. */
 258static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
 259                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
 260{
 261    CPUState *cs = env_cpu(env);
 262    MemTxAttrs attrs = {};
 263    MemTxResult result = MEMTX_OK;
 264    AddressSpace *as;
 265    uint32_t data;
 266
 267    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
 268    attrs.secure = is_secure;
 269    as = arm_addressspace(cs, attrs);
 270    if (fi->s1ptw) {
 271        return 0;
 272    }
 273    if (regime_translation_big_endian(env, mmu_idx)) {
 274        data = address_space_ldl_be(as, addr, attrs, &result);
 275    } else {
 276        data = address_space_ldl_le(as, addr, attrs, &result);
 277    }
 278    if (result == MEMTX_OK) {
 279        return data;
 280    }
 281    fi->type = ARMFault_SyncExternalOnWalk;
 282    fi->ea = arm_extabort_type(result);
 283    return 0;
 284}
 285
 286static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
 287                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
 288{
 289    CPUState *cs = env_cpu(env);
 290    MemTxAttrs attrs = {};
 291    MemTxResult result = MEMTX_OK;
 292    AddressSpace *as;
 293    uint64_t data;
 294
 295    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
 296    attrs.secure = is_secure;
 297    as = arm_addressspace(cs, attrs);
 298    if (fi->s1ptw) {
 299        return 0;
 300    }
 301    if (regime_translation_big_endian(env, mmu_idx)) {
 302        data = address_space_ldq_be(as, addr, attrs, &result);
 303    } else {
 304        data = address_space_ldq_le(as, addr, attrs, &result);
 305    }
 306    if (result == MEMTX_OK) {
 307        return data;
 308    }
 309    fi->type = ARMFault_SyncExternalOnWalk;
 310    fi->ea = arm_extabort_type(result);
 311    return 0;
 312}
 313
 314static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
 315                                     uint32_t *table, uint32_t address)
 316{
 317    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
 318    uint64_t tcr = regime_tcr(env, mmu_idx);
 319    int maskshift = extract32(tcr, 0, 3);
 320    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
 321    uint32_t base_mask;
 322
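    /*
     * TTBCR.N (the 3-bit field extracted above as maskshift) selects how
     * many top bits of the VA steer the lookup to TTBR1: if any of those
     * bits are set we use TTBR1, otherwise TTBR0.  A non-zero N also
     * shrinks the TTBR0 table to 2^(14 - N) bytes, which is why base_mask
     * keeps correspondingly more base-address bits below.
     */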
 323    if (address & mask) {
 324        if (tcr & TTBCR_PD1) {
 325            /* Translation table walk disabled for TTBR1 */
 326            return false;
 327        }
 328        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
 329    } else {
 330        if (tcr & TTBCR_PD0) {
 331            /* Translation table walk disabled for TTBR0 */
 332            return false;
 333        }
 334        base_mask = ~((uint32_t)0x3fffu >> maskshift);
 335        *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
 336    }
 337    *table |= (address >> 18) & 0x3ffc;
 338    return true;
 339}
 340
 341/*
 342 * Translate section/page access permissions to page R/W protection flags
 343 * @env:         CPUARMState
 344 * @mmu_idx:     MMU index indicating required translation regime
 345 * @ap:          The 3-bit access permissions (AP[2:0])
 346 * @domain_prot: The 2-bit domain access permissions
 347 */
 348static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
 349                         int ap, int domain_prot)
 350{
 351    bool is_user = regime_is_user(env, mmu_idx);
 352
 353    if (domain_prot == 3) {
 354        return PAGE_READ | PAGE_WRITE;
 355    }
 356
 357    switch (ap) {
 358    case 0:
 359        if (arm_feature(env, ARM_FEATURE_V7)) {
 360            return 0;
 361        }
 362        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
 363        case SCTLR_S:
 364            return is_user ? 0 : PAGE_READ;
 365        case SCTLR_R:
 366            return PAGE_READ;
 367        default:
 368            return 0;
 369        }
 370    case 1:
 371        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
 372    case 2:
 373        if (is_user) {
 374            return PAGE_READ;
 375        } else {
 376            return PAGE_READ | PAGE_WRITE;
 377        }
 378    case 3:
 379        return PAGE_READ | PAGE_WRITE;
 380    case 4: /* Reserved.  */
 381        return 0;
 382    case 5:
 383        return is_user ? 0 : PAGE_READ;
 384    case 6:
 385        return PAGE_READ;
 386    case 7:
 387        if (!arm_feature(env, ARM_FEATURE_V6K)) {
 388            return 0;
 389        }
 390        return PAGE_READ;
 391    default:
 392        g_assert_not_reached();
 393    }
 394}
 395
 396/*
 397 * Translate section/page access permissions to page R/W protection flags.
 398 * @ap:      The 2-bit simple AP (AP[2:1])
 399 * @is_user: TRUE if accessing from PL0
 400 */
 401static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
 402{
 403    switch (ap) {
 404    case 0:
 405        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
 406    case 1:
 407        return PAGE_READ | PAGE_WRITE;
 408    case 2:
 409        return is_user ? 0 : PAGE_READ;
 410    case 3:
 411        return PAGE_READ;
 412    default:
 413        g_assert_not_reached();
 414    }
 415}
 416
 417static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
 418{
 419    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
 420}
 421
 422static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
 423                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
 424                             hwaddr *phys_ptr, int *prot,
 425                             target_ulong *page_size,
 426                             ARMMMUFaultInfo *fi)
 427{
 428    int level = 1;
 429    uint32_t table;
 430    uint32_t desc;
 431    int type;
 432    int ap;
 433    int domain = 0;
 434    int domain_prot;
 435    hwaddr phys_addr;
 436    uint32_t dacr;
 437
 438    /* Pagetable walk.  */
 439    /* Lookup l1 descriptor.  */
 440    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
 441        /* Section translation fault if page walk is disabled by PD0 or PD1 */
 442        fi->type = ARMFault_Translation;
 443        goto do_fault;
 444    }
 445    desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
 446                       mmu_idx, fi);
 447    if (fi->type != ARMFault_None) {
 448        goto do_fault;
 449    }
 450    type = (desc & 3);
 451    domain = (desc >> 5) & 0x0f;
 452    if (regime_el(env, mmu_idx) == 1) {
 453        dacr = env->cp15.dacr_ns;
 454    } else {
 455        dacr = env->cp15.dacr_s;
 456    }
 457    domain_prot = (dacr >> (domain * 2)) & 3;
 458    if (type == 0) {
 459        /* Section translation fault.  */
 460        fi->type = ARMFault_Translation;
 461        goto do_fault;
 462    }
 463    if (type != 2) {
 464        level = 2;
 465    }
 466    if (domain_prot == 0 || domain_prot == 2) {
 467        fi->type = ARMFault_Domain;
 468        goto do_fault;
 469    }
 470    if (type == 2) {
 471        /* 1Mb section.  */
 472        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
 473        ap = (desc >> 10) & 3;
 474        *page_size = 1024 * 1024;
 475    } else {
 476        /* Lookup l2 entry.  */
 477        if (type == 1) {
 478            /* Coarse pagetable.  */
 479            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
 480        } else {
 481            /* Fine pagetable.  */
 482            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
 483        }
 484        desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
 485                           mmu_idx, fi);
 486        if (fi->type != ARMFault_None) {
 487            goto do_fault;
 488        }
 489        switch (desc & 3) {
 490        case 0: /* Page translation fault.  */
 491            fi->type = ARMFault_Translation;
 492            goto do_fault;
 493        case 1: /* 64k page.  */
 494            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
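            /*
             * A 64K large page has four 16K subpages, each with its own
             * AP field (descriptor bits [5:4], [7:6], [9:8], [11:10]);
             * address bits [15:14] pick the field that applies here.
             */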
 495            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
 496            *page_size = 0x10000;
 497            break;
 498        case 2: /* 4k page.  */
 499            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
 500            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
 501            *page_size = 0x1000;
 502            break;
 503        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
 504            if (type == 1) {
 505                /* ARMv6/XScale extended small page format */
 506                if (arm_feature(env, ARM_FEATURE_XSCALE)
 507                    || arm_feature(env, ARM_FEATURE_V6)) {
 508                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
 509                    *page_size = 0x1000;
 510                } else {
 511                    /*
 512                     * UNPREDICTABLE in ARMv5; we choose to take a
 513                     * page translation fault.
 514                     */
 515                    fi->type = ARMFault_Translation;
 516                    goto do_fault;
 517                }
 518            } else {
 519                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
 520                *page_size = 0x400;
 521            }
 522            ap = (desc >> 4) & 3;
 523            break;
 524        default:
 525            /* Never happens, but compiler isn't smart enough to tell.  */
 526            g_assert_not_reached();
 527        }
 528    }
 529    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
 530    *prot |= *prot ? PAGE_EXEC : 0;
 531    if (!(*prot & (1 << access_type))) {
 532        /* Access permission fault.  */
 533        fi->type = ARMFault_Permission;
 534        goto do_fault;
 535    }
 536    *phys_ptr = phys_addr;
 537    return false;
 538do_fault:
 539    fi->domain = domain;
 540    fi->level = level;
 541    return true;
 542}
 543
 544static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
 545                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
 546                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
 547                             target_ulong *page_size, ARMMMUFaultInfo *fi)
 548{
 549    ARMCPU *cpu = env_archcpu(env);
 550    int level = 1;
 551    uint32_t table;
 552    uint32_t desc;
 553    uint32_t xn;
 554    uint32_t pxn = 0;
 555    int type;
 556    int ap;
 557    int domain = 0;
 558    int domain_prot;
 559    hwaddr phys_addr;
 560    uint32_t dacr;
 561    bool ns;
 562
 563    /* Pagetable walk.  */
 564    /* Lookup l1 descriptor.  */
 565    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
 566        /* Section translation fault if page walk is disabled by PD0 or PD1 */
 567        fi->type = ARMFault_Translation;
 568        goto do_fault;
 569    }
 570    desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
 571                       mmu_idx, fi);
 572    if (fi->type != ARMFault_None) {
 573        goto do_fault;
 574    }
 575    type = (desc & 3);
 576    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
 577        /* Section translation fault, or attempt to use the encoding
 578         * which is Reserved on implementations without PXN.
 579         */
 580        fi->type = ARMFault_Translation;
 581        goto do_fault;
 582    }
 583    if ((type == 1) || !(desc & (1 << 18))) {
 584        /* Page or Section.  */
 585        domain = (desc >> 5) & 0x0f;
 586    }
 587    if (regime_el(env, mmu_idx) == 1) {
 588        dacr = env->cp15.dacr_ns;
 589    } else {
 590        dacr = env->cp15.dacr_s;
 591    }
 592    if (type == 1) {
 593        level = 2;
 594    }
 595    domain_prot = (dacr >> (domain * 2)) & 3;
 596    if (domain_prot == 0 || domain_prot == 2) {
 597        /* Section or Page domain fault */
 598        fi->type = ARMFault_Domain;
 599        goto do_fault;
 600    }
 601    if (type != 1) {
 602        if (desc & (1 << 18)) {
 603            /* Supersection.  */
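            /*
             * 16MB supersection: PA bits [35:32] come from descriptor
             * bits [23:20] and PA bits [39:36] from bits [8:5], giving
             * up to a 40-bit output address.
             */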
 604            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
 605            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
 606            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
 607            *page_size = 0x1000000;
 608        } else {
 609            /* Section.  */
 610            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
 611            *page_size = 0x100000;
 612        }
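        /*
         * For (super)sections, AP[1:0] live in descriptor bits [11:10]
         * and AP[2] in bit 15.
         */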
 613        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
 614        xn = desc & (1 << 4);
 615        pxn = desc & 1;
 616        ns = extract32(desc, 19, 1);
 617    } else {
 618        if (cpu_isar_feature(aa32_pxn, cpu)) {
 619            pxn = (desc >> 2) & 1;
 620        }
 621        ns = extract32(desc, 3, 1);
 622        /* Lookup l2 entry.  */
 623        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
 624        desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
 625                           mmu_idx, fi);
 626        if (fi->type != ARMFault_None) {
 627            goto do_fault;
 628        }
 629        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
 630        switch (desc & 3) {
 631        case 0: /* Page translation fault.  */
 632            fi->type = ARMFault_Translation;
 633            goto do_fault;
 634        case 1: /* 64k page.  */
 635            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
 636            xn = desc & (1 << 15);
 637            *page_size = 0x10000;
 638            break;
 639        case 2: case 3: /* 4k page.  */
 640            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
 641            xn = desc & 1;
 642            *page_size = 0x1000;
 643            break;
 644        default:
 645            /* Never happens, but compiler isn't smart enough to tell.  */
 646            g_assert_not_reached();
 647        }
 648    }
 649    if (domain_prot == 3) {
 650        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
 651    } else {
 652        if (pxn && !regime_is_user(env, mmu_idx)) {
 653            xn = 1;
 654        }
 655        if (xn && access_type == MMU_INST_FETCH) {
 656            fi->type = ARMFault_Permission;
 657            goto do_fault;
 658        }
 659
 660        if (arm_feature(env, ARM_FEATURE_V6K) &&
 661                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
 662            /* The simplified model uses AP[0] as an access control bit.  */
 663            if ((ap & 1) == 0) {
 664                /* Access flag fault.  */
 665                fi->type = ARMFault_AccessFlag;
 666                goto do_fault;
 667            }
 668            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
 669        } else {
 670            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
 671        }
 672        if (*prot && !xn) {
 673            *prot |= PAGE_EXEC;
 674        }
 675        if (!(*prot & (1 << access_type))) {
 676            /* Access permission fault.  */
 677            fi->type = ARMFault_Permission;
 678            goto do_fault;
 679        }
 680    }
 681    if (ns) {
 682        /* The NS bit will (as required by the architecture) have no effect if
 683         * the CPU doesn't support TZ or this is a non-secure translation
 684         * regime, because the attribute will already be non-secure.
 685         */
 686        attrs->secure = false;
 687    }
 688    *phys_ptr = phys_addr;
 689    return false;
 690do_fault:
 691    fi->domain = domain;
 692    fi->level = level;
 693    return true;
 694}
 695
 696/*
 697 * Translate S2 section/page access permissions to protection flags
 698 * @env:     CPUARMState
 699 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 700 * @xn:      XN (execute-never) bits
 701 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 702 */
 703static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
 704{
 705    int prot = 0;
 706
 707    if (s2ap & 1) {
 708        prot |= PAGE_READ;
 709    }
 710    if (s2ap & 2) {
 711        prot |= PAGE_WRITE;
 712    }
 713
 714    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
 715        switch (xn) {
 716        case 0:
 717            prot |= PAGE_EXEC;
 718            break;
 719        case 1:
 720            if (s1_is_el0) {
 721                prot |= PAGE_EXEC;
 722            }
 723            break;
 724        case 2:
 725            break;
 726        case 3:
 727            if (!s1_is_el0) {
 728                prot |= PAGE_EXEC;
 729            }
 730            break;
 731        default:
 732            g_assert_not_reached();
 733        }
 734    } else {
 735        if (!extract32(xn, 1, 1)) {
 736            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
 737                prot |= PAGE_EXEC;
 738            }
 739        }
 740    }
 741    return prot;
 742}
 743
 744/*
 745 * Translate section/page access permissions to protection flags
 746 * @env:     CPUARMState
 747 * @mmu_idx: MMU index indicating required translation regime
 748 * @is_aa64: TRUE if AArch64
 749 * @ap:      The 2-bit simple AP (AP[2:1])
 750 * @ns:      NS (non-secure) bit
 751 * @xn:      XN (execute-never) bit
 752 * @pxn:     PXN (privileged execute-never) bit
 753 */
 754static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
 755                      int ap, int ns, int xn, int pxn)
 756{
 757    bool is_user = regime_is_user(env, mmu_idx);
 758    int prot_rw, user_rw;
 759    bool have_wxn;
 760    int wxn = 0;
 761
 762    assert(mmu_idx != ARMMMUIdx_Stage2);
 763    assert(mmu_idx != ARMMMUIdx_Stage2_S);
 764
 765    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
 766    if (is_user) {
 767        prot_rw = user_rw;
 768    } else {
 769        if (user_rw && regime_is_pan(env, mmu_idx)) {
 770            /* PAN forbids data accesses but doesn't affect insn fetch */
 771            prot_rw = 0;
 772        } else {
 773            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
 774        }
 775    }
 776
 777    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
 778        return prot_rw;
 779    }
 780
 781    /* TODO have_wxn should be replaced with
 782     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
 783     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
 784     * compatible processors have EL2, which is required for [U]WXN.
 785     */
 786    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
 787
 788    if (have_wxn) {
 789        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
 790    }
 791
 792    if (is_aa64) {
 793        if (regime_has_2_ranges(mmu_idx) && !is_user) {
 794            xn = pxn || (user_rw & PAGE_WRITE);
 795        }
 796    } else if (arm_feature(env, ARM_FEATURE_V7)) {
 797        switch (regime_el(env, mmu_idx)) {
 798        case 1:
 799        case 3:
 800            if (is_user) {
 801                xn = xn || !(user_rw & PAGE_READ);
 802            } else {
 803                int uwxn = 0;
 804                if (have_wxn) {
 805                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
 806                }
 807                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
 808                     (uwxn && (user_rw & PAGE_WRITE));
 809            }
 810            break;
 811        case 2:
 812            break;
 813        }
 814    } else {
 815        xn = wxn = 0;
 816    }
 817
 818    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
 819        return prot_rw;
 820    }
 821    return prot_rw | PAGE_EXEC;
 822}
 823
 824static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
 825                                          ARMMMUIdx mmu_idx)
 826{
 827    uint64_t tcr = regime_tcr(env, mmu_idx);
 828    uint32_t el = regime_el(env, mmu_idx);
 829    int select, tsz;
 830    bool epd, hpd;
 831
 832    assert(mmu_idx != ARMMMUIdx_Stage2_S);
 833
 834    if (mmu_idx == ARMMMUIdx_Stage2) {
 835        /* VTCR */
 836        bool sext = extract32(tcr, 4, 1);
 837        bool sign = extract32(tcr, 3, 1);
 838
 839        /*
 840         * If the sign-extend bit is not the same as t0sz[3], the result
 841         * is unpredictable. Flag this as a guest error.
 842         */
 843        if (sign != sext) {
 844            qemu_log_mask(LOG_GUEST_ERROR,
 845                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
 846        }
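        /*
         * VTCR.T0SZ is effectively a signed 4-bit field giving an input
         * range of 32 - T0SZ bits; adding 8 rescales it against the
         * 40-bit addrsize used for AArch32 stage 2 in get_phys_addr_lpae,
         * so inputsize works out as 32 - T0SZ there.
         */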
 847        tsz = sextract32(tcr, 0, 4) + 8;
 848        select = 0;
 849        hpd = false;
 850        epd = false;
 851    } else if (el == 2) {
 852        /* HTCR */
 853        tsz = extract32(tcr, 0, 3);
 854        select = 0;
 855        hpd = extract64(tcr, 24, 1);
 856        epd = false;
 857    } else {
 858        int t0sz = extract32(tcr, 0, 3);
 859        int t1sz = extract32(tcr, 16, 3);
 860
 861        if (t1sz == 0) {
 862            select = va > (0xffffffffu >> t0sz);
 863        } else {
 864            /* Note that we will detect errors later.  */
 865            select = va >= ~(0xffffffffu >> t1sz);
 866        }
 867        if (!select) {
 868            tsz = t0sz;
 869            epd = extract32(tcr, 7, 1);
 870            hpd = extract64(tcr, 41, 1);
 871        } else {
 872            tsz = t1sz;
 873            epd = extract32(tcr, 23, 1);
 874            hpd = extract64(tcr, 42, 1);
 875        }
 876        /* For aarch32, hpd0 is not enabled without t2e as well.  */
 877        hpd &= extract32(tcr, 6, 1);
 878    }
 879
 880    return (ARMVAParameters) {
 881        .tsz = tsz,
 882        .select = select,
 883        .epd = epd,
 884        .hpd = hpd,
 885    };
 886}
 887
 888/*
 889 * check_s2_mmu_setup
 890 * @cpu:        ARMCPU
 891 * @is_aa64:    True if the translation regime is in AArch64 state
 892 * @startlevel: Suggested starting level
 893 * @inputsize:  Bitsize of IPAs
 894 * @stride:     Page-table stride (See the ARM ARM)
 895 *
 896 * Returns true if the suggested S2 translation parameters are OK and
 897 * false otherwise.
 898 */
 899static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
 900                               int inputsize, int stride, int outputsize)
 901{
 902    const int grainsize = stride + 3;
 903    int startsizecheck;
 904
 905    /*
 906     * Negative levels are usually not allowed...
 907     * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
 908     * begins with level -1.  Note that previous feature tests will have
 909     * eliminated this combination if it is not enabled.
 910     */
 911    if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
 912        return false;
 913    }
 914
 915    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
 916    if (startsizecheck < 1 || startsizecheck > stride + 4) {
 917        return false;
 918    }
 919
 920    if (is_aa64) {
 921        switch (stride) {
 922        case 13: /* 64KB Pages.  */
 923            if (level == 0 || (level == 1 && outputsize <= 42)) {
 924                return false;
 925            }
 926            break;
 927        case 11: /* 16KB Pages.  */
 928            if (level == 0 || (level == 1 && outputsize <= 40)) {
 929                return false;
 930            }
 931            break;
 932        case 9: /* 4KB Pages.  */
 933            if (level == 0 && outputsize <= 42) {
 934                return false;
 935            }
 936            break;
 937        default:
 938            g_assert_not_reached();
 939        }
 940
 941        /* Inputsize checks.  */
 942        if (inputsize > outputsize &&
 943            (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
 944            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
 945            return false;
 946        }
 947    } else {
 948        /* AArch32 only supports 4KB pages. Assert on that.  */
 949        assert(stride == 9);
 950
 951        if (level == 0) {
 952            return false;
 953        }
 954    }
 955    return true;
 956}
 957
 958/**
 959 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 960 *
 961 * Returns false if the translation was successful. Otherwise, phys_ptr,
 962 * attrs, prot and page_size may not be filled in, and the populated fault
 963 * info (fi) records why the translation aborted. When that is reported as
 964 * a long-format DFSR/IFSR fault register value, the WnR bit is never set
 965 * (the caller must do this).
 966 *
 967 * @env: CPUARMState
 968 * @address: virtual address to get physical address for
 969 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 970 * @mmu_idx: MMU index indicating required translation regime
 971 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page
 972 *             table walk), must be true if this is stage 2 of a stage 1+2
 973 *             walk for an EL0 access. If @mmu_idx is anything else,
 974 *             @s1_is_el0 is ignored.
 975 * @phys_ptr: set to the physical address corresponding to the virtual address
 976 * @attrs: set to the memory transaction attributes to use
 977 * @prot: set to the permissions for the page containing phys_ptr
 978 * @page_size_ptr: set to the size of the page containing phys_ptr
 979 * @fi: set to fault info if the translation fails
 980 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 981 */
 982static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
 983                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
 984                               bool s1_is_el0, hwaddr *phys_ptr,
 985                               MemTxAttrs *txattrs, int *prot,
 986                               target_ulong *page_size_ptr,
 987                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
 988{
 989    ARMCPU *cpu = env_archcpu(env);
 990    /* Read an LPAE long-descriptor translation table. */
 991    ARMFaultType fault_type = ARMFault_Translation;
 992    uint32_t level;
 993    ARMVAParameters param;
 994    uint64_t ttbr;
 995    hwaddr descaddr, indexmask, indexmask_grainsize;
 996    uint32_t tableattrs;
 997    target_ulong page_size;
 998    uint32_t attrs;
 999    int32_t stride;
1000    int addrsize, inputsize, outputsize;
1001    uint64_t tcr = regime_tcr(env, mmu_idx);
1002    int ap, ns, xn, pxn;
1003    uint32_t el = regime_el(env, mmu_idx);
1004    uint64_t descaddrmask;
1005    bool aarch64 = arm_el_is_aa64(env, el);
1006    bool guarded = false;
1007
1008    /* TODO: This code does not support shareability levels. */
1009    if (aarch64) {
1010        int ps;
1011
1012        param = aa64_va_parameters(env, address, mmu_idx,
1013                                   access_type != MMU_INST_FETCH);
1014        level = 0;
1015
1016        /*
1017         * If TxSZ is programmed to a value larger than the maximum,
1018         * or smaller than the effective minimum, it is IMPLEMENTATION
1019         * DEFINED whether we behave as if the field were programmed
1020         * within bounds, or if a level 0 Translation fault is generated.
1021         *
1022         * With FEAT_LVA, fault on less than minimum becomes required,
1023         * so our choice is to always raise the fault.
1024         */
1025        if (param.tsz_oob) {
1026            fault_type = ARMFault_Translation;
1027            goto do_fault;
1028        }
1029
1030        addrsize = 64 - 8 * param.tbi;
1031        inputsize = 64 - param.tsz;
1032
1033        /*
1034         * Bound PS by PARANGE to find the effective output address size.
1035         * ID_AA64MMFR0 is a read-only register so values outside of the
1036         * supported mappings can be considered an implementation error.
1037         */
1038        ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1039        ps = MIN(ps, param.ps);
1040        assert(ps < ARRAY_SIZE(pamax_map));
1041        outputsize = pamax_map[ps];
1042    } else {
1043        param = aa32_va_parameters(env, address, mmu_idx);
1044        level = 1;
1045        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
1046        inputsize = addrsize - param.tsz;
1047        outputsize = 40;
1048    }
1049
1050    /*
1051     * We determined the region when collecting the parameters, but we
1052     * have not yet validated that the address is valid for the region.
1053     * Extract the top bits and verify that they all match select.
1054     *
1055     * For aa32, if inputsize == addrsize, then we have selected the
1056     * region by exclusion in aa32_va_parameters and there is no more
1057     * validation to do here.
1058     */
1059    if (inputsize < addrsize) {
1060        target_ulong top_bits = sextract64(address, inputsize,
1061                                           addrsize - inputsize);
1062        if (-top_bits != param.select) {
1063            /* The gap between the two regions is a Translation fault */
1064            fault_type = ARMFault_Translation;
1065            goto do_fault;
1066        }
1067    }
1068
1069    if (param.using64k) {
1070        stride = 13;
1071    } else if (param.using16k) {
1072        stride = 11;
1073    } else {
1074        stride = 9;
1075    }
1076
1077    /*
1078     * Note that QEMU ignores shareability and cacheability attributes,
1079     * so we don't need to do anything with the SH, ORGN, IRGN fields
1080     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
1081     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
1082     * implement any ASID-like capability so we can ignore it (instead
1083     * we will always flush the TLB any time the ASID is changed).
1084     */
1085    ttbr = regime_ttbr(env, mmu_idx, param.select);
1086
1087    /*
1088     * Here we should have set up all the parameters for the translation:
1089     * inputsize, ttbr, epd, stride, tbi
1090     */
1091
1092    if (param.epd) {
1093        /*
1094         * Translation table walk disabled => Translation fault on TLB miss
1095         * Note: This is always 0 on 64-bit EL2 and EL3.
1096         */
1097        goto do_fault;
1098    }
1099
1100    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
1101        /*
1102         * The starting level depends on the virtual address size (which can
1103         * be up to 48 bits) and the translation granule size. It indicates
1104         * the number of strides (stride bits at a time) needed to
1105         * consume the bits of the input address. In the pseudocode this is:
1106         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
1107         * where their 'inputsize' is our 'inputsize', 'grainsize' is
1108         * our 'stride + 3' and 'stride' is our 'stride'.
1109         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
1110         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
1111         * = 4 - (inputsize - 4) / stride;
1112         */
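        /*
         * Worked example: with 4KB granules (stride == 9) and a 48-bit
         * input range, level = 4 - (48 - 4) / 9 = 0, i.e. four lookups
         * of 9 bits each plus the final 12-bit page offset.
         */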
1113        level = 4 - (inputsize - 4) / stride;
1114    } else {
1115        /*
1116         * For stage 2 translations the starting level is specified by the
1117         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
1118         */
1119        uint32_t sl0 = extract32(tcr, 6, 2);
1120        uint32_t sl2 = extract64(tcr, 33, 1);
1121        uint32_t startlevel;
1122        bool ok;
1123
1124        /* SL2 is RES0 unless DS=1 & 4kb granule. */
1125        if (param.ds && stride == 9 && sl2) {
1126            if (sl0 != 0) {
1127                level = 0;
1128                fault_type = ARMFault_Translation;
1129                goto do_fault;
1130            }
1131            startlevel = -1;
1132        } else if (!aarch64 || stride == 9) {
1133            /* AArch32 or 4KB pages */
1134            startlevel = 2 - sl0;
1135
1136            if (cpu_isar_feature(aa64_st, cpu)) {
1137                startlevel &= 3;
1138            }
1139        } else {
1140            /* 16KB or 64KB pages */
1141            startlevel = 3 - sl0;
1142        }
1143
1144        /* Check that the starting level is valid. */
1145        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
1146                                inputsize, stride, outputsize);
1147        if (!ok) {
1148            fault_type = ARMFault_Translation;
1149            goto do_fault;
1150        }
1151        level = startlevel;
1152    }
1153
1154    indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
1155    indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
1156
1157    /* Now we can extract the actual base address from the TTBR */
1158    descaddr = extract64(ttbr, 0, 48);
1159
1160    /*
1161     * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
1162     *
1163     * Otherwise, if the base address is out of range, raise AddressSizeFault.
1164     * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
1165     * but we've just cleared the bits above 47, so simplify the test.
1166     */
1167    if (outputsize > 48) {
1168        descaddr |= extract64(ttbr, 2, 4) << 48;
1169    } else if (descaddr >> outputsize) {
1170        level = 0;
1171        fault_type = ARMFault_AddressSize;
1172        goto do_fault;
1173    }
1174
1175    /*
1176     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
1177     * and also to mask out CnP (bit 0) which could validly be non-zero.
1178     */
1179    descaddr &= ~indexmask;
1180
1181    /*
1182     * For AArch32, the address field in the descriptor goes up to bit 39
1183     * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
1184     * or an AddressSize fault is raised.  So for v8 we extract those SBZ
1185     * bits as part of the address, which will be checked via outputsize.
1186     * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
1187     * the highest bits of a 52-bit output are placed elsewhere.
1188     */
1189    if (param.ds) {
1190        descaddrmask = MAKE_64BIT_MASK(0, 50);
1191    } else if (arm_feature(env, ARM_FEATURE_V8)) {
1192        descaddrmask = MAKE_64BIT_MASK(0, 48);
1193    } else {
1194        descaddrmask = MAKE_64BIT_MASK(0, 40);
1195    }
1196    descaddrmask &= ~indexmask_grainsize;
1197
1198    /*
1199     * Secure accesses start with the page table in secure memory and
1200     * can be downgraded to non-secure at any step. Non-secure accesses
1201     * remain non-secure. We implement this by just ORing in the NSTable/NS
1202     * bits at each step.
1203     */
1204    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
1205    for (;;) {
1206        uint64_t descriptor;
1207        bool nstable;
1208
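        /*
         * OR in the index bits for this level: they land at bit 3 and
         * above, i.e. already scaled by the 8-byte descriptor size; the
         * ~7 mask below drops the stray bits beneath the index so that
         * descaddr points at an aligned descriptor.
         */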
1209        descaddr |= (address >> (stride * (4 - level))) & indexmask;
1210        descaddr &= ~7ULL;
1211        nstable = extract32(tableattrs, 4, 1);
1212        descriptor = arm_ldq_ptw(env, descaddr, !nstable, mmu_idx, fi);
1213        if (fi->type != ARMFault_None) {
1214            goto do_fault;
1215        }
1216
1217        if (!(descriptor & 1) ||
1218            (!(descriptor & 2) && (level == 3))) {
1219            /* Invalid, or the Reserved level 3 encoding */
1220            goto do_fault;
1221        }
1222
1223        descaddr = descriptor & descaddrmask;
1224
1225        /*
1226         * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
1227         * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
1228         * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
1229         * raise AddressSizeFault.
1230         */
1231        if (outputsize > 48) {
1232            if (param.ds) {
1233                descaddr |= extract64(descriptor, 8, 2) << 50;
1234            } else {
1235                descaddr |= extract64(descriptor, 12, 4) << 48;
1236            }
1237        } else if (descaddr >> outputsize) {
1238            fault_type = ARMFault_AddressSize;
1239            goto do_fault;
1240        }
1241
1242        if ((descriptor & 2) && (level < 3)) {
1243            /*
1244             * Table entry. The top five bits are attributes which may
1245             * propagate down through lower levels of the table (and
1246             * which are all arranged so that 0 means "no effect", so
1247             * we can gather them up by ORing in the bits at each level).
1248             */
1249            tableattrs |= extract64(descriptor, 59, 5);
1250            level++;
1251            indexmask = indexmask_grainsize;
1252            continue;
1253        }
1254        /*
1255         * Block entry at level 1 or 2, or page entry at level 3.
1256         * These are basically the same thing, although the number
1257         * of bits we pull in from the vaddr varies. Note that although
1258         * descaddrmask masks enough of the low bits of the descriptor
1259         * to give a correct page or table address, the address field
1260         * in a block descriptor is smaller; so we need to explicitly
1261         * clear the lower bits here before ORing in the low vaddr bits.
1262         */
1263        page_size = (1ULL << ((stride * (4 - level)) + 3));
1264        descaddr &= ~(hwaddr)(page_size - 1);
1265        descaddr |= (address & (page_size - 1));
1266        /* Extract attributes from the descriptor */
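        /*
         * Lower attributes (descriptor bits [11:2]) land in attrs[9:0],
         * upper attributes (descriptor bits [63:52]) in attrs[21:10];
         * e.g. the access flag (descriptor bit 10) becomes attrs bit 8.
         */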
1267        attrs = extract64(descriptor, 2, 10)
1268            | (extract64(descriptor, 52, 12) << 10);
1269
1270        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1271            /* Stage 2 table descriptors do not include any attribute fields */
1272            break;
1273        }
1274        /* Merge in attributes from table descriptors */
1275        attrs |= nstable << 3; /* NS */
1276        guarded = extract64(descriptor, 50, 1);  /* GP */
1277        if (param.hpd) {
1278            /* HPD disables all the table attributes except NSTable.  */
1279            break;
1280        }
1281        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
1282        /*
1283         * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
1284         * means "force PL1 access only", which means forcing AP[1] to 0.
1285         */
1286        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
1287        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
1288        break;
1289    }
1290    /*
1291     * Here descaddr is the final physical address, and attributes
1292     * are all in attrs.
1293     */
1294    fault_type = ARMFault_AccessFlag;
1295    if ((attrs & (1 << 8)) == 0) {
1296        /* Access flag */
1297        goto do_fault;
1298    }
1299
1300    ap = extract32(attrs, 4, 2);
1301
1302    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1303        ns = mmu_idx == ARMMMUIdx_Stage2;
1304        xn = extract32(attrs, 11, 2);
1305        *prot = get_S2prot(env, ap, xn, s1_is_el0);
1306    } else {
1307        ns = extract32(attrs, 3, 1);
1308        xn = extract32(attrs, 12, 1);
1309        pxn = extract32(attrs, 11, 1);
1310        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
1311    }
1312
1313    fault_type = ARMFault_Permission;
1314    if (!(*prot & (1 << access_type))) {
1315        goto do_fault;
1316    }
1317
1318    if (ns) {
1319        /*
1320         * The NS bit will (as required by the architecture) have no effect if
1321         * the CPU doesn't support TZ or this is a non-secure translation
1322         * regime, because the attribute will already be non-secure.
1323         */
1324        txattrs->secure = false;
1325    }
1326    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
1327    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
1328        arm_tlb_bti_gp(txattrs) = true;
1329    }
1330
1331    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1332        cacheattrs->is_s2_format = true;
1333        cacheattrs->attrs = extract32(attrs, 0, 4);
1334    } else {
1335        /* Index into MAIR registers for cache attributes */
1336        uint8_t attrindx = extract32(attrs, 0, 3);
1337        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
1338        assert(attrindx <= 7);
1339        cacheattrs->is_s2_format = false;
1340        cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
1341    }
1342
1343    /*
1344     * For FEAT_LPA2 and effective DS, the SH field in the attributes
1345     * was re-purposed for output address bits.  The SH attribute in
1346     * that case comes from TCR_ELx, which we extracted earlier.
1347     */
1348    if (param.ds) {
1349        cacheattrs->shareability = param.sh;
1350    } else {
1351        cacheattrs->shareability = extract32(attrs, 6, 2);
1352    }
1353
1354    *phys_ptr = descaddr;
1355    *page_size_ptr = page_size;
1356    return false;
1357
1358do_fault:
1359    fi->type = fault_type;
1360    fi->level = level;
1361    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
1362    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
1363                               mmu_idx == ARMMMUIdx_Stage2_S);
1364    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
1365    return true;
1366}
1367
1368static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
1369                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1370                                 hwaddr *phys_ptr, int *prot,
1371                                 ARMMMUFaultInfo *fi)
1372{
1373    int n;
1374    uint32_t mask;
1375    uint32_t base;
1376    bool is_user = regime_is_user(env, mmu_idx);
1377
1378    if (regime_translation_disabled(env, mmu_idx)) {
1379        /* MPU disabled.  */
1380        *phys_ptr = address;
1381        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1382        return false;
1383    }
1384
1385    *phys_ptr = address;
1386    for (n = 7; n >= 0; n--) {
1387        base = env->cp15.c6_region[n];
1388        if ((base & 1) == 0) {
1389            continue;
1390        }
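        /*
         * Bits [5:1] of the region register encode the region size as
         * 2^(N+1) bytes; the two shifts below build the matching
         * address mask.
         */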
1391        mask = 1 << ((base >> 1) & 0x1f);
1392        /* Keep this shift separate from the above to avoid an
1393           (undefined) << 32.  */
1394        mask = (mask << 1) - 1;
1395        if (((base ^ address) & ~mask) == 0) {
1396            break;
1397        }
1398    }
1399    if (n < 0) {
1400        fi->type = ARMFault_Background;
1401        return true;
1402    }
1403
1404    if (access_type == MMU_INST_FETCH) {
1405        mask = env->cp15.pmsav5_insn_ap;
1406    } else {
1407        mask = env->cp15.pmsav5_data_ap;
1408    }
1409    mask = (mask >> (n * 4)) & 0xf;
1410    switch (mask) {
1411    case 0:
1412        fi->type = ARMFault_Permission;
1413        fi->level = 1;
1414        return true;
1415    case 1:
1416        if (is_user) {
1417            fi->type = ARMFault_Permission;
1418            fi->level = 1;
1419            return true;
1420        }
1421        *prot = PAGE_READ | PAGE_WRITE;
1422        break;
1423    case 2:
1424        *prot = PAGE_READ;
1425        if (!is_user) {
1426            *prot |= PAGE_WRITE;
1427        }
1428        break;
1429    case 3:
1430        *prot = PAGE_READ | PAGE_WRITE;
1431        break;
1432    case 5:
1433        if (is_user) {
1434            fi->type = ARMFault_Permission;
1435            fi->level = 1;
1436            return true;
1437        }
1438        *prot = PAGE_READ;
1439        break;
1440    case 6:
1441        *prot = PAGE_READ;
1442        break;
1443    default:
1444        /* Bad permission.  */
1445        fi->type = ARMFault_Permission;
1446        fi->level = 1;
1447        return true;
1448    }
1449    *prot |= PAGE_EXEC;
1450    return false;
1451}
1452
1453static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
1454                                         int32_t address, int *prot)
1455{
1456    if (!arm_feature(env, ARM_FEATURE_M)) {
1457        *prot = PAGE_READ | PAGE_WRITE;
1458        switch (address) {
1459        case 0xF0000000 ... 0xFFFFFFFF:
1460            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
1461                /* hivecs execing is ok */
1462                *prot |= PAGE_EXEC;
1463            }
1464            break;
1465        case 0x00000000 ... 0x7FFFFFFF:
1466            *prot |= PAGE_EXEC;
1467            break;
1468        }
1469    } else {
1470        /* Default system address map for M profile cores.
1471         * The architecture specifies which regions are execute-never;
1472         * at the MPU level no other checks are defined.
1473         */
1474        switch (address) {
1475        case 0x00000000 ... 0x1fffffff: /* ROM */
1476        case 0x20000000 ... 0x3fffffff: /* SRAM */
1477        case 0x60000000 ... 0x7fffffff: /* RAM */
1478        case 0x80000000 ... 0x9fffffff: /* RAM */
1479            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1480            break;
1481        case 0x40000000 ... 0x5fffffff: /* Peripheral */
1482        case 0xa0000000 ... 0xbfffffff: /* Device */
1483        case 0xc0000000 ... 0xdfffffff: /* Device */
1484        case 0xe0000000 ... 0xffffffff: /* System */
1485            *prot = PAGE_READ | PAGE_WRITE;
1486            break;
1487        default:
1488            g_assert_not_reached();
1489        }
1490    }
1491}
1492
1493static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
1494{
1495    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
1496    return arm_feature(env, ARM_FEATURE_M) &&
1497        extract32(address, 20, 12) == 0xe00;
1498}
1499
1500static bool m_is_system_region(CPUARMState *env, uint32_t address)
1501{
1502    /*
1503     * True if address is in the M profile system region
1504     * 0xe0000000 - 0xffffffff
1505     */
1506    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
1507}
1508
1509static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1510                                         bool is_user)
1511{
1512    /*
1513     * Return true if we should use the default memory map as a
1514     * "background" region if there are no hits against any MPU regions.
1515     */
1516    CPUARMState *env = &cpu->env;
1517
1518    if (is_user) {
1519        return false;
1520    }
1521
1522    if (arm_feature(env, ARM_FEATURE_M)) {
1523        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
1524            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
1525    } else {
1526        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
1527    }
1528}
1529
1530static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
1531                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1532                                 hwaddr *phys_ptr, int *prot,
1533                                 target_ulong *page_size,
1534                                 ARMMMUFaultInfo *fi)
1535{
1536    ARMCPU *cpu = env_archcpu(env);
1537    int n;
1538    bool is_user = regime_is_user(env, mmu_idx);
1539
1540    *phys_ptr = address;
1541    *page_size = TARGET_PAGE_SIZE;
1542    *prot = 0;
1543
1544    if (regime_translation_disabled(env, mmu_idx) ||
1545        m_is_ppb_region(env, address)) {
1546        /*
1547         * MPU disabled or M profile PPB access: use default memory map.
1548         * The other case which uses the default memory map in the
1549         * v7M ARM ARM pseudocode is exception vector reads from the vector
1550         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
1551         * which always does a direct read using address_space_ldl(), rather
1552         * than going via this function, so we don't need to check that here.
1553         */
1554        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
1555    } else { /* MPU enabled */
1556        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1557            /* region search */
1558            uint32_t base = env->pmsav7.drbar[n];
1559            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
1560            uint32_t rmask;
1561            bool srdis = false;
1562
1563            if (!(env->pmsav7.drsr[n] & 0x1)) {
1564                continue;
1565            }
1566
1567            if (!rsize) {
1568                qemu_log_mask(LOG_GUEST_ERROR,
1569                              "DRSR[%d]: Rsize field cannot be 0\n", n);
1570                continue;
1571            }
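                /* DRSR.SIZE encodes the region as 2^(SIZE+1) bytes; build the mask */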
1572            rsize++;
1573            rmask = (1ull << rsize) - 1;
1574
1575            if (base & rmask) {
1576                qemu_log_mask(LOG_GUEST_ERROR,
1577                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
1578                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
1579                              n, base, rmask);
1580                continue;
1581            }
1582
1583            if (address < base || address > base + rmask) {
1584                /*
1585                 * Address not in this region. We must check whether the
1586                 * region covers addresses in the same page as our address.
1587                 * In that case we must not report a size that covers the
1588                 * whole page for a subsequent hit against a different MPU
1589                 * region or the background region, because it would result in
1590                 * incorrect TLB hits for subsequent accesses to addresses that
1591                 * are in this MPU region.
1592                 */
1593                if (ranges_overlap(base, rmask,
1594                                   address & TARGET_PAGE_MASK,
1595                                   TARGET_PAGE_SIZE)) {
1596                    *page_size = 1;
1597                }
1598                continue;
1599            }
1600
1601            /* Region matched */
1602
1603            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
1604                int i, snd;
1605                uint32_t srdis_mask;
1606
1607                rsize -= 3; /* sub region size (power of 2) */
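                    /* snd is the number (0..7) of the subregion containing the address */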
1608                snd = ((address - base) >> rsize) & 0x7;
1609                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
1610
1611                srdis_mask = srdis ? 0x3 : 0x0;
1612                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
1613                    /*
1614                     * This will check in groups of 2, 4 and then 8, whether
1615                     * the subregion bits are consistent. rsize is incremented
1616                     * back up to give the region size, considering consistent
1617                     * adjacent subregions as one region. Stop testing if rsize
1618                     * is already big enough for an entire QEMU page.
1619                     */
1620                    int snd_rounded = snd & ~(i - 1);
1621                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
1622                                                     snd_rounded + 8, i);
1623                    if (srdis_mask ^ srdis_multi) {
1624                        break;
1625                    }
1626                    srdis_mask = (srdis_mask << i) | srdis_mask;
1627                    rsize++;
1628                }
1629            }
1630            if (srdis) {
1631                continue;
1632            }
1633            if (rsize < TARGET_PAGE_BITS) {
1634                *page_size = 1 << rsize;
1635            }
1636            break;
1637        }
1638
1639        if (n == -1) { /* no hits */
1640            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
1641                /* background fault */
1642                fi->type = ARMFault_Background;
1643                return true;
1644            }
1645            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
1646        } else { /* an MPU hit! */
1647            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
1648            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
1649
1650            if (m_is_system_region(env, address)) {
1651                /* System space is always execute never */
1652                xn = 1;
1653            }
1654
1655            if (is_user) { /* User mode AP bit decoding */
1656                switch (ap) {
1657                case 0:
1658                case 1:
1659                case 5:
1660                    break; /* no access */
1661                case 3:
1662                    *prot |= PAGE_WRITE;
1663                    /* fall through */
1664                case 2:
1665                case 6:
1666                    *prot |= PAGE_READ | PAGE_EXEC;
1667                    break;
1668                case 7:
1669                    /* for v7M, same as 6; for R profile a reserved value */
1670                    if (arm_feature(env, ARM_FEATURE_M)) {
1671                        *prot |= PAGE_READ | PAGE_EXEC;
1672                        break;
1673                    }
1674                    /* fall through */
1675                default:
1676                    qemu_log_mask(LOG_GUEST_ERROR,
1677                                  "DRACR[%d]: Bad value for AP bits: 0x%"
1678                                  PRIx32 "\n", n, ap);
1679                }
1680            } else { /* Priv. mode AP bits decoding */
1681                switch (ap) {
1682                case 0:
1683                    break; /* no access */
1684                case 1:
1685                case 2:
1686                case 3:
1687                    *prot |= PAGE_WRITE;
1688                    /* fall through */
1689                case 5:
1690                case 6:
1691                    *prot |= PAGE_READ | PAGE_EXEC;
1692                    break;
1693                case 7:
1694                    /* for v7M, same as 6; for R profile a reserved value */
1695                    if (arm_feature(env, ARM_FEATURE_M)) {
1696                        *prot |= PAGE_READ | PAGE_EXEC;
1697                        break;
1698                    }
1699                    /* fall through */
1700                default:
1701                    qemu_log_mask(LOG_GUEST_ERROR,
1702                                  "DRACR[%d]: Bad value for AP bits: 0x%"
1703                                  PRIx32 "\n", n, ap);
1704                }
1705            }
1706
1707            /* execute never */
1708            if (xn) {
1709                *prot &= ~PAGE_EXEC;
1710            }
1711        }
1712    }
1713
1714    fi->type = ARMFault_Permission;
1715    fi->level = 1;
1716    return !(*prot & (1 << access_type));
1717}
1718
1719bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1720                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
1721                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
1722                       int *prot, bool *is_subpage,
1723                       ARMMMUFaultInfo *fi, uint32_t *mregion)
1724{
1725    /*
1726     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
1727     * that a full phys-to-virt translation does).
1728     * mregion is (if not NULL) set to the region number which matched,
1729     * or -1 if no region number is returned (MPU off, address did not
1730     * hit a region, address hit in multiple regions).
1731     * We set is_subpage to true if the region hit doesn't cover the
1732     * entire TARGET_PAGE the address is within.
1733     */
1734    ARMCPU *cpu = env_archcpu(env);
1735    bool is_user = regime_is_user(env, mmu_idx);
1736    uint32_t secure = regime_is_secure(env, mmu_idx);
1737    int n;
1738    int matchregion = -1;
1739    bool hit = false;
1740    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1741    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1742
1743    *is_subpage = false;
1744    *phys_ptr = address;
1745    *prot = 0;
1746    if (mregion) {
1747        *mregion = -1;
1748    }
1749
1750    /*
1751     * Unlike the ARM ARM pseudocode, we don't need to check whether this
1752     * was an exception vector read from the vector table (which is always
1753     * done using the default system address map), because those accesses
1754     * are done in arm_v7m_load_vector(), which always does a direct
1755     * read using address_space_ldl(), rather than going via this function.
1756     */
1757    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
1758        hit = true;
1759    } else if (m_is_ppb_region(env, address)) {
1760        hit = true;
1761    } else {
1762        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
1763            hit = true;
1764        }
1765
1766        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1767            /* region search */
1768            /*
1769             * Note that the base address is bits [31:5] from the register
1770             * with bits [4:0] all zeroes, but the limit address is bits
1771             * [31:5] from the register with bits [4:0] all ones.
1772             */
1773            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
1774            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
1775
1776            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
1777                /* Region disabled */
1778                continue;
1779            }
1780
1781            if (address < base || address > limit) {
1782                /*
1783                 * Address not in this region. We must check whether the
1784                 * region covers addresses in the same page as our address.
1785                 * In that case we must not report a size that covers the
1786                 * whole page for a subsequent hit against a different MPU
1787                 * region or the background region, because it would result in
1788                 * incorrect TLB hits for subsequent accesses to addresses that
1789                 * are in this MPU region.
1790                 */
1791                if (limit >= base &&
1792                    ranges_overlap(base, limit - base + 1,
1793                                   addr_page_base,
1794                                   TARGET_PAGE_SIZE)) {
1795                    *is_subpage = true;
1796                }
1797                continue;
1798            }
1799
1800            if (base > addr_page_base || limit < addr_page_limit) {
1801                *is_subpage = true;
1802            }
1803
1804            if (matchregion != -1) {
1805                /*
1806                 * Multiple regions match -- always a failure (unlike
1807                 * PMSAv7 where highest-numbered-region wins)
1808                 */
1809                fi->type = ARMFault_Permission;
1810                fi->level = 1;
1811                return true;
1812            }
1813
1814            matchregion = n;
1815            hit = true;
1816        }
1817    }
1818
1819    if (!hit) {
1820        /* background fault */
1821        fi->type = ARMFault_Background;
1822        return true;
1823    }
1824
1825    if (matchregion == -1) {
1826        /* hit using the background region */
1827        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
1828    } else {
1829        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
1830        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
1831        bool pxn = false;
1832
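            /* v8.1-M adds a privileged-execute-never (PXN) bit in MPU_RLAR bit 4 */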
1833        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1834            pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
1835        }
1836
1837        if (m_is_system_region(env, address)) {
1838            /* System space is always execute never */
1839            xn = 1;
1840        }
1841
1842        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
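            /*
             * Execute is permitted only if some access is allowed at all,
             * the region is not XN and, for privileged accesses, not PXN.
             */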
1843        if (*prot && !xn && !(pxn && !is_user)) {
1844            *prot |= PAGE_EXEC;
1845        }
1846        /*
1847         * We don't need to look the attribute up in the MAIR0/MAIR1
1848         * registers because that only tells us about cacheability.
1849         */
1850        if (mregion) {
1851            *mregion = matchregion;
1852        }
1853    }
1854
1855    fi->type = ARMFault_Permission;
1856    fi->level = 1;
1857    return !(*prot & (1 << access_type));
1858}
1859
1860static bool v8m_is_sau_exempt(CPUARMState *env,
1861                              uint32_t address, MMUAccessType access_type)
1862{
1863    /*
1864     * The architecture specifies that certain address ranges are
1865     * exempt from v8M SAU/IDAU checks.
1866     */
1867    return
1868        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
1869        (address >= 0xe0000000 && address <= 0xe0002fff) ||
1870        (address >= 0xe000e000 && address <= 0xe000efff) ||
1871        (address >= 0xe002e000 && address <= 0xe002efff) ||
1872        (address >= 0xe0040000 && address <= 0xe0041fff) ||
1873        (address >= 0xe00ff000 && address <= 0xe00fffff);
1874}
1875
1876void v8m_security_lookup(CPUARMState *env, uint32_t address,
1877                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
1878                         V8M_SAttributes *sattrs)
1879{
1880    /*
1881     * Look up the security attributes for this address. Compare the
1882     * pseudocode SecurityCheck() function.
1883     * We assume the caller has zero-initialized *sattrs.
1884     */
1885    ARMCPU *cpu = env_archcpu(env);
1886    int r;
1887    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
1888    int idau_region = IREGION_NOTVALID;
1889    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1890    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1891
1892    if (cpu->idau) {
1893        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
1894        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
1895
1896        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
1897                   &idau_nsc);
1898    }
1899
1900    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
1901        /* 0xf0000000..0xffffffff is always S for insn fetches */
1902        return;
1903    }
1904
1905    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
1906        sattrs->ns = !regime_is_secure(env, mmu_idx);
1907        return;
1908    }
1909
1910    if (idau_region != IREGION_NOTVALID) {
1911        sattrs->irvalid = true;
1912        sattrs->iregion = idau_region;
1913    }
1914
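        /* SAU_CTRL: bit 0 is ENABLE, bit 1 is ALLNS */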
1915    switch (env->sau.ctrl & 3) {
1916    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
1917        break;
1918    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
1919        sattrs->ns = true;
1920        break;
1921    default: /* SAU.ENABLE == 1 */
1922        for (r = 0; r < cpu->sau_sregion; r++) {
1923            if (env->sau.rlar[r] & 1) {
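                    /*
                     * As with the v8M MPU, the region base is RBAR[31:5] with
                     * bits [4:0] zero, and the limit is RLAR[31:5] with
                     * bits [4:0] all ones.
                     */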
1924                uint32_t base = env->sau.rbar[r] & ~0x1f;
1925                uint32_t limit = env->sau.rlar[r] | 0x1f;
1926
1927                if (base <= address && limit >= address) {
1928                    if (base > addr_page_base || limit < addr_page_limit) {
1929                        sattrs->subpage = true;
1930                    }
1931                    if (sattrs->srvalid) {
1932                        /*
1933                         * If we hit in more than one region then we must report
1934                         * as Secure, not NS-Callable, with no valid region
1935                         * number info.
1936                         */
1937                        sattrs->ns = false;
1938                        sattrs->nsc = false;
1939                        sattrs->sregion = 0;
1940                        sattrs->srvalid = false;
1941                        break;
1942                    } else {
1943                        if (env->sau.rlar[r] & 2) {
1944                            sattrs->nsc = true;
1945                        } else {
1946                            sattrs->ns = true;
1947                        }
1948                        sattrs->srvalid = true;
1949                        sattrs->sregion = r;
1950                    }
1951                } else {
1952                    /*
1953                     * Address not in this region. We must check whether the
1954                     * region covers addresses in the same page as our address.
1955                     * In that case we must not report a size that covers the
1956                     * whole page for a subsequent hit against a different MPU
1957                     * region or the background region, because it would result
1958                     * in incorrect TLB hits for subsequent accesses to
1959                     * addresses that are in this MPU region.
1960                     */
1961                    if (limit >= base &&
1962                        ranges_overlap(base, limit - base + 1,
1963                                       addr_page_base,
1964                                       TARGET_PAGE_SIZE)) {
1965                        sattrs->subpage = true;
1966                    }
1967                }
1968            }
1969        }
1970        break;
1971    }
1972
1973    /*
1974     * The IDAU will override the SAU lookup results if it specifies
1975     * higher security than the SAU does.
1976     */
1977    if (!idau_ns) {
1978        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
1979            sattrs->ns = false;
1980            sattrs->nsc = idau_nsc;
1981        }
1982    }
1983}
1984
1985static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
1986                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1987                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
1988                                 int *prot, target_ulong *page_size,
1989                                 ARMMMUFaultInfo *fi)
1990{
1991    uint32_t secure = regime_is_secure(env, mmu_idx);
1992    V8M_SAttributes sattrs = {};
1993    bool ret;
1994    bool mpu_is_subpage;
1995
1996    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1997        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
1998        if (access_type == MMU_INST_FETCH) {
1999            /*
2000             * Instruction fetches always use the MMU bank and the
2001             * transaction attribute determined by the fetch address,
2002             * regardless of CPU state. This is painful for QEMU
2003             * to handle, because it would mean we need to encode
2004             * into the mmu_idx not just the (user, negpri) information
2005             * for the current security state but also that for the
2006             * other security state, which would balloon the number
2007             * of mmu_idx values needed alarmingly.
2008             * Fortunately we can avoid this because it's not actually
2009             * possible to arbitrarily execute code from memory with
2010             * the wrong security attribute: it will always generate
2011             * an exception of some kind or another, apart from the
2012             * special case of an NS CPU executing an SG instruction
2013             * in S&NSC memory. So we always just fail the translation
2014             * here and sort things out in the exception handler
2015             * (including possibly emulating an SG instruction).
2016             */
2017            if (sattrs.ns != !secure) {
2018                if (sattrs.nsc) {
2019                    fi->type = ARMFault_QEMU_NSCExec;
2020                } else {
2021                    fi->type = ARMFault_QEMU_SFault;
2022                }
2023                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
2024                *phys_ptr = address;
2025                *prot = 0;
2026                return true;
2027            }
2028        } else {
2029            /*
2030             * For data accesses we always use the MMU bank indicated
2031             * by the current CPU state, but the security attributes
2032             * might downgrade a secure access to nonsecure.
2033             */
2034            if (sattrs.ns) {
2035                txattrs->secure = false;
2036            } else if (!secure) {
2037                /*
2038                 * NS access to S memory must fault.
2039                 * Architecturally we should first check whether the
2040                 * MPU information for this address indicates that we
2041                 * are doing an unaligned access to Device memory, which
2042                 * should generate a UsageFault instead. QEMU does not
2043                 * currently check for that kind of unaligned access though.
2044                 * If we added it we would need to do so as a special case
2045                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2046                 */
2047                fi->type = ARMFault_QEMU_SFault;
2048                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
2049                *phys_ptr = address;
2050                *prot = 0;
2051                return true;
2052            }
2053        }
2054    }
2055
2056    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
2057                            txattrs, prot, &mpu_is_subpage, fi, NULL);
2058    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
2059    return ret;
2060}
2061
2062/*
2063 * Translate from the 4-bit stage 2 representation of
2064 * memory attributes (without cache-allocation hints) to
2065 * the 8-bit representation of the stage 1 MAIR registers
2066 * (which includes allocation hints).
2067 *
2068 * ref: shared/translation/attrs/S2AttrDecode()
2069 *      .../S2ConvertAttrsHints()
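     *
     * For example, s2attrs 0xf (Normal, Write-Back in both halves) converts
     * to 0xff (Normal WB with RW-allocate hints), assuming HCR_EL2.CD == 0.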
2070 */
2071static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
2072{
2073    uint8_t hiattr = extract32(s2attrs, 2, 2);
2074    uint8_t loattr = extract32(s2attrs, 0, 2);
2075    uint8_t hihint = 0, lohint = 0;
2076
2077    if (hiattr != 0) { /* normal memory */
2078        if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
2079            hiattr = loattr = 1; /* non-cacheable */
2080        } else {
2081            if (hiattr != 1) { /* Write-through or write-back */
2082                hihint = 3; /* RW allocate */
2083            }
2084            if (loattr != 1) { /* Write-through or write-back */
2085                lohint = 3; /* RW allocate */
2086            }
2087        }
2088    }
2089
2090    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
2091}
2092
2093/*
2094 * Combine either inner or outer cacheability attributes for normal
2095 * memory, according to table D4-42 and pseudocode procedure
2096 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
2097 *
2098 * NB: only stage 1 includes allocation hints (RW bits), leading to
2099 * some asymmetry.
2100 */
2101static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2102{
2103    if (s1 == 4 || s2 == 4) {
2104        /* non-cacheable has precedence */
2105        return 4;
2106    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2107        /* stage 1 write-through takes precedence */
2108        return s1;
2109    } else if (extract32(s2, 2, 2) == 2) {
2110        /* stage 2 write-through takes precedence, but the allocation hint
2111         * is still taken from stage 1
2112         */
2113        return (2 << 2) | extract32(s1, 0, 2);
2114    } else { /* write-back */
2115        return s1;
2116    }
2117}
2118
2119/*
2120 * Combine the memory type and cacheability attributes of
2121 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
2122 * combined attributes in MAIR_EL1 format.
2123 */
2124static uint8_t combined_attrs_nofwb(CPUARMState *env,
2125                                    ARMCacheAttrs s1, ARMCacheAttrs s2)
2126{
2127    uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
2128
2129    s2_mair_attrs = convert_stage2_attrs(env, s2.attrs);
2130
2131    s1lo = extract32(s1.attrs, 0, 4);
2132    s2lo = extract32(s2_mair_attrs, 0, 4);
2133    s1hi = extract32(s1.attrs, 4, 4);
2134    s2hi = extract32(s2_mair_attrs, 4, 4);
2135
2136    /* Combine memory type and cacheability attributes */
2137    if (s1hi == 0 || s2hi == 0) {
2138        /* Device has precedence over normal */
2139        if (s1lo == 0 || s2lo == 0) {
2140            /* nGnRnE has precedence over anything */
2141            ret_attrs = 0;
2142        } else if (s1lo == 4 || s2lo == 4) {
2143            /* non-Reordering has precedence over Reordering */
2144            ret_attrs = 4;  /* nGnRE */
2145        } else if (s1lo == 8 || s2lo == 8) {
2146            /* non-Gathering has precedence over Gathering */
2147            ret_attrs = 8;  /* nGRE */
2148        } else {
2149            ret_attrs = 0xc; /* GRE */
2150        }
2151    } else { /* Normal memory */
2152        /* Outer/inner cacheability combine independently */
2153        ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
2154                  | combine_cacheattr_nibble(s1lo, s2lo);
2155    }
2156    return ret_attrs;
2157}
2158
2159static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2160{
2161    /*
2162     * Given the 4 bits specifying the outer or inner cacheability
2163     * in MAIR format, return a value specifying Normal Write-Back,
2164     * with the allocation and transient hints taken from the input
2165     * if the input specified some kind of cacheable attribute.
2166     */
2167    if (attr == 0 || attr == 4) {
2168        /*
2169         * 0 == an UNPREDICTABLE encoding
2170         * 4 == Non-cacheable
2171         * Either way, force Write-Back RW allocate non-transient
2172         */
2173        return 0xf;
2174    }
2175    /* Change WriteThrough to WriteBack, keep allocation and transient hints */
2176    return attr | 4;
2177}
2178
2179/*
2180 * Combine the memory type and cacheability attributes of
2181 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2182 * combined attributes in MAIR_EL1 format.
2183 */
2184static uint8_t combined_attrs_fwb(CPUARMState *env,
2185                                  ARMCacheAttrs s1, ARMCacheAttrs s2)
2186{
2187    switch (s2.attrs) {
2188    case 7:
2189        /* Use stage 1 attributes */
2190        return s1.attrs;
2191    case 6:
2192        /*
2193         * Force Normal Write-Back. Note that if S1 is Normal cacheable
2194         * then we take the allocation hints from it; otherwise it is
2195         * RW allocate, non-transient.
2196         */
2197        if ((s1.attrs & 0xf0) == 0) {
2198            /* S1 is Device */
2199            return 0xff;
2200        }
2201        /* Need to check the Inner and Outer nibbles separately */
2202        return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
2203            force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
2204    case 5:
2205        /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
2206        if ((s1.attrs & 0xf0) == 0) {
2207            return s1.attrs;
2208        }
2209        return 0x44;
2210    case 0 ... 3:
2211        /* Force Device, of subtype specified by S2 */
2212        return s2.attrs << 2;
2213    default:
2214        /*
2215         * RESERVED values (including RES0 descriptor bit [5] being nonzero);
2216         * arbitrarily force Device.
2217         */
2218        return 0;
2219    }
2220}
2221
2222/*
2223 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
2224 * and CombineS1S2Desc()
2225 *
2226 * @env:     CPUARMState
2227 * @s1:      Attributes from stage 1 walk
2228 * @s2:      Attributes from stage 2 walk
2229 */
2230static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
2231                                        ARMCacheAttrs s1, ARMCacheAttrs s2)
2232{
2233    ARMCacheAttrs ret;
2234    bool tagged = false;
2235
2236    assert(s2.is_s2_format && !s1.is_s2_format);
2237    ret.is_s2_format = false;
2238
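        /*
         * MAIR attr 0xf0 is Tagged Normal Write-Back; combine it as plain
         * Normal WB (0xff) and restore the Tagged encoding afterwards if the
         * combined result is still WB.
         */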
2239    if (s1.attrs == 0xf0) {
2240        tagged = true;
2241        s1.attrs = 0xff;
2242    }
2243
2244    /* Combine shareability attributes (table D4-43) */
2245    if (s1.shareability == 2 || s2.shareability == 2) {
2246        /* if either are outer-shareable, the result is outer-shareable */
2247        ret.shareability = 2;
2248    } else if (s1.shareability == 3 || s2.shareability == 3) {
2249        /* if either are inner-shareable, the result is inner-shareable */
2250        ret.shareability = 3;
2251    } else {
2252        /* both non-shareable */
2253        ret.shareability = 0;
2254    }
2255
2256    /* Combine memory type and cacheability attributes */
2257    if (arm_hcr_el2_eff(env) & HCR_FWB) {
2258        ret.attrs = combined_attrs_fwb(env, s1, s2);
2259    } else {
2260        ret.attrs = combined_attrs_nofwb(env, s1, s2);
2261    }
2262
2263    /*
2264     * Any location for which the resultant memory type is any
2265     * type of Device memory is always treated as Outer Shareable.
2266     * Any location for which the resultant memory type is Normal
2267     * Inner Non-cacheable, Outer Non-cacheable is always treated
2268     * as Outer Shareable.
2269     * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
2270     */
2271    if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
2272        ret.shareability = 2;
2273    }
2274
2275    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
2276    if (tagged && ret.attrs == 0xff) {
2277        ret.attrs = 0xf0;
2278    }
2279
2280    return ret;
2281}
2282
2283/**
2284 * get_phys_addr - get the physical address for this virtual address
2285 *
2286 * Find the physical address corresponding to the given virtual address,
2287 * by doing a translation table walk on MMU based systems or using the
2288 * MPU state on MPU based systems.
2289 *
2290 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
2291 * prot and page_size may not be filled in, and the populated fault info (fi)
2292 * provides information on why the translation aborted, in the format of a
2293 * DFSR/IFSR fault register, with the following caveats:
2294 *  * we honour the short vs long DFSR format differences.
2295 *  * the WnR bit is never set (the caller must do this).
2296 *  * for PMSAv5 based systems we don't bother to return a full FSR format
2297 *    value.
2298 *
2299 * @env: CPUARMState
2300 * @address: virtual address to get physical address for
2301 * @access_type: 0 for read, 1 for write, 2 for execute
2302 * @mmu_idx: MMU index indicating required translation regime
2303 * @phys_ptr: set to the physical address corresponding to the virtual address
2304 * @attrs: set to the memory transaction attributes to use
2305 * @prot: set to the permissions for the page containing phys_ptr
2306 * @page_size: set to the size of the page containing phys_ptr
2307 * @fi: set to fault info if the translation fails
2308 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
2309 */
2310bool get_phys_addr(CPUARMState *env, target_ulong address,
2311                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
2312                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
2313                   target_ulong *page_size,
2314                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
2315{
2316    ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
2317
2318    if (mmu_idx != s1_mmu_idx) {
2319        /*
2320         * Call ourselves recursively to do the stage 1 and then stage 2
2321         * translations if mmu_idx is a two-stage regime.
2322         */
2323        if (arm_feature(env, ARM_FEATURE_EL2)) {
2324            hwaddr ipa;
2325            int s2_prot;
2326            int ret;
2327            bool ipa_secure;
2328            ARMCacheAttrs cacheattrs2 = {};
2329            ARMMMUIdx s2_mmu_idx;
2330            bool is_el0;
2331
2332            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
2333                                attrs, prot, page_size, fi, cacheattrs);
2334
2335            /* If S1 fails or S2 is disabled, return early.  */
2336            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
2337                *phys_ptr = ipa;
2338                return ret;
2339            }
2340
2341            ipa_secure = attrs->secure;
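                /*
                 * Select the PA space used for the stage 2 table walk itself:
                 * VSTCR.SW (for Secure IPAs) and VTCR.NSW (for Non-secure
                 * IPAs) send the walk to the Non-secure PA space when set.
                 */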
2342            if (arm_is_secure_below_el3(env)) {
2343                if (ipa_secure) {
2344                    attrs->secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
2345                } else {
2346                    attrs->secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
2347                }
2348            } else {
2349                assert(!ipa_secure);
2350            }
2351
2352            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
2353            is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
2354
2355            /* S1 is done. Now do S2 translation.  */
2356            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
2357                                     phys_ptr, attrs, &s2_prot,
2358                                     page_size, fi, &cacheattrs2);
2359            fi->s2addr = ipa;
2360            /* Combine the S1 and S2 perms.  */
2361            *prot &= s2_prot;
2362
2363            /* If S2 fails, return early.  */
2364            if (ret) {
2365                return ret;
2366            }
2367
2368            /* Combine the S1 and S2 cache attributes. */
2369            if (arm_hcr_el2_eff(env) & HCR_DC) {
2370                /*
2371                 * HCR.DC forces the first stage attributes to
2372                 *  Normal Non-Shareable,
2373                 *  Inner Write-Back Read-Allocate Write-Allocate,
2374                 *  Outer Write-Back Read-Allocate Write-Allocate.
2375                 * Do not overwrite Tagged within attrs.
2376                 */
2377                if (cacheattrs->attrs != 0xf0) {
2378                    cacheattrs->attrs = 0xff;
2379                }
2380                cacheattrs->shareability = 0;
2381            }
2382            *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);
2383
2384            /* Check if IPA translates to secure or non-secure PA space. */
2385            if (arm_is_secure_below_el3(env)) {
2386                if (ipa_secure) {
2387                    attrs->secure =
2388                        !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW));
2389                } else {
2390                    attrs->secure =
2391                        !((env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))
2392                        || (env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)));
2393                }
2394            }
2395            return 0;
2396        } else {
2397            /*
2398             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
2399             */
2400            mmu_idx = stage_1_mmu_idx(mmu_idx);
2401        }
2402    }
2403
2404    /*
2405     * The page table entries may downgrade secure to non-secure, but
2406 * cannot upgrade a non-secure translation regime's attributes
2407     * to secure.
2408     */
2409    attrs->secure = regime_is_secure(env, mmu_idx);
2410    attrs->user = regime_is_user(env, mmu_idx);
2411
2412    /*
2413     * Fast Context Switch Extension. This doesn't exist at all in v8.
2414     * In v7 and earlier it affects all stage 1 translations.
2415     */
2416    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
2417        && !arm_feature(env, ARM_FEATURE_V8)) {
2418        if (regime_el(env, mmu_idx) == 3) {
2419            address += env->cp15.fcseidr_s;
2420        } else {
2421            address += env->cp15.fcseidr_ns;
2422        }
2423    }
2424
2425    if (arm_feature(env, ARM_FEATURE_PMSA)) {
2426        bool ret;
2427        *page_size = TARGET_PAGE_SIZE;
2428
2429        if (arm_feature(env, ARM_FEATURE_V8)) {
2430            /* PMSAv8 */
2431            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
2432                                       phys_ptr, attrs, prot, page_size, fi);
2433        } else if (arm_feature(env, ARM_FEATURE_V7)) {
2434            /* PMSAv7 */
2435            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
2436                                       phys_ptr, prot, page_size, fi);
2437        } else {
2438            /* Pre-v7 MPU */
2439            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
2440                                       phys_ptr, prot, fi);
2441        }
2442        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
2443                      " mmu_idx %u -> %s (prot %c%c%c)\n",
2444                      access_type == MMU_DATA_LOAD ? "reading" :
2445                      (access_type == MMU_DATA_STORE ? "writing" : "executing"),
2446                      (uint32_t)address, mmu_idx,
2447                      ret ? "Miss" : "Hit",
2448                      *prot & PAGE_READ ? 'r' : '-',
2449                      *prot & PAGE_WRITE ? 'w' : '-',
2450                      *prot & PAGE_EXEC ? 'x' : '-');
2451
2452        return ret;
2453    }
2454
2455    /* Definitely a real MMU, not an MPU */
2456
2457    if (regime_translation_disabled(env, mmu_idx)) {
2458        uint64_t hcr;
2459        uint8_t memattr;
2460
2461        /*
2462         * MMU disabled.  S1 addresses within aa64 translation regimes are
2463         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
2464         */
2465        if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
2466            int r_el = regime_el(env, mmu_idx);
2467            if (arm_el_is_aa64(env, r_el)) {
2468                int pamax = arm_pamax(env_archcpu(env));
2469                uint64_t tcr = env->cp15.tcr_el[r_el];
2470                int addrtop, tbi;
2471
2472                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
2473                if (access_type == MMU_INST_FETCH) {
2474                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
2475                }
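                    /*
                     * Bit 55 of the VA selects the TTBR0 or TTBR1 half of the
                     * address space, and hence which TBI field applies.
                     */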
2476                tbi = (tbi >> extract64(address, 55, 1)) & 1;
2477                addrtop = (tbi ? 55 : 63);
2478
2479                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
2480                    fi->type = ARMFault_AddressSize;
2481                    fi->level = 0;
2482                    fi->stage2 = false;
2483                    return 1;
2484                }
2485
2486                /*
2487                 * When TBI is disabled, we've just validated that all of the
2488                 * bits above PAMax are zero, so logically we only need to
2489                 * clear the top byte for TBI.  But it's clearer to follow
2490                 * the pseudocode's setting of addrdesc.paddress.
2491                 */
2492                address = extract64(address, 0, 52);
2493            }
2494        }
2495        *phys_ptr = address;
2496        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2497        *page_size = TARGET_PAGE_SIZE;
2498
2499        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
2500        hcr = arm_hcr_el2_eff(env);
2501        cacheattrs->shareability = 0;
2502        cacheattrs->is_s2_format = false;
2503        if (hcr & HCR_DC) {
2504            if (hcr & HCR_DCT) {
2505                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
2506            } else {
2507                memattr = 0xff;  /* Normal, WB, RWA */
2508            }
2509        } else if (access_type == MMU_INST_FETCH) {
2510            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
2511                memattr = 0xee;  /* Normal, WT, RA, NT */
2512            } else {
2513                memattr = 0x44;  /* Normal, NC (no allocation hints) */
2514            }
2515            cacheattrs->shareability = 2; /* outer shareable */
2516        } else {
2517            memattr = 0x00;      /* Device, nGnRnE */
2518        }
2519        cacheattrs->attrs = memattr;
2520        return 0;
2521    }
2522
2523    if (regime_using_lpae_format(env, mmu_idx)) {
2524        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
2525                                  phys_ptr, attrs, prot, page_size,
2526                                  fi, cacheattrs);
2527    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
2528        return get_phys_addr_v6(env, address, access_type, mmu_idx,
2529                                phys_ptr, attrs, prot, page_size, fi);
2530    } else {
2531        return get_phys_addr_v5(env, address, access_type, mmu_idx,
2532                                    phys_ptr, prot, page_size, fi);
2533    }
2534}
2535
2536hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
2537                                         MemTxAttrs *attrs)
2538{
2539    ARMCPU *cpu = ARM_CPU(cs);
2540    CPUARMState *env = &cpu->env;
2541    hwaddr phys_addr;
2542    target_ulong page_size;
2543    int prot;
2544    bool ret;
2545    ARMMMUFaultInfo fi = {};
2546    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
2547    ARMCacheAttrs cacheattrs = {};
2548
2549    *attrs = (MemTxAttrs) {};
2550
2551    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
2552                        attrs, &prot, &page_size, &fi, &cacheattrs);
2553
2554    if (ret) {
2555        return -1;
2556    }
2557    return phys_addr;
2558}
2559