qemu/target/arm/tlb_helper.c
/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"

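/*
 * Note: the template syndrome used below is the per-insn syndrome that was
 * recorded at translation time; cpu_restore_state() restores it into
 * env->exception.syndrome before arm_deliver_fault() runs.
 */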
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}

static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                                            MMUAccessType access_type,
                                            int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
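        /*
         * Faults taken during a stage 2 translation are routed to EL2,
         * and the faulting intermediate physical address is reported to
         * the hypervisor via HPFAR_EL2.
         */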
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
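        /*
         * From ARMv6 the short-descriptor DFSR reports whether the abort
         * was caused by a write in the WnR bit (bit [11]).
         */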
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
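    /* raise_exception() does not return: it longjmps back to the cpu loop. */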
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

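    /*
     * Classify the bus error: arm_extabort_type() maps the MemTxResult
     * onto the IMPDEF "EA" external abort type bit reported in the
     * syndrome and fault status registers.
     */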
    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};
    hwaddr phys_addr;
    target_ulong page_size;
    int prot, ret;
    MemTxAttrs attrs = {};
    ARMCacheAttrs cacheattrs = {};

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB.  On success, return true.  Otherwise, if probing,
     * return false.  Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &phys_addr, &attrs, &prot, &page_size,
                        &fi, &cacheattrs);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        /* Notice and record tagged memory. */
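        /*
         * Attribute 0xf0 is the MAIR encoding for Tagged Normal
         * Write-Back cacheable memory, the only memory type that can
         * carry MTE allocation tags.
         */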
        if (cpu_isar_feature(aa64_mte, cpu) && cacheattrs.attrs == 0xf0) {
            arm_tlb_mte_tagged(&attrs) = true;
        }

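        /*
         * Install the translation into the softmmu TLB; the access that
         * triggered this fill is then retried against the new entry.
         */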
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
    }
}
#else
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
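    /*
     * In user-only mode there is no real translation table walk, so we
     * report the fault at level 3 as a plausible final-level fault.
     */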
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra, true);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}

void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}
#endif /* !defined(CONFIG_USER_ONLY) */