qemu/target/arm/tlb_helper.c
/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"

#if !defined(CONFIG_USER_ONLY)

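/*
 * Build the syndrome for a data abort, merging the ISS fields recorded
 * in the template syndrome at translation time with the fields that are
 * only known once the fault actually occurs.
 */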
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}

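/*
 * Deliver a translation fault to the guest: convert the fault info into
 * FSR and syndrome values and raise the corresponding Prefetch Abort or
 * Data Abort exception at the target exception level.
 */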
static void QEMU_NORETURN arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                                            MMUAccessType access_type,
                                            int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
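    /*
     * Stage 2 translation faults are taken to EL2; report the faulting
     * intermediate physical address to the hypervisor via HPFAR_EL2.
     */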
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
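        /*
         * ARMv6 and later report write accesses via the WnR bit
         * (bit 11) of the DFSR, so set it for stores.
         */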
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);

#ifdef CONFIG_USER_ONLY
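    /*
     * In user-only mode there is no MMU to emulate: record the faulting
     * address and exception type and return to the main loop, which
     * turns the fault into a signal for the guest process.
     */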
    cpu->env.exception.vaddress = address;
    if (access_type == MMU_INST_FETCH) {
        cs->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        cs->exception_index = EXCP_DATA_ABORT;
    }
    cpu_loop_exit_restore(cs, retaddr);
#else
    hwaddr phys_addr;
    target_ulong page_size;
    int prot, ret;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB.  On success, return true.  Otherwise, if probing,
     * return false.  Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (page_size >= TARGET_PAGE_SIZE) {
            phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }
        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
                                prot, mmu_idx, page_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
    }
#endif
}