qemu/softmmu_template.h
/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"
#include "qemu/etrace.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif


/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
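
/* Illustrative note (added comment, not in the original source): for a
 * 16-bit access (SHIFT == 1) on a 64-bit host, the definitions above are
 * expected to expand to roughly
 *
 *     WORD_TYPE  -> tcg_target_ulong   (result widened to the host register)
 *     USUFFIX    -> uw                 (unsigned 16-bit load suffix)
 *     SSUFFIX    -> sw                 (signed 16-bit load suffix)
 */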

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif
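
/* Illustrative note (added comment, not in the original source): assuming the
 * including file defines SHIFT as 2 and MMUSUFFIX as _mmu (the usual
 * data-access case), the names above resolve to e.g.
 *
 *     helper_le_ld_name  -> helper_le_ldul_mmu
 *     helper_be_ld_name  -> helper_be_ldul_mmu
 *     helper_le_st_name  -> helper_le_stl_mmu
 *
 * i.e. one little-endian and one big-endian entry point per access size, with
 * helper_te_* selecting the target-endian flavour.
 */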

/* macro to check the victim tlb */
#define VICTIM_TLB_HIT(ty)                                                    \
({                                                                            \
    /* we are about to do a page table walk. our last hope is the             \
     * victim tlb. try to refill from the victim tlb before walking the       \
     * page table. */                                                         \
    int vidx;                                                                 \
    CPUIOTLBEntry tmpiotlb;                                                   \
    CPUTLBEntry tmptlb;                                                       \
    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) {                         \
        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
            /* found entry in victim tlb, swap tlb and iotlb */               \
            tmptlb = env->tlb_table[mmu_idx][index];                          \
            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
            env->tlb_v_table[mmu_idx][vidx] = tmptlb;                         \
            tmpiotlb = env->iotlb[mmu_idx][index];                            \
            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx];         \
            env->iotlb_v[mmu_idx][vidx] = tmpiotlb;                           \
            break;                                                            \
        }                                                                     \
    }                                                                         \
    /* return true when there is a vtlb hit, i.e. vidx >=0 */                 \
    vidx >= 0;                                                                \
})
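
/* Illustrative usage note (added comment, not in the original source): the
 * helpers below evaluate this statement expression on a TLB miss, e.g.
 *
 *     if (!VICTIM_TLB_HIT(addr_write)) {
 *         tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
 *     }
 *
 * so a victim hit refills the direct-mapped slot 'index' without walking the
 * guest page table.
 */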

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    if (mr->iommu_ops) {
        address_space_rw(cpu->as, physaddr, iotlbentry->attrs, (void *) &val,
                         1 << SHIFT, false);
    } else {
        memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                    iotlbentry->attrs);
    }

    if (qemu_etrace_mask(ETRACE_F_MEM)) {
        etrace_mem_access(&qemu_etracer, 0, 0,
                          addr, DATA_SIZE, MEM_READ, val);
    }
    return val;
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
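    /* Added note (assumption about the TLB encoding used elsewhere in QEMU):
     * flag bits such as TLB_MMIO and TLB_NOTDIRTY live in the sub-page bits
     * of tlb_addr, so any bit set below TARGET_PAGE_MASK diverts the access
     * to this slow path.  */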
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
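        /* Worked example (added comment, not in the original source): for a
         * 4-byte load with addr % 4 == 2, shift is 16, so the expression
         * below takes the top two bytes of res1 (the tail of the first
         * aligned word) as the low half and the bottom two bytes of res2
         * (the head of the second word) as the high half.  */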
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
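        /* Worked example (added comment, not in the original source): for the
         * same misaligned 4-byte load with shift == 16, the big-endian values
         * keep their bytes in memory order from the most significant end, so
         * the expression below takes the low two bytes of res1 as the high
         * half and the top two bytes of res2 as the low half.  */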
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    if (qemu_etrace_mask(ETRACE_F_MEM)) {
        etrace_mem_access(&qemu_etracer, 0, 0,
                          addr, DATA_SIZE, MEM_WRITE, val);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    if (mr->iommu_ops) {
        address_space_rw(cpu->as, physaddr, iotlbentry->attrs, (void *) &val,
                         1 << SHIFT, true);
    } else {
        memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                     iotlbentry->attrs);
    }
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract.  */
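            /* Added note (not in the original source): byte i of a
             * little-endian value occupies bits [i * 8, i * 8 + 7], so the
             * shift below stores the bytes to addr + i in ascending order.  */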
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract.  */
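            /* Added note (not in the original source): in a big-endian value
             * the byte destined for addr + i is byte (DATA_SIZE - 1 - i)
             * counted from the least significant end, hence the mirrored
             * shift below.  */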
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

#if DATA_SIZE == 1
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
#endif
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name