qemu/softmmu_template.h
/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
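
/* Worked example of the selection above, assuming the including file
 * defines DATA_SIZE == 2 before including this template:
 *
 *   SUFFIX     w         -> store helpers and host stores (stw_le_p, ...)
 *   LSUFFIX    uw        -> host loads (lduw_le_p, lduw_be_p, ...)
 *   SDATA_TYPE int16_t   -> used by the sign-extending load helpers
 *   DATA_TYPE  uint16_t  -> the unsigned value actually passed around
 *
 * Each inclusion therefore stamps out one family of load/store helpers
 * for a single access width.
 */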


/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
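
/* Concretely: on a 64-bit host (tcg_target_ulong is 64 bits wide), a 2-byte
 * load helper returns its uint16_t result extended to the full 64-bit
 * register, so the generated TCG code never has to care how the host ABI
 * promotes narrow return values.  Only the 8-byte helpers (and the
 * code-access variants) return DATA_TYPE directly.
 */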

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
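
/* For example, assuming the including file defines MMUSUFFIX as _mmu and
 * DATA_SIZE as 4, the data-access inclusion expands:
 *
 *   helper_le_ld_name  -> helper_le_ldul_mmu
 *   helper_be_ld_name  -> helper_be_ldul_mmu
 *   helper_le_lds_name -> helper_le_ldsl_mmu
 *   helper_be_lds_name -> helper_be_ldsl_mmu
 *   helper_le_st_name  -> helper_le_stl_mmu
 *   helper_be_st_name  -> helper_be_stl_mmu
 *
 * Byte accesses have no endianness, so the DATA_SIZE == 1 case aliases the
 * "be" names to the single helper_ret_ldub / helper_ret_stb set.
 */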

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              size_t mmu_idx, size_t index,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_readx(env, iotlbentry, addr, retaddr, DATA_SIZE);
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }
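
    /* The comparison above folds two checks into one: the low bits of
     * tlb_addr hold TLB flag bits, so OR-ing TLB_INVALID_MASK into the mask
     * makes an invalidated entry compare unequal just like a genuine page
     * mismatch does.  Any other flag bits still set in tlb_addr (e.g. for
     * MMIO or not-dirty pages) steer the access into the IO path below.
     */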

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }
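
    /* Worked example of the little-endian combine, assuming DATA_SIZE == 4
     * and addr == 0x1003 (so shift == 24): addr1 == 0x1000 and
     * addr2 == 0x1004 are loaded recursively (each half may itself take the
     * IO or fault path), giving
     *
     *   res1 == b3 b2 b1 b0   (byte at 0x1000 in the low-order position)
     *   res2 == b7 b6 b5 b4
     *
     * res1 >> 24 keeps b3, res2 << 8 supplies b4..b6, and the OR yields
     * b6 b5 b4 b3, exactly the four bytes starting at 0x1003 in
     * little-endian order.  On the aligned RAM fast path below, the TLB
     * entry's addend is the precomputed difference between the host mapping
     * and the guest page, so addr + addend is a host pointer that can be
     * dereferenced directly.
     */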

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }
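
    /* Mirror image of the little-endian combine, assuming again
     * DATA_SIZE == 4 and addr == 0x1003 (shift == 24): the most significant
     * byte of the result is the byte at addr itself, so res1 << 24 keeps
     * the first word's final byte (the one at addr) in the top position and
     * res2 >> 8 drops in the three bytes that follow from the second word.
     */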

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
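
/* The cast above does all the work: truncating the zero-extended result to
 * SDATA_TYPE and letting it widen back to WORD_TYPE sign-extends the loaded
 * value.  On a 64-bit host this produces signed variants for the 1-, 2- and
 * 4-byte helpers; an 8-byte load already fills the register, so it needs no
 * signed counterpart (nor does a 4-byte load on a 32-bit host, where
 * DATA_SIZE * 8 equals TCG_TARGET_REG_BITS).
 */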

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          size_t mmu_idx, size_t index,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_writex(env, iotlbentry, val, addr, retaddr, DATA_SIZE);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }
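
    /* In the byte-by-byte fallback above, assuming DATA_SIZE == 4 and
     * val == 0xddccbbaa, the iterations store 0xaa, 0xbb, 0xcc, 0xdd to
     * addr, addr+1, addr+2, addr+3 respectively: a little-endian store
     * decomposed into single-byte stores, each of which goes through the
     * full byte store helper and so handles MMIO and faults itself.
     */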

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }
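
    /* Same fallback as the little-endian store, but extracting from the
     * most significant byte downward: assuming DATA_SIZE == 4 and
     * val == 0xddccbbaa, the shifts are 24, 16, 8, 0, so 0xdd, 0xcc, 0xbb,
     * 0xaa land at addr, addr+1, addr+2, addr+3 in big-endian order.
     */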

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name