qemu/accel/tcg/softmmu_template.h
/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
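
/*
 * Editorial note, illustration only (not part of the original file): this
 * header is a multiple-inclusion template.  The includer (e.g.
 * accel/tcg/cputlb.c) is expected to define DATA_SIZE and MMUSUFFIX before
 * each inclusion, roughly like:
 *
 *     #define MMUSUFFIX _mmu
 *
 *     #define DATA_SIZE 1
 *     #include "softmmu_template.h"
 *
 *     #define DATA_SIZE 2
 *     #include "softmmu_template.h"
 *
 *     (and likewise for 4 and 8)
 *
 * The exact macro names and values are the includer's business; the sketch
 * above is an assumption for illustration.
 */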
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif


/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif

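/* Editorial illustration only: with the definitions above, a 64-bit host
   with DATA_SIZE == 2 (and no SOFTMMU_CODE_ACCESS) gets WORD_TYPE ==
   tcg_target_ulong and USUFFIX == "uw", so the unsigned load helper returns
   its 16-bit result zero-extended to a full host register instead of relying
   on the ABI's return-value promotion rules.  */
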
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif

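/* Editorial illustration only (assuming MMUSUFFIX is _mmu, as when this
   template is included from cputlb.c): for DATA_SIZE == 4,
   helper_le_ld_name expands to helper_le_ldul_mmu, helper_be_ld_name to
   helper_be_ldul_mmu and helper_be_st_name to helper_be_stl_mmu; for
   DATA_SIZE == 1 there is a single byte-order-neutral set, e.g.
   helper_ret_ldub_mmu.  */
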
#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              size_t mmu_idx, size_t index,
                                              target_ulong addr,
                                              uintptr_t retaddr,
                                              bool recheck,
                                              MMUAccessType access_type)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
                    access_type, DATA_SIZE);
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = entry->ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = entry->ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
                                    tlb_addr & TLB_RECHECK,
                                    READ_ACCESS_TYPE);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
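        /* Editorial illustration only: with DATA_SIZE == 4 and addr % 4 == 3,
           addr1 is addr - 3, addr2 is addr1 + 4 and shift is 24, so the
           result is (res1 >> 24) | (res2 << 8): the top byte of the first
           aligned word followed by the low three bytes of the second.  */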
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + entry->addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = entry->ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = entry->ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
                                    tlb_addr & TLB_RECHECK,
                                    READ_ACCESS_TYPE);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
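        /* Editorial illustration only: with DATA_SIZE == 4 and addr % 4 == 3,
           shift is 24, so the result is (res1 << 24) | (res2 >> 8): the last
           byte of the first aligned word becomes the most significant byte,
           followed by the first three bytes of the second word.  */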
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + entry->addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
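/* Editorial illustration only (assuming MMUSUFFIX is _mmu, as when included
   from cputlb.c): on a 64-bit host with DATA_SIZE == 4 this emits
   helper_le_ldsl_mmu / helper_be_ldsl_mmu, which simply cast the unsigned
   result to int32_t so that it comes back sign-extended in a full host
   register.  */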
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          size_t mmu_idx, size_t index,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr,
                                          bool recheck)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
                     recheck, DATA_SIZE);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr,
                               retaddr, tlb_addr & TLB_RECHECK);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
        target_ulong page2;
        CPUTLBEntry *entry2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        entry2 = tlb_entry(env, mmu_idx, page2);
        if (!tlb_hit_page(tlb_addr_write(entry2), page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + entry->addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr,
                               tlb_addr & TLB_RECHECK);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
        target_ulong page2;
        CPUTLBEntry *entry2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        entry2 = tlb_entry(env, mmu_idx, page2);
        if (!tlb_hit_page(tlb_addr_write(entry2), page2)
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, DATA_SIZE, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
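            /* Editorial illustration only: for DATA_SIZE == 4, i == 0
               extracts the most significant byte (val >> 24) and i == 3 the
               least significant, so the bytes are stored in big-endian order
               at increasing addresses.  */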
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + entry->addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name