qemu/accel/tcg/cputlb.c
   1/*
   2 *  Common CPU TLB handling
   3 *
   4 *  Copyright (c) 2003 Fabrice Bellard
   5 *
   6 * This library is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU Lesser General Public
   8 * License as published by the Free Software Foundation; either
   9 * version 2.1 of the License, or (at your option) any later version.
  10 *
  11 * This library is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * Lesser General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU Lesser General Public
  17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#include "qemu/osdep.h"
  21#include "qemu/main-loop.h"
  22#include "cpu.h"
  23#include "exec/exec-all.h"
  24#include "exec/memory.h"
  25#include "exec/address-spaces.h"
  26#include "exec/cpu_ldst.h"
  27#include "exec/cputlb.h"
  28#include "exec/memory-internal.h"
  29#include "exec/ram_addr.h"
  30#include "tcg/tcg.h"
  31#include "qemu/error-report.h"
  32#include "exec/log.h"
  33#include "exec/helper-proto.h"
  34#include "qemu/atomic.h"
  35#include "qemu/atomic128.h"
  36
  37/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
  38/* #define DEBUG_TLB */
  39/* #define DEBUG_TLB_LOG */
  40
  41#ifdef DEBUG_TLB
  42# define DEBUG_TLB_GATE 1
  43# ifdef DEBUG_TLB_LOG
  44#  define DEBUG_TLB_LOG_GATE 1
  45# else
  46#  define DEBUG_TLB_LOG_GATE 0
  47# endif
  48#else
  49# define DEBUG_TLB_GATE 0
  50# define DEBUG_TLB_LOG_GATE 0
  51#endif
  52
  53#define tlb_debug(fmt, ...) do { \
  54    if (DEBUG_TLB_LOG_GATE) { \
  55        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
  56                      ## __VA_ARGS__); \
  57    } else if (DEBUG_TLB_GATE) { \
  58        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
  59    } \
  60} while (0)
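
    /*
     * Note (added commentary): gating on the compile-time constants above,
     * rather than #ifdef-ing the call sites, keeps the format string and
     * arguments visible to the compiler, so tlb_debug() calls are still
     * type-checked when both gates are 0 while the dead branches compile away.
     */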
  61
  62#define assert_cpu_is_self(cpu) do {                              \
  63        if (DEBUG_TLB_GATE) {                                     \
  64            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
  65        }                                                         \
  66    } while (0)
  67
  68/* run_on_cpu_data.target_ptr should always be big enough for a
  69 * target_ulong even on 32 bit builds */
  70QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
  71
  72/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
  73 */
  74QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
  75#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
  76
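    /*
     * f[mmu_idx].mask holds (n_entries - 1) << CPU_TLB_ENTRY_BITS (see
     * tlb_dyn_init() and tlb_mmu_resize_locked() below), i.e. the byte
     * offset of the last entry in the table, so adding one entry's size
     * yields the size of the table in bytes.
     */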
  77static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
  78{
  79    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
  80}
  81
  82static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
  83                             size_t max_entries)
  84{
  85    desc->window_begin_ns = ns;
  86    desc->window_max_entries = max_entries;
  87}
  88
  89static void tlb_dyn_init(CPUArchState *env)
  90{
  91    int i;
  92
  93    for (i = 0; i < NB_MMU_MODES; i++) {
  94        CPUTLBDesc *desc = &env_tlb(env)->d[i];
  95        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
  96
  97        tlb_window_reset(desc, get_clock_realtime(), 0);
  98        desc->n_used_entries = 0;
  99        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
 100        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
 101        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
 102    }
 103}
 104
 105/**
 106 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 107 * @env: CPU that owns the TLB
 108 * @mmu_idx: MMU index of the TLB
 109 *
 110 * Called with tlb_lock held.
 111 *
 112 * We have two main constraints when resizing a TLB: (1) we only resize it
 113 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 114 * the array or unnecessarily flushing it), which means we do not control how
 115 * frequently the resizing can occur; (2) we don't have access to the guest's
 116 * future scheduling decisions, and therefore have to decide the magnitude of
 117 * the resize based on past observations.
 118 *
 119 * In general, a memory-hungry process can benefit greatly from an appropriately
 120 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 121 * we just have to make the TLB as large as possible; while an oversized TLB
 122 * results in minimal TLB miss rates, it also takes longer to be flushed
 123 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 124 * performance.
 125 *
 126 * To achieve near-optimal performance for all kinds of workloads, we:
 127 *
 128 * 1. Aggressively increase the size of the TLB when the use rate of the
 129 * TLB being flushed is high, since it is likely that in the near future this
 130 * memory-hungry process will execute again, and its memory hungriness will
 131 * probably be similar.
 132 *
 133 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 134 * reasonably large time window. The rationale is that if in such a time window
 135 * we have not observed a high TLB use rate, it is likely that we won't observe
 136 * it in the near future. In that case, once a time window expires we downsize
 137 * the TLB to match the maximum use rate observed in the window.
 138 *
 139 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 140 * since in that range performance is likely near-optimal. Recall that the TLB
 141 * is direct mapped, so we want the use rate to be low (or at least not too
 142 * high), since otherwise we are likely to have a significant number of
 143 * conflict misses.
 144 */
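    /*
     * Worked example (illustrative numbers): with old_size == 1024 entries
     * and window_max_entries == 800, the use rate is 800 * 100 / 1024 == 78%,
     * so the TLB is doubled (capped at 1 << CPU_TLB_DYN_MAX_BITS entries).
     * If instead the window expires with old_size == 2048 and
     * window_max_entries == 200, the rate is 9%; pow2ceil(200) == 256 would
     * give an expected rate of 78% > 70%, so ceil is doubled to 512 and the
     * TLB shrinks to MAX(512, 1 << CPU_TLB_DYN_MIN_BITS).
     */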
 145static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
 146{
 147    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
 148    size_t old_size = tlb_n_entries(env, mmu_idx);
 149    size_t rate;
 150    size_t new_size = old_size;
 151    int64_t now = get_clock_realtime();
 152    int64_t window_len_ms = 100;
 153    int64_t window_len_ns = window_len_ms * 1000 * 1000;
 154    bool window_expired = now > desc->window_begin_ns + window_len_ns;
 155
 156    if (desc->n_used_entries > desc->window_max_entries) {
 157        desc->window_max_entries = desc->n_used_entries;
 158    }
 159    rate = desc->window_max_entries * 100 / old_size;
 160
 161    if (rate > 70) {
 162        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
 163    } else if (rate < 30 && window_expired) {
 164        size_t ceil = pow2ceil(desc->window_max_entries);
 165        size_t expected_rate = desc->window_max_entries * 100 / ceil;
 166
 167        /*
 168         * Avoid undersizing when the max number of entries seen is just below
 169         * a pow2. For instance, if max_entries == 1025, the expected use rate
 170         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
 171         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
 172         * later. Thus, make sure that the expected use rate remains below 70%.
 173         * (and since we double the size, that means the lowest rate we'd
 174         * expect to get is 35%, which is still in the 30-70% range where
 175         * we consider that the size is appropriate.)
 176         */
 177        if (expected_rate > 70) {
 178            ceil *= 2;
 179        }
 180        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
 181    }
 182
 183    if (new_size == old_size) {
 184        if (window_expired) {
 185            tlb_window_reset(desc, now, desc->n_used_entries);
 186        }
 187        return;
 188    }
 189
 190    g_free(env_tlb(env)->f[mmu_idx].table);
 191    g_free(env_tlb(env)->d[mmu_idx].iotlb);
 192
 193    tlb_window_reset(desc, now, 0);
 194    /* desc->n_used_entries is cleared by the caller */
 195    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
 196    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
 197    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
 198    /*
 199     * If the allocations fail, try smaller sizes. We just freed some
 200     * memory, so going back to half of new_size has a good chance of working.
 201     * Increased memory pressure elsewhere in the system might cause the
 202     * allocations to fail though, so we progressively reduce the allocation
 203     * size, aborting if we cannot even allocate the smallest TLB we support.
 204     */
 205    while (env_tlb(env)->f[mmu_idx].table == NULL ||
 206           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
 207        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
 208            error_report("%s: %s", __func__, strerror(errno));
 209            abort();
 210        }
 211        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
 212        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
 213
 214        g_free(env_tlb(env)->f[mmu_idx].table);
 215        g_free(env_tlb(env)->d[mmu_idx].iotlb);
 216        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
 217        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
 218    }
 219}
 220
 221static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
 222{
 223    tlb_mmu_resize_locked(env, mmu_idx);
 224    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
 225    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
 226}
 227
 228static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
 229{
 230    env_tlb(env)->d[mmu_idx].n_used_entries++;
 231}
 232
 233static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
 234{
 235    env_tlb(env)->d[mmu_idx].n_used_entries--;
 236}
 237
 238void tlb_init(CPUState *cpu)
 239{
 240    CPUArchState *env = cpu->env_ptr;
 241
 242    qemu_spin_init(&env_tlb(env)->c.lock);
 243
 244    /* Ensure that cpu_reset performs a full flush.  */
 245    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
 246
 247    tlb_dyn_init(env);
 248}
 249
 250/* flush_all_helper: run fn across all cpus except the source cpu
 251 *
 252 * The callers decide whether to synchronise: the *_synced variants queue
 253 * the source cpu's flush as "safe" work and exit the cpu loop, creating a
 254 * synchronisation point where all queued work is finished before execution
 255 * starts again.
 256 */
 257static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
 258                             run_on_cpu_data d)
 259{
 260    CPUState *cpu;
 261
 262    CPU_FOREACH(cpu) {
 263        if (cpu != src) {
 264            async_run_on_cpu(cpu, fn, d);
 265        }
 266    }
 267}
 268
 269void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
 270{
 271    CPUState *cpu;
 272    size_t full = 0, part = 0, elide = 0;
 273
 274    CPU_FOREACH(cpu) {
 275        CPUArchState *env = cpu->env_ptr;
 276
 277        full += atomic_read(&env_tlb(env)->c.full_flush_count);
 278        part += atomic_read(&env_tlb(env)->c.part_flush_count);
 279        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
 280    }
 281    *pfull = full;
 282    *ppart = part;
 283    *pelide = elide;
 284}
 285
 286static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
 287{
 288    tlb_table_flush_by_mmuidx(env, mmu_idx);
 289    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
 290    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
 291    env_tlb(env)->d[mmu_idx].vindex = 0;
 292    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
 293           sizeof(env_tlb(env)->d[0].vtable));
 294}
 295
 296static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 297{
 298    CPUArchState *env = cpu->env_ptr;
 299    uint16_t asked = data.host_int;
 300    uint16_t all_dirty, work, to_clean;
 301
 302    assert_cpu_is_self(cpu);
 303
 304    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
 305
 306    qemu_spin_lock(&env_tlb(env)->c.lock);
 307
 308    all_dirty = env_tlb(env)->c.dirty;
 309    to_clean = asked & all_dirty;
 310    all_dirty &= ~to_clean;
 311    env_tlb(env)->c.dirty = all_dirty;
 312
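        /*
         * Iterate over the set bits of to_clean: ctz32() picks the lowest
         * dirty mmu_idx and "work &= work - 1" clears that bit, so each
         * dirty MMU index is flushed exactly once.
         */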
 313    for (work = to_clean; work != 0; work &= work - 1) {
 314        int mmu_idx = ctz32(work);
 315        tlb_flush_one_mmuidx_locked(env, mmu_idx);
 316    }
 317
 318    qemu_spin_unlock(&env_tlb(env)->c.lock);
 319
 320    cpu_tb_jmp_cache_clear(cpu);
 321
 322    if (to_clean == ALL_MMUIDX_BITS) {
 323        atomic_set(&env_tlb(env)->c.full_flush_count,
 324                   env_tlb(env)->c.full_flush_count + 1);
 325    } else {
 326        atomic_set(&env_tlb(env)->c.part_flush_count,
 327                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
 328        if (to_clean != asked) {
 329            atomic_set(&env_tlb(env)->c.elide_flush_count,
 330                       env_tlb(env)->c.elide_flush_count +
 331                       ctpop16(asked & ~to_clean));
 332        }
 333    }
 334}
 335
 336void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
 337{
 338    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
 339
 340    if (cpu->created && !qemu_cpu_is_self(cpu)) {
 341        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
 342                         RUN_ON_CPU_HOST_INT(idxmap));
 343    } else {
 344        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
 345    }
 346}
 347
 348void tlb_flush(CPUState *cpu)
 349{
 350    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
 351}
 352
 353void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
 354{
 355    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
 356
 357    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
 358
 359    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
 360    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
 361}
 362
 363void tlb_flush_all_cpus(CPUState *src_cpu)
 364{
 365    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
 366}
 367
 368void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
 369{
 370    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
 371
 372    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
 373
 374    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
 375    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
 376}
 377
 378void tlb_flush_all_cpus_synced(CPUState *src_cpu)
 379{
 380    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
 381}
 382
 383static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
 384                                        target_ulong page)
 385{
 386    return tlb_hit_page(tlb_entry->addr_read, page) ||
 387           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
 388           tlb_hit_page(tlb_entry->addr_code, page);
 389}
 390
 391/**
 392 * tlb_entry_is_empty - return true if the entry is not in use
 393 * @te: pointer to CPUTLBEntry
 394 */
 395static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
 396{
 397    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
 398}
 399
 400/* Called with tlb_c.lock held */
 401static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
 402                                          target_ulong page)
 403{
 404    if (tlb_hit_page_anyprot(tlb_entry, page)) {
 405        memset(tlb_entry, -1, sizeof(*tlb_entry));
 406        return true;
 407    }
 408    return false;
 409}
 410
 411/* Called with tlb_c.lock held */
 412static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
 413                                              target_ulong page)
 414{
 415    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
 416    int k;
 417
 418    assert_cpu_is_self(env_cpu(env));
 419    for (k = 0; k < CPU_VTLB_SIZE; k++) {
 420        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
 421            tlb_n_used_entries_dec(env, mmu_idx);
 422        }
 423    }
 424}
 425
 426static void tlb_flush_page_locked(CPUArchState *env, int midx,
 427                                  target_ulong page)
 428{
 429    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
 430    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;
 431
 432    /* Check if we need to flush due to large pages.  */
 433    if ((page & lp_mask) == lp_addr) {
 434        tlb_debug("forcing full flush midx %d ("
 435                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
 436                  midx, lp_addr, lp_mask);
 437        tlb_flush_one_mmuidx_locked(env, midx);
 438    } else {
 439        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
 440            tlb_n_used_entries_dec(env, midx);
 441        }
 442        tlb_flush_vtlb_page_locked(env, midx, page);
 443    }
 444}
 445
 446/* As we are going to hijack the bottom bits of the page address for an
 447 * mmuidx bit mask, we need to fail the build if we can't do that.
 448 */
 449QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
 450
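    /*
     * tlb_flush_page_by_mmuidx() below packs its two arguments into a single
     * target_ulong: the page address occupies the bits covered by
     * TARGET_PAGE_MASK and the idxmap sits in the low bits, which is what
     * the build-time assertion above guarantees is possible.
     */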
 451static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
 452                                                run_on_cpu_data data)
 453{
 454    CPUArchState *env = cpu->env_ptr;
 455    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
 456    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
 457    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
 458    int mmu_idx;
 459
 460    assert_cpu_is_self(cpu);
 461
 462    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
 463              addr, mmu_idx_bitmap);
 464
 465    qemu_spin_lock(&env_tlb(env)->c.lock);
 466    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
 467        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
 468            tlb_flush_page_locked(env, mmu_idx, addr);
 469        }
 470    }
 471    qemu_spin_unlock(&env_tlb(env)->c.lock);
 472
 473    tb_flush_jmp_cache(cpu, addr);
 474}
 475
 476void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
 477{
 478    target_ulong addr_and_mmu_idx;
 479
 480    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
 481
 482    /* This should already be page aligned */
 483    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
 484    addr_and_mmu_idx |= idxmap;
 485
 486    if (!qemu_cpu_is_self(cpu)) {
 487        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
 488                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
 489    } else {
 490        tlb_flush_page_by_mmuidx_async_work(
 491            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
 492    }
 493}
 494
 495void tlb_flush_page(CPUState *cpu, target_ulong addr)
 496{
 497    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
 498}
 499
 500void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
 501                                       uint16_t idxmap)
 502{
 503    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
 504    target_ulong addr_and_mmu_idx;
 505
 506    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
 507
 508    /* This should already be page aligned */
 509    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
 510    addr_and_mmu_idx |= idxmap;
 511
 512    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
 513    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
 514}
 515
 516void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
 517{
 518    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
 519}
 520
 521void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 522                                              target_ulong addr,
 523                                              uint16_t idxmap)
 524{
 525    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
 526    target_ulong addr_and_mmu_idx;
 527
 528    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
 529
 530    /* This should already be page aligned */
 531    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
 532    addr_and_mmu_idx |= idxmap;
 533
 534    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
 535    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
 536}
 537
 538void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
 539{
 540    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
 541}
 542
 543/* update the TLBs so that writes to code in the virtual page 'addr'
 544   can be detected */
 545void tlb_protect_code(ram_addr_t ram_addr)
 546{
 547    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
 548                                             DIRTY_MEMORY_CODE);
 549}
 550
 551/* update the TLB so that writes in physical page 'phys_addr' are no longer
 552   tested for self modifying code */
 553void tlb_unprotect_code(ram_addr_t ram_addr)
 554{
 555    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
 556}
 557
 558
 559/*
 560 * Dirty write flag handling
 561 *
 562 * When the TCG code writes to a location it looks up the address in
 563 * the TLB and uses that data to compute the final address. If any of
 564 * the lower bits of the address are set then the slow path is forced.
 565 * There are a number of reasons to do this but for normal RAM the
 566 * most usual is detecting writes to code regions which may invalidate
 567 * generated code.
 568 *
 569 * Other vCPUs might be reading their TLBs during guest execution, so we update
 570 * te->addr_write with atomic_set. We don't need to worry about this for
 571 * oversized guests as MTTCG is disabled for them.
 572 *
 573 * Called with tlb_c.lock held.
 574 */
 575static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
 576                                         uintptr_t start, uintptr_t length)
 577{
 578    uintptr_t addr = tlb_entry->addr_write;
 579
 580    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
 581        addr &= TARGET_PAGE_MASK;
 582        addr += tlb_entry->addend;
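            /*
             * addr is now the host address of the start of the page; the
             * unsigned subtraction below doubles as a range check:
             * (addr - start) is less than length only when
             * start <= addr < start + length.
             */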
 583        if ((addr - start) < length) {
 584#if TCG_OVERSIZED_GUEST
 585            tlb_entry->addr_write |= TLB_NOTDIRTY;
 586#else
 587            atomic_set(&tlb_entry->addr_write,
 588                       tlb_entry->addr_write | TLB_NOTDIRTY);
 589#endif
 590        }
 591    }
 592}
 593
 594/*
 595 * Called with tlb_c.lock held.
 596 * Called only from the vCPU context, i.e. the TLB's owner thread.
 597 */
 598static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
 599{
 600    *d = *s;
 601}
 602
 603/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 604 * the target vCPU).
 605 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 606 * thing actually updated is the target TLB entry ->addr_write flags.
 607 */
 608void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
 609{
 610    CPUArchState *env;
 611
 612    int mmu_idx;
 613
 614    env = cpu->env_ptr;
 615    qemu_spin_lock(&env_tlb(env)->c.lock);
 616    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
 617        unsigned int i;
 618        unsigned int n = tlb_n_entries(env, mmu_idx);
 619
 620        for (i = 0; i < n; i++) {
 621            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
 622                                         start1, length);
 623        }
 624
 625        for (i = 0; i < CPU_VTLB_SIZE; i++) {
 626            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
 627                                         start1, length);
 628        }
 629    }
 630    qemu_spin_unlock(&env_tlb(env)->c.lock);
 631}
 632
 633/* Called with tlb_c.lock held */
 634static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
 635                                         target_ulong vaddr)
 636{
 637    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
 638        tlb_entry->addr_write = vaddr;
 639    }
 640}
 641
 642/* update the TLB corresponding to virtual page vaddr
 643   so that it is no longer dirty */
 644void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
 645{
 646    CPUArchState *env = cpu->env_ptr;
 647    int mmu_idx;
 648
 649    assert_cpu_is_self(cpu);
 650
 651    vaddr &= TARGET_PAGE_MASK;
 652    qemu_spin_lock(&env_tlb(env)->c.lock);
 653    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
 654        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
 655    }
 656
 657    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
 658        int k;
 659        for (k = 0; k < CPU_VTLB_SIZE; k++) {
 660            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
 661        }
 662    }
 663    qemu_spin_unlock(&env_tlb(env)->c.lock);
 664}
 665
 666/* Our TLB does not support large pages, so remember the area covered by
 667   large pages and trigger a full TLB flush if these are invalidated.  */
 668static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
 669                               target_ulong vaddr, target_ulong size)
 670{
 671    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
 672    target_ulong lp_mask = ~(size - 1);
 673
 674    if (lp_addr == (target_ulong)-1) {
 675        /* No previous large page.  */
 676        lp_addr = vaddr;
 677    } else {
 678        /* Extend the existing region to include the new page.
 679           This is a compromise between unnecessary flushes and
 680           the cost of maintaining a full variable size TLB.  */
 681        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
 682        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
 683            lp_mask <<= 1;
 684        }
 685    }
 686    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
 687    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
 688}
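
    /*
     * Illustrative example: if a 2MB page at 0x40000000 is already recorded
     * and a second 2MB page at 0x40400000 is added, lp_mask is widened
     * (shifted left) until both addresses match under it, so the tracked
     * region grows to the 16MB range 0x40000000-0x40ffffff.
     */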
 689
 690/* Add a new TLB entry. At most one entry for a given virtual address
 691 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
 692 * supplied size is only used by tlb_flush_page.
 693 *
 694 * Called from TCG-generated code, which is under an RCU read-side
 695 * critical section.
 696 */
 697void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
 698                             hwaddr paddr, MemTxAttrs attrs, int prot,
 699                             int mmu_idx, target_ulong size)
 700{
 701    CPUArchState *env = cpu->env_ptr;
 702    CPUTLB *tlb = env_tlb(env);
 703    CPUTLBDesc *desc = &tlb->d[mmu_idx];
 704    MemoryRegionSection *section;
 705    unsigned int index;
 706    target_ulong address;
 707    target_ulong code_address;
 708    uintptr_t addend;
 709    CPUTLBEntry *te, tn;
 710    hwaddr iotlb, xlat, sz, paddr_page;
 711    target_ulong vaddr_page;
 712    int asidx = cpu_asidx_from_attrs(cpu, attrs);
 713
 714    assert_cpu_is_self(cpu);
 715
 716    if (size <= TARGET_PAGE_SIZE) {
 717        sz = TARGET_PAGE_SIZE;
 718    } else {
 719        tlb_add_large_page(env, mmu_idx, vaddr, size);
 720        sz = size;
 721    }
 722    vaddr_page = vaddr & TARGET_PAGE_MASK;
 723    paddr_page = paddr & TARGET_PAGE_MASK;
 724
 725    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
 726                                                &xlat, &sz, attrs, &prot);
 727    assert(sz >= TARGET_PAGE_SIZE);
 728
 729    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
 730              " prot=%x idx=%d\n",
 731              vaddr, paddr, prot, mmu_idx);
 732
 733    address = vaddr_page;
 734    if (size < TARGET_PAGE_SIZE) {
 735        /*
 736         * Slow-path the TLB entries; we will repeat the MMU check and TLB
 737         * fill on every access.
 738         */
 739        address |= TLB_RECHECK;
 740    }
 741    if (!memory_region_is_ram(section->mr) &&
 742        !memory_region_is_romd(section->mr)) {
 743        /* IO memory case */
 744        address |= TLB_MMIO;
 745        addend = 0;
 746    } else {
 747        /* TLB_MMIO for rom/romd handled below */
 748        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
 749    }
 750
 751    code_address = address;
 752    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page,
 753                                            paddr_page, xlat, prot, &address);
 754
 755    index = tlb_index(env, mmu_idx, vaddr_page);
 756    te = tlb_entry(env, mmu_idx, vaddr_page);
 757
 758    /*
 759     * Hold the TLB lock for the rest of the function. We could acquire/release
 760     * the lock several times in the function, but it is faster to amortize the
 761     * acquisition cost by acquiring it just once. Note that this leads to
 762     * a longer critical section, but this is not a concern since the TLB lock
 763     * is unlikely to be contended.
 764     */
 765    qemu_spin_lock(&tlb->c.lock);
 766
 767    /* Note that the tlb is no longer clean.  */
 768    tlb->c.dirty |= 1 << mmu_idx;
 769
 770    /* Make sure there's no cached translation for the new page.  */
 771    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);
 772
 773    /*
 774     * Only evict the old entry to the victim tlb if it's for a
 775     * different page; otherwise just overwrite the stale data.
 776     */
 777    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
 778        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
 779        CPUTLBEntry *tv = &desc->vtable[vidx];
 780
 781        /* Evict the old entry into the victim tlb.  */
 782        copy_tlb_helper_locked(tv, te);
 783        desc->viotlb[vidx] = desc->iotlb[index];
 784        tlb_n_used_entries_dec(env, mmu_idx);
 785    }
 786
 787    /* refill the tlb */
 788    /*
 789     * At this point iotlb contains a physical section number in the lower
 790     * TARGET_PAGE_BITS, and either
 791     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
 792     *  + the offset within section->mr of the page base (otherwise)
 793     * We subtract the vaddr_page (which is page aligned and thus won't
 794     * disturb the low bits) to give an offset which can be added to the
 795     * (non-page-aligned) vaddr of the eventual memory access to get
 796     * the MemoryRegion offset for the access. Note that the vaddr we
 797     * subtract here is that of the page base, and not the same as the
 798     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
 799     */
 800    desc->iotlb[index].addr = iotlb - vaddr_page;
 801    desc->iotlb[index].attrs = attrs;
 802
 803    /* Now calculate the new entry */
 804    tn.addend = addend - vaddr_page;
 805    if (prot & PAGE_READ) {
 806        tn.addr_read = address;
 807    } else {
 808        tn.addr_read = -1;
 809    }
 810
 811    if (prot & PAGE_EXEC) {
 812        tn.addr_code = code_address;
 813    } else {
 814        tn.addr_code = -1;
 815    }
 816
 817    tn.addr_write = -1;
 818    if (prot & PAGE_WRITE) {
 819        if ((memory_region_is_ram(section->mr) && section->readonly)
 820            || memory_region_is_romd(section->mr)) {
 821            /* Write access calls the I/O callback.  */
 822            tn.addr_write = address | TLB_MMIO;
 823        } else if (memory_region_is_ram(section->mr)
 824                   && cpu_physical_memory_is_clean(
 825                       memory_region_get_ram_addr(section->mr) + xlat)) {
 826            tn.addr_write = address | TLB_NOTDIRTY;
 827        } else {
 828            tn.addr_write = address;
 829        }
 830        if (prot & PAGE_WRITE_INV) {
 831            tn.addr_write |= TLB_INVALID_MASK;
 832        }
 833    }
 834
 835    copy_tlb_helper_locked(te, &tn);
 836    tlb_n_used_entries_inc(env, mmu_idx);
 837    qemu_spin_unlock(&tlb->c.lock);
 838}
 839
 840/* Add a new TLB entry, but without specifying the memory
 841 * transaction attributes to be used.
 842 */
 843void tlb_set_page(CPUState *cpu, target_ulong vaddr,
 844                  hwaddr paddr, int prot,
 845                  int mmu_idx, target_ulong size)
 846{
 847    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
 848                            prot, mmu_idx, size);
 849}
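
    /*
     * Illustrative sketch (not part of this file): a target's tlb_fill hook
     * typically walks the guest page tables and installs the translation via
     * tlb_set_page_with_attrs()/tlb_set_page(), roughly like:
     *
     *     static bool xyz_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
     *                                  MMUAccessType access_type, int mmu_idx,
     *                                  bool probe, uintptr_t retaddr)
     *     {
     *         hwaddr phys;
     *         int prot;
     *
     *         if (xyz_walk_page_table(cs, addr, access_type, mmu_idx,
     *                                 &phys, &prot)) {
     *             tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys, prot,
     *                          mmu_idx, TARGET_PAGE_SIZE);
     *             return true;
     *         }
     *         if (probe) {
     *             return false;
     *         }
     *         (raise the guest exception and unwind here, e.g. via
     *          cpu_loop_exit_restore(cs, retaddr))
     *     }
     *
     * xyz_cpu_tlb_fill() and xyz_walk_page_table() are hypothetical names.
     */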
 850
 851static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
 852{
 853    ram_addr_t ram_addr;
 854
 855    ram_addr = qemu_ram_addr_from_host(ptr);
 856    if (ram_addr == RAM_ADDR_INVALID) {
 857        error_report("Bad ram pointer %p", ptr);
 858        abort();
 859    }
 860    return ram_addr;
 861}
 862
 863/*
 864 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 865 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 866 * be discarded and looked up again (e.g. via tlb_entry()).
 867 */
 868static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
 869                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
 870{
 871    CPUClass *cc = CPU_GET_CLASS(cpu);
 872    bool ok;
 873
 874    /*
 875     * This is not a probe, so the only valid return is success; failure
 876     * should result in exception + longjmp to the cpu loop.
 877     */
 878    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
 879    assert(ok);
 880}
 881
 882static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
 883                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
 884                         MMUAccessType access_type, int size)
 885{
 886    CPUState *cpu = env_cpu(env);
 887    hwaddr mr_offset;
 888    MemoryRegionSection *section;
 889    MemoryRegion *mr;
 890    uint64_t val;
 891    bool locked = false;
 892    MemTxResult r;
 893
 894    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
 895    mr = section->mr;
 896    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
 897    cpu->mem_io_pc = retaddr;
 898    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
 899        cpu_io_recompile(cpu, retaddr);
 900    }
 901
 902    cpu->mem_io_vaddr = addr;
 903    cpu->mem_io_access_type = access_type;
 904
 905    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
 906        qemu_mutex_lock_iothread();
 907        locked = true;
 908    }
 909    r = memory_region_dispatch_read(mr, mr_offset,
 910                                    &val, size, iotlbentry->attrs);
 911    if (r != MEMTX_OK) {
 912        hwaddr physaddr = mr_offset +
 913            section->offset_within_address_space -
 914            section->offset_within_region;
 915
 916        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
 917                               mmu_idx, iotlbentry->attrs, r, retaddr);
 918    }
 919    if (locked) {
 920        qemu_mutex_unlock_iothread();
 921    }
 922
 923    return val;
 924}
 925
 926static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
 927                      int mmu_idx, uint64_t val, target_ulong addr,
 928                      uintptr_t retaddr, int size)
 929{
 930    CPUState *cpu = env_cpu(env);
 931    hwaddr mr_offset;
 932    MemoryRegionSection *section;
 933    MemoryRegion *mr;
 934    bool locked = false;
 935    MemTxResult r;
 936
 937    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
 938    mr = section->mr;
 939    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
 940    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
 941        cpu_io_recompile(cpu, retaddr);
 942    }
 943    cpu->mem_io_vaddr = addr;
 944    cpu->mem_io_pc = retaddr;
 945
 946    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
 947        qemu_mutex_lock_iothread();
 948        locked = true;
 949    }
 950    r = memory_region_dispatch_write(mr, mr_offset,
 951                                     val, size, iotlbentry->attrs);
 952    if (r != MEMTX_OK) {
 953        hwaddr physaddr = mr_offset +
 954            section->offset_within_address_space -
 955            section->offset_within_region;
 956
 957        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
 958                               mmu_idx, iotlbentry->attrs, r, retaddr);
 959    }
 960    if (locked) {
 961        qemu_mutex_unlock_iothread();
 962    }
 963}
 964
 965static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
 966{
 967#if TCG_OVERSIZED_GUEST
 968    return *(target_ulong *)((uintptr_t)entry + ofs);
 969#else
 970    /* ofs might correspond to .addr_write, so use atomic_read */
 971    return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
 972#endif
 973}
 974
 975/* Return true if ADDR is present in the victim tlb, and has been copied
 976   back to the main tlb.  */
 977static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
 978                           size_t elt_ofs, target_ulong page)
 979{
 980    size_t vidx;
 981
 982    assert_cpu_is_self(env_cpu(env));
 983    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
 984        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
 985        target_ulong cmp;
 986
 987        /* elt_ofs might correspond to .addr_write, so use atomic_read */
 988#if TCG_OVERSIZED_GUEST
 989        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
 990#else
 991        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
 992#endif
 993
 994        if (cmp == page) {
 995            /* Found entry in victim tlb, swap tlb and iotlb.  */
 996            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
 997
 998            qemu_spin_lock(&env_tlb(env)->c.lock);
 999            copy_tlb_helper_locked(&tmptlb, tlb);
1000            copy_tlb_helper_locked(tlb, vtlb);
1001            copy_tlb_helper_locked(vtlb, &tmptlb);
1002            qemu_spin_unlock(&env_tlb(env)->c.lock);
1003
1004            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1005            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
1006            tmpio = *io; *io = *vio; *vio = tmpio;
1007            return true;
1008        }
1009    }
1010    return false;
1011}
1012
1013/* Macro to call the above, with local variables from the use context.  */
1014#define VICTIM_TLB_HIT(TY, ADDR) \
1015  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
1016                 (ADDR) & TARGET_PAGE_MASK)
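
    /*
     * Typical use, as in get_page_addr_code() and probe_write() below: on a
     * main-TLB miss, check VICTIM_TLB_HIT() first and only call tlb_fill()
     * if the victim TLB misses as well.
     */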
1017
1018/* NOTE: this function can trigger an exception */
1019/* NOTE2: the returned address is not exactly the physical address: it
1020 * is actually a ram_addr_t (in system mode; the user mode emulation
1021 * version of this function returns a guest virtual address).
1022 */
1023tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1024{
1025    uintptr_t mmu_idx = cpu_mmu_index(env, true);
1026    uintptr_t index = tlb_index(env, mmu_idx, addr);
1027    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1028    void *p;
1029
1030    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
1031        if (!VICTIM_TLB_HIT(addr_code, addr)) {
1032            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
1033            index = tlb_index(env, mmu_idx, addr);
1034            entry = tlb_entry(env, mmu_idx, addr);
1035        }
1036        assert(tlb_hit(entry->addr_code, addr));
1037    }
1038
1039    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
1040        /*
1041         * Return -1 if we can't translate and execute from an entire
1042         * page of RAM here, which will cause us to execute by loading
1043         * and translating one insn at a time, without caching:
1044         *  - TLB_RECHECK: means the MMU protection covers a smaller range
1045         *    than a target page, so we must redo the MMU check every insn
1046         *  - TLB_MMIO: region is not backed by RAM
1047         */
1048        return -1;
1049    }
1050
1051    p = (void *)((uintptr_t)addr + entry->addend);
1052    return qemu_ram_addr_from_host_nofail(p);
1053}
1054
1055/* Probe for whether the specified guest write access is permitted.
1056 * If it is not permitted then an exception will be taken in the same
1057 * way as if this were a real write access (and we will not return).
1058 * Otherwise the function will return, and there will be a valid
1059 * entry in the TLB for this access.
1060 */
1061void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
1062                 uintptr_t retaddr)
1063{
1064    uintptr_t index = tlb_index(env, mmu_idx, addr);
1065    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1066
1067    if (!tlb_hit(tlb_addr_write(entry), addr)) {
1068        /* TLB entry is for a different page */
1069        if (!VICTIM_TLB_HIT(addr_write, addr)) {
1070            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1071                     mmu_idx, retaddr);
1072        }
1073    }
1074}
1075
1076void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1077                        MMUAccessType access_type, int mmu_idx)
1078{
1079    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1080    uintptr_t tlb_addr, page;
1081    size_t elt_ofs;
1082
1083    switch (access_type) {
1084    case MMU_DATA_LOAD:
1085        elt_ofs = offsetof(CPUTLBEntry, addr_read);
1086        break;
1087    case MMU_DATA_STORE:
1088        elt_ofs = offsetof(CPUTLBEntry, addr_write);
1089        break;
1090    case MMU_INST_FETCH:
1091        elt_ofs = offsetof(CPUTLBEntry, addr_code);
1092        break;
1093    default:
1094        g_assert_not_reached();
1095    }
1096
1097    page = addr & TARGET_PAGE_MASK;
1098    tlb_addr = tlb_read_ofs(entry, elt_ofs);
1099
1100    if (!tlb_hit_page(tlb_addr, page)) {
1101        uintptr_t index = tlb_index(env, mmu_idx, addr);
1102
1103        if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
1104            CPUState *cs = env_cpu(env);
1105            CPUClass *cc = CPU_GET_CLASS(cs);
1106
1107            if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
1108                /* Non-faulting page table read failed.  */
1109                return NULL;
1110            }
1111
1112            /* TLB resize via tlb_fill may have moved the entry.  */
1113            entry = tlb_entry(env, mmu_idx, addr);
1114        }
1115        tlb_addr = tlb_read_ofs(entry, elt_ofs);
1116    }
1117
1118    if (tlb_addr & ~TARGET_PAGE_MASK) {
1119        /* IO access */
1120        return NULL;
1121    }
1122
1123    return (void *)((uintptr_t)addr + entry->addend);
1124}
1125
1126/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
1127 * operations or I/O operations to proceed.  Return the host address.  */
1128static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
1129                               TCGMemOpIdx oi, uintptr_t retaddr,
1130                               NotDirtyInfo *ndi)
1131{
1132    size_t mmu_idx = get_mmuidx(oi);
1133    uintptr_t index = tlb_index(env, mmu_idx, addr);
1134    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1135    target_ulong tlb_addr = tlb_addr_write(tlbe);
1136    TCGMemOp mop = get_memop(oi);
1137    int a_bits = get_alignment_bits(mop);
1138    int s_bits = mop & MO_SIZE;
1139    void *hostaddr;
1140
1141    /* Adjust the given return address.  */
1142    retaddr -= GETPC_ADJ;
1143
1144    /* Enforce guest required alignment.  */
1145    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1146        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
1147        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1148                             mmu_idx, retaddr);
1149    }
1150
1151    /* Enforce qemu required alignment.  */
1152    if (unlikely(addr & ((1 << s_bits) - 1))) {
1153        /* We get here if guest alignment was not requested,
1154           or was not enforced by cpu_unaligned_access above.
1155           We might widen the access and emulate, but for now
1156           mark an exception and exit the cpu loop.  */
1157        goto stop_the_world;
1158    }
1159
1160    /* Check TLB entry and enforce page permissions.  */
1161    if (!tlb_hit(tlb_addr, addr)) {
1162        if (!VICTIM_TLB_HIT(addr_write, addr)) {
1163            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
1164                     mmu_idx, retaddr);
1165            index = tlb_index(env, mmu_idx, addr);
1166            tlbe = tlb_entry(env, mmu_idx, addr);
1167        }
1168        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
1169    }
1170
1171    /* Notice an IO access or a needs-MMU-lookup access */
1172    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
1173        /* There's really nothing that can be done to
1174           support this apart from stop-the-world.  */
1175        goto stop_the_world;
1176    }
1177
1178    /* Let the guest notice RMW on a write-only page.  */
1179    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1180        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
1181                 mmu_idx, retaddr);
1182        /* Since we don't support reads and writes to different addresses,
1183           and we do have the proper page loaded for write, this shouldn't
1184           ever return.  But just in case, handle via stop-the-world.  */
1185        goto stop_the_world;
1186    }
1187
1188    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1189
1190    ndi->active = false;
1191    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
1192        ndi->active = true;
1193        memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
1194                                      qemu_ram_addr_from_host_nofail(hostaddr),
1195                                      1 << s_bits);
1196    }
1197
1198    return hostaddr;
1199
1200 stop_the_world:
1201    cpu_loop_exit_atomic(env_cpu(env), retaddr);
1202}
1203
1204#ifdef TARGET_WORDS_BIGENDIAN
1205#define NEED_BE_BSWAP 0
1206#define NEED_LE_BSWAP 1
1207#else
1208#define NEED_BE_BSWAP 1
1209#define NEED_LE_BSWAP 0
1210#endif
1211
1212/*
1213 * Byte Swap Helper
1214 *
1215 * All of this should be optimized away as dead code, based on the
1216 * compile-time target endianness and the access type.
1217 */
1218
1219static inline uint64_t handle_bswap(uint64_t val, int size, bool big_endian)
1220{
1221    if ((big_endian && NEED_BE_BSWAP) || (!big_endian && NEED_LE_BSWAP)) {
1222        switch (size) {
1223        case 1: return val;
1224        case 2: return bswap16(val);
1225        case 4: return bswap32(val);
1226        case 8: return bswap64(val);
1227        default:
1228            g_assert_not_reached();
1229        }
1230    } else {
1231        return val;
1232    }
1233}
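
    /*
     * For example, for a little-endian target (TARGET_WORDS_BIGENDIAN
     * undefined), NEED_BE_BSWAP is 1, so a 4-byte big-endian access is
     * passed through bswap32() while little-endian accesses are returned
     * unchanged.
     */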
1234
1235/*
1236 * Load Helpers
1237 *
1238 * We support two different access types. SOFTMMU_CODE_ACCESS is
1239 * specifically for reading instructions from system memory. It is
1240 * called by the translation loop and in some helpers where the code
1241 * is disassembled. It shouldn't be called directly by guest code.
1242 */
1243
1244typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
1245                                TCGMemOpIdx oi, uintptr_t retaddr);
1246
1247static inline uint64_t __attribute__((always_inline))
1248load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
1249            uintptr_t retaddr, size_t size, bool big_endian, bool code_read,
1250            FullLoadHelper *full_load)
1251{
1252    uintptr_t mmu_idx = get_mmuidx(oi);
1253    uintptr_t index = tlb_index(env, mmu_idx, addr);
1254    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1255    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1256    const size_t tlb_off = code_read ?
1257        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
1258    const MMUAccessType access_type =
1259        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
1260    unsigned a_bits = get_alignment_bits(get_memop(oi));
1261    void *haddr;
1262    uint64_t res;
1263
1264    /* Handle CPU specific unaligned behaviour */
1265    if (addr & ((1 << a_bits) - 1)) {
1266        cpu_unaligned_access(env_cpu(env), addr, access_type,
1267                             mmu_idx, retaddr);
1268    }
1269
1270    /* If the TLB entry is for a different page, reload and try again.  */
1271    if (!tlb_hit(tlb_addr, addr)) {
1272        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1273                            addr & TARGET_PAGE_MASK)) {
1274            tlb_fill(env_cpu(env), addr, size,
1275                     access_type, mmu_idx, retaddr);
1276            index = tlb_index(env, mmu_idx, addr);
1277            entry = tlb_entry(env, mmu_idx, addr);
1278        }
1279        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1280    }
1281
1282    /* Handle an IO access.  */
1283    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1284        if ((addr & (size - 1)) != 0) {
1285            goto do_unaligned_access;
1286        }
1287
1288        if (tlb_addr & TLB_RECHECK) {
1289            /*
1290             * This is a TLB_RECHECK access, where the MMU protection
1291             * covers a smaller range than a target page, and we must
1292             * repeat the MMU check here. This tlb_fill() call might
1293             * longjump out if this access should cause a guest exception.
1294             */
1295            tlb_fill(env_cpu(env), addr, size,
1296                     access_type, mmu_idx, retaddr);
1297            index = tlb_index(env, mmu_idx, addr);
1298            entry = tlb_entry(env, mmu_idx, addr);
1299
1300            tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1301            tlb_addr &= ~TLB_RECHECK;
1302            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1303                /* RAM access */
1304                goto do_aligned_access;
1305            }
1306        }
1307
1308        res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index],
1309                       mmu_idx, addr, retaddr, access_type, size);
1310        return handle_bswap(res, size, big_endian);
1311    }
1312
1313    /* Handle slow unaligned access (it spans two pages or IO).  */
1314    if (size > 1
1315        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1316                    >= TARGET_PAGE_SIZE)) {
1317        target_ulong addr1, addr2;
1318        uint64_t r1, r2;
1319        unsigned shift;
1320    do_unaligned_access:
1321        addr1 = addr & ~((target_ulong)size - 1);
1322        addr2 = addr1 + size;
1323        r1 = full_load(env, addr1, oi, retaddr);
1324        r2 = full_load(env, addr2, oi, retaddr);
1325        shift = (addr & (size - 1)) * 8;
1326
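        /*
         * Example: a 4-byte little-endian load with (addr & 3) == 3 gives
         * shift == 24, so the result combines the most significant byte of
         * r1 (the byte at 'addr') with the low three bytes of r2.
         */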
1327        if (big_endian) {
1328            /* Big-endian combine.  */
1329            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
1330        } else {
1331            /* Little-endian combine.  */
1332            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
1333        }
1334        return res & MAKE_64BIT_MASK(0, size * 8);
1335    }
1336
1337 do_aligned_access:
1338    haddr = (void *)((uintptr_t)addr + entry->addend);
1339    switch (size) {
1340    case 1:
1341        res = ldub_p(haddr);
1342        break;
1343    case 2:
1344        if (big_endian) {
1345            res = lduw_be_p(haddr);
1346        } else {
1347            res = lduw_le_p(haddr);
1348        }
1349        break;
1350    case 4:
1351        if (big_endian) {
1352            res = (uint32_t)ldl_be_p(haddr);
1353        } else {
1354            res = (uint32_t)ldl_le_p(haddr);
1355        }
1356        break;
1357    case 8:
1358        if (big_endian) {
1359            res = ldq_be_p(haddr);
1360        } else {
1361            res = ldq_le_p(haddr);
1362        }
1363        break;
1364    default:
1365        g_assert_not_reached();
1366    }
1367
1368    return res;
1369}
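
    /*
     * The helpers below call load_helper() with compile-time-constant size,
     * endianness and code_read arguments; combined with the always_inline
     * attribute this lets the compiler specialize each helper and discard
     * the unused branches.
     */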
1370
1371/*
1372 * For the benefit of TCG generated code, we want to avoid the
1373 * complication of ABI-specific return type promotion and always
1374 * return a value extended to the register size of the host. This is
1375 * tcg_target_long, except in the case of a 32-bit host and 64-bit
1376 * data, and for that we always have uint64_t.
1377 *
1378 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
1379 */
1380
1381static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
1382                              TCGMemOpIdx oi, uintptr_t retaddr)
1383{
1384    return load_helper(env, addr, oi, retaddr, 1, false, false,
1385                       full_ldub_mmu);
1386}
1387
1388tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
1389                                     TCGMemOpIdx oi, uintptr_t retaddr)
1390{
1391    return full_ldub_mmu(env, addr, oi, retaddr);
1392}
1393
1394static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1395                                 TCGMemOpIdx oi, uintptr_t retaddr)
1396{
1397    return load_helper(env, addr, oi, retaddr, 2, false, false,
1398                       full_le_lduw_mmu);
1399}
1400
1401tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
1402                                    TCGMemOpIdx oi, uintptr_t retaddr)
1403{
1404    return full_le_lduw_mmu(env, addr, oi, retaddr);
1405}
1406
1407static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1408                                 TCGMemOpIdx oi, uintptr_t retaddr)
1409{
1410    return load_helper(env, addr, oi, retaddr, 2, true, false,
1411                       full_be_lduw_mmu);
1412}
1413
1414tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
1415                                    TCGMemOpIdx oi, uintptr_t retaddr)
1416{
1417    return full_be_lduw_mmu(env, addr, oi, retaddr);
1418}
1419
1420static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1421                                 TCGMemOpIdx oi, uintptr_t retaddr)
1422{
1423    return load_helper(env, addr, oi, retaddr, 4, false, false,
1424                       full_le_ldul_mmu);
1425}
1426
1427tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
1428                                    TCGMemOpIdx oi, uintptr_t retaddr)
1429{
1430    return full_le_ldul_mmu(env, addr, oi, retaddr);
1431}
1432
1433static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1434                                 TCGMemOpIdx oi, uintptr_t retaddr)
1435{
1436    return load_helper(env, addr, oi, retaddr, 4, true, false,
1437                       full_be_ldul_mmu);
1438}
1439
1440tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
1441                                    TCGMemOpIdx oi, uintptr_t retaddr)
1442{
1443    return full_be_ldul_mmu(env, addr, oi, retaddr);
1444}
1445
1446uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
1447                           TCGMemOpIdx oi, uintptr_t retaddr)
1448{
1449    return load_helper(env, addr, oi, retaddr, 8, false, false,
1450                       helper_le_ldq_mmu);
1451}
1452
1453uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
1454                           TCGMemOpIdx oi, uintptr_t retaddr)
1455{
1456    return load_helper(env, addr, oi, retaddr, 8, true, false,
1457                       helper_be_ldq_mmu);
1458}
1459
1460/*
1461 * Provide signed versions of the load routines as well.  We can of course
1462 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
1463 */
1464
1465
1466tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1467                                     TCGMemOpIdx oi, uintptr_t retaddr)
1468{
1469    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
1470}
1471
1472tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1473                                    TCGMemOpIdx oi, uintptr_t retaddr)
1474{
1475    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
1476}
1477
1478tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1479                                    TCGMemOpIdx oi, uintptr_t retaddr)
1480{
1481    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
1482}
1483
1484tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1485                                    TCGMemOpIdx oi, uintptr_t retaddr)
1486{
1487    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
1488}
1489
1490tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1491                                    TCGMemOpIdx oi, uintptr_t retaddr)
1492{
1493    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
1494}
1495
1496/*
1497 * Store Helpers
1498 */
1499
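/*
 * store_helper mirrors load_helper: SIZE and BIG_ENDIAN are compile-time
 * constants at every call site below, so with the always_inline attribute
 * each helper_*_st*_mmu wrapper specializes to a single access size and
 * endianness and the compiler can fold away the dead branches.
 */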
1500static inline void __attribute__((always_inline))
1501store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
1502             TCGMemOpIdx oi, uintptr_t retaddr, size_t size, bool big_endian)
1503{
1504    uintptr_t mmu_idx = get_mmuidx(oi);
1505    uintptr_t index = tlb_index(env, mmu_idx, addr);
1506    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
1507    target_ulong tlb_addr = tlb_addr_write(entry);
1508    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
1509    unsigned a_bits = get_alignment_bits(get_memop(oi));
1510    void *haddr;
1511
1512    /* Handle CPU specific unaligned behaviour */
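    /*
     * a_bits comes from the MO_ALIGN* bits of the memop; e.g. a_bits == 2
     * requires 4-byte alignment, so if either of the low two address bits
     * is set, cpu_unaligned_access() is called to apply the target's
     * unaligned-access behaviour before the TLB is consulted.
     */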
1513    if (addr & ((1 << a_bits) - 1)) {
1514        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
1515                             mmu_idx, retaddr);
1516    }
1517
1518    /* If the TLB entry is for a different page, reload and try again.  */
1519    if (!tlb_hit(tlb_addr, addr)) {
1520        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1521            addr & TARGET_PAGE_MASK)) {
1522            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1523                     mmu_idx, retaddr);
1524            index = tlb_index(env, mmu_idx, addr);
1525            entry = tlb_entry(env, mmu_idx, addr);
1526        }
1527        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
1528    }
1529
1530    /* Handle an IO access.  */
1531    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
1532        if ((addr & (size - 1)) != 0) {
1533            goto do_unaligned_access;
1534        }
1535
1536        if (tlb_addr & TLB_RECHECK) {
1537            /*
1538             * This is a TLB_RECHECK access, where the MMU protection
1539             * covers a smaller range than a target page, and we must
1540             * repeat the MMU check here. This tlb_fill() call might
1541             * longjump out if this access should cause a guest exception.
1542             */
1543            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
1544                     mmu_idx, retaddr);
1545            index = tlb_index(env, mmu_idx, addr);
1546            entry = tlb_entry(env, mmu_idx, addr);
1547
1548            tlb_addr = tlb_addr_write(entry);
1549            tlb_addr &= ~TLB_RECHECK;
1550            if (!(tlb_addr & ~TARGET_PAGE_MASK)) {
1551                /* RAM access */
1552                goto do_aligned_access;
1553            }
1554        }
1555
1556        io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx,
1557                  handle_bswap(val, size, big_endian),
1558                  addr, retaddr, size);
1559        return;
1560    }
1561
1562    /* Handle slow unaligned access (it spans two pages or IO).  */
1563    if (size > 1
1564        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1565                     >= TARGET_PAGE_SIZE)) {
1566        int i;
1567        uintptr_t index2;
1568        CPUTLBEntry *entry2;
1569        target_ulong page2, tlb_addr2;
1570    do_unaligned_access:
1571        /*
1572         * Ensure the second page is in the TLB.  Note that the first page
1573         * is already guaranteed to be filled, and that the second page
1574         * cannot evict the first.
1575         */
1576        page2 = (addr + size) & TARGET_PAGE_MASK;
1577        index2 = tlb_index(env, mmu_idx, page2);
1578        entry2 = tlb_entry(env, mmu_idx, page2);
1579        tlb_addr2 = tlb_addr_write(entry2);
1580        if (!tlb_hit_page(tlb_addr2, page2)
1581            && !victim_tlb_hit(env, mmu_idx, index2, tlb_off,
1582                               page2 & TARGET_PAGE_MASK)) {
1583            tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE,
1584                     mmu_idx, retaddr);
1585        }
1586
1587        /*
1588         * XXX: not efficient, but simple.
1589         * This loop must go in the forward direction to avoid issues
1590         * with self-modifying code in 64-bit Windows guests.
1591         */
1592        for (i = 0; i < size; ++i) {
1593            uint8_t val8;
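            /*
             * Extract byte i of val in guest memory order: for a 4-byte
             * big-endian store, i == 0 takes bits [31:24] and i == 3 takes
             * bits [7:0]; the little-endian case is the mirror image.
             */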
1594            if (big_endian) {
1595                /* Big-endian extract.  */
1596                val8 = val >> (((size - 1) * 8) - (i * 8));
1597            } else {
1598                /* Little-endian extract.  */
1599                val8 = val >> (i * 8);
1600            }
1601            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
1602        }
1603        return;
1604    }
1605
1606 do_aligned_access:
1607    haddr = (void *)((uintptr_t)addr + entry->addend);
1608    switch (size) {
1609    case 1:
1610        stb_p(haddr, val);
1611        break;
1612    case 2:
1613        if (big_endian) {
1614            stw_be_p(haddr, val);
1615        } else {
1616            stw_le_p(haddr, val);
1617        }
1618        break;
1619    case 4:
1620        if (big_endian) {
1621            stl_be_p(haddr, val);
1622        } else {
1623            stl_le_p(haddr, val);
1624        }
1625        break;
1626    case 8:
1627        if (big_endian) {
1628            stq_be_p(haddr, val);
1629        } else {
1630            stq_le_p(haddr, val);
1631        }
1632        break;
1633    default:
1634        g_assert_not_reached();
1635        break;
1636    }
1637}
1638
1639void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1640                        TCGMemOpIdx oi, uintptr_t retaddr)
1641{
1642    store_helper(env, addr, val, oi, retaddr, 1, false);
1643}
1644
1645void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1646                       TCGMemOpIdx oi, uintptr_t retaddr)
1647{
1648    store_helper(env, addr, val, oi, retaddr, 2, false);
1649}
1650
1651void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1652                       TCGMemOpIdx oi, uintptr_t retaddr)
1653{
1654    store_helper(env, addr, val, oi, retaddr, 2, true);
1655}
1656
1657void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1658                       TCGMemOpIdx oi, uintptr_t retaddr)
1659{
1660    store_helper(env, addr, val, oi, retaddr, 4, false);
1661}
1662
1663void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1664                       TCGMemOpIdx oi, uintptr_t retaddr)
1665{
1666    store_helper(env, addr, val, oi, retaddr, 4, true);
1667}
1668
1669void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1670                       TCGMemOpIdx oi, uintptr_t retaddr)
1671{
1672    store_helper(env, addr, val, oi, retaddr, 8, false);
1673}
1674
1675void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1676                       TCGMemOpIdx oi, uintptr_t retaddr)
1677{
1678    store_helper(env, addr, val, oi, retaddr, 8, true);
1679}
1680
1681/* The first set of helpers allows OI and RETADDR to be passed in explicitly,
1682   which makes them callable from other helpers.  */
1683
1684#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
1685#define ATOMIC_NAME(X) \
1686    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
1687#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
1688#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
1689#define ATOMIC_MMU_CLEANUP                              \
1690    do {                                                \
1691        if (unlikely(ndi.active)) {                     \
1692            memory_notdirty_write_complete(&ndi);       \
1693        }                                               \
1694    } while (0)
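/*
 * Each inclusion of "atomic_template.h" instantiates the atomic helpers
 * for one access size.  As a rough sketch of the name expansion, with
 * DATA_SIZE 4 the template's cmpxchg helper becomes something like
 *   helper_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, retaddr)
 * which obtains the host address via atomic_mmu_lookup() and performs
 * the operation there.
 */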
1695
1696#define DATA_SIZE 1
1697#include "atomic_template.h"
1698
1699#define DATA_SIZE 2
1700#include "atomic_template.h"
1701
1702#define DATA_SIZE 4
1703#include "atomic_template.h"
1704
1705#ifdef CONFIG_ATOMIC64
1706#define DATA_SIZE 8
1707#include "atomic_template.h"
1708#endif
1709
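/*
 * The 16-byte helpers are only generated when the host can actually
 * implement them, i.e. with a 128-bit cmpxchg or true 128-bit atomics.
 */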
1710#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
1711#define DATA_SIZE 16
1712#include "atomic_template.h"
1713#endif
1714
1715/* The second set of helpers is directly callable from TCG as helpers.  */
1716
1717#undef EXTRA_ARGS
1718#undef ATOMIC_NAME
1719#undef ATOMIC_MMU_LOOKUP
1720#define EXTRA_ARGS         , TCGMemOpIdx oi
1721#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
1722#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)
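/*
 * With no explicit retaddr argument, GETPC() captures the return address
 * into the TCG-generated code that called the helper, which is what
 * tlb_fill() needs in order to unwind a faulting access.
 */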
1723
1724#define DATA_SIZE 1
1725#include "atomic_template.h"
1726
1727#define DATA_SIZE 2
1728#include "atomic_template.h"
1729
1730#define DATA_SIZE 4
1731#include "atomic_template.h"
1732
1733#ifdef CONFIG_ATOMIC64
1734#define DATA_SIZE 8
1735#include "atomic_template.h"
1736#endif
1737
1738/* Code access functions.  */
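/*
 * The "_cmmu" variants pass code_read == true to load_helper and are used
 * when the access is an instruction fetch rather than a data load, e.g.
 * by the cpu_ld*_code() routines during translation.
 */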
1739
1740static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
1741                               TCGMemOpIdx oi, uintptr_t retaddr)
1742{
1743    return load_helper(env, addr, oi, retaddr, 1, false, true,
1744                       full_ldub_cmmu);
1745}
1746
1747uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
1748                            TCGMemOpIdx oi, uintptr_t retaddr)
1749{
1750    return full_ldub_cmmu(env, addr, oi, retaddr);
1751}
1752
1753static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
1754                                  TCGMemOpIdx oi, uintptr_t retaddr)
1755{
1756    return load_helper(env, addr, oi, retaddr, 2, false, true,
1757                       full_le_lduw_cmmu);
1758}
1759
1760uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
1761                            TCGMemOpIdx oi, uintptr_t retaddr)
1762{
1763    return full_le_lduw_cmmu(env, addr, oi, retaddr);
1764}
1765
1766static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
1767                                  TCGMemOpIdx oi, uintptr_t retaddr)
1768{
1769    return load_helper(env, addr, oi, retaddr, 2, true, true,
1770                       full_be_lduw_cmmu);
1771}
1772
1773uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
1774                            TCGMemOpIdx oi, uintptr_t retaddr)
1775{
1776    return full_be_lduw_cmmu(env, addr, oi, retaddr);
1777}
1778
1779static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
1780                                  TCGMemOpIdx oi, uintptr_t retaddr)
1781{
1782    return load_helper(env, addr, oi, retaddr, 4, false, true,
1783                       full_le_ldul_cmmu);
1784}
1785
1786uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
1787                            TCGMemOpIdx oi, uintptr_t retaddr)
1788{
1789    return full_le_ldul_cmmu(env, addr, oi, retaddr);
1790}
1791
1792static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
1793                                  TCGMemOpIdx oi, uintptr_t retaddr)
1794{
1795    return load_helper(env, addr, oi, retaddr, 4, true, true,
1796                       full_be_ldul_cmmu);
1797}
1798
1799uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
1800                            TCGMemOpIdx oi, uintptr_t retaddr)
1801{
1802    return full_be_ldul_cmmu(env, addr, oi, retaddr);
1803}
1804
1805uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
1806                            TCGMemOpIdx oi, uintptr_t retaddr)
1807{
1808    return load_helper(env, addr, oi, retaddr, 8, false, true,
1809                       helper_le_ldq_cmmu);
1810}
1811
1812uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
1813                            TCGMemOpIdx oi, uintptr_t retaddr)
1814{
1815    return load_helper(env, addr, oi, retaddr, 8, true, true,
1816                       helper_be_ldq_cmmu);
1817}
1818