qemu/accel/tcg/cputlb.c
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)
#define assert_cpu_is_self(this_cpu) do {                         \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!this_cpu->created ||                        \
                     qemu_cpu_is_self(this_cpu));                 \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

/* flush_all_helper: run fn across all cpus
 *
 * Queue fn as async work on every vCPU except the source. The caller
 * is responsible for running (or queueing) the helper on the source
 * vCPU itself; the *_synced variants queue it as "safe" work, creating
 * a synchronisation point where all queued work is finished before
 * execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

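/* Return the cumulative number of full TLB flushes performed so far,
 * summed across all vCPUs.  The per-CPU counters are read with
 * atomic_read so this can be called from any thread, but the total is
 * only a statistical snapshot.
 */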
size_t tlb_flush_count(void)
{
    CPUState *cpu;
    size_t count = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        count += atomic_read(&env->tlb_flush_count);
    }
    return count;
}

/* This is OK because CPU architectures generally permit an
 * implementation to drop entries from the TLB at any time, so
 * flushing more entries than required is only an efficiency issue,
 * not a correctness issue.
 */
static void tlb_flush_nocheck(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    /* The QOM tests will trigger tlb_flushes without setting up TCG
     * so we bug out here in that case.
     */
    if (!tcg_enabled()) {
        return;
    }

    assert_cpu_is_self(cpu);
    atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
    tlb_debug("(count: %zu)\n", tlb_flush_count());

    tb_lock();

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    cpu_tb_jmp_cache_clear(cpu);

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

    tb_unlock();

    atomic_mb_set(&cpu->pending_tlb_flush, 0);
}

static void tlb_flush_global_async_work(CPUState *cpu, run_on_cpu_data data)
{
    tlb_flush_nocheck(cpu);
}

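/* Flush this vCPU's entire TLB.  If called from a thread other than
 * the vCPU's own, the flush is deferred as async work (and coalesced
 * with any full flush that is already pending); otherwise it is
 * performed immediately.
 */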
void tlb_flush(CPUState *cpu)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        if (atomic_mb_read(&cpu->pending_tlb_flush) != ALL_MMUIDX_BITS) {
            atomic_mb_set(&cpu->pending_tlb_flush, ALL_MMUIDX_BITS);
            async_run_on_cpu(cpu, tlb_flush_global_async_work,
                             RUN_ON_CPU_NULL);
        }
    } else {
        tlb_flush_nocheck(cpu);
    }
}

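/* Flush the TLBs of every vCPU: the flush is queued as async work on
 * all other vCPUs and run immediately on the source.  The _synced
 * variant instead queues the source's flush as "safe" work so that all
 * flushes have completed before execution resumes.
 */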
void tlb_flush_all_cpus(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    fn(src_cpu, RUN_ON_CPU_NULL);
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    const run_on_cpu_func fn = tlb_flush_global_async_work;
    flush_all_helper(src_cpu, fn, RUN_ON_CPU_NULL);
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_NULL);
}

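/* Async work item: flush only the TLB entries of the MMU indexes whose
 * bits are set in data.host_int, leaving the other modes untouched.
 */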
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    unsigned long mmu_idx_bitmask = data.host_int;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tb_lock();

    tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

        if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
            tlb_debug("%d\n", mmu_idx);

            memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
            memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
        }
    }

    cpu_tb_jmp_cache_clear(cpu);

    tlb_debug("done\n");

    tb_unlock();
}

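/* Flush the TLB entries of the MMU indexes in idxmap on this vCPU.
 * Cross-thread calls are deferred as async work; indexes that already
 * have a flush pending are not queued again.
 */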
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (!qemu_cpu_is_self(cpu)) {
        uint16_t pending_flushes = idxmap;
        pending_flushes &= ~atomic_mb_read(&cpu->pending_tlb_flush);

        if (pending_flushes) {
            tlb_debug("reduced mmu_idx: 0x%" PRIx16 "\n", pending_flushes);

            atomic_or(&cpu->pending_tlb_flush, pending_flushes);
            async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                             RUN_ON_CPU_HOST_INT(pending_flushes));
        }
    } else {
        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                         uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

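/* Invalidate a single TLB entry if it maps addr for any access type
 * (read, write or code fetch).
 */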
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

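/* Async work item: flush every entry (main and victim TLB, all MMU
 * modes) that maps the page containing data.target_ptr.  Falls back to
 * a full flush if the page lies within a tracked large-page region.
 */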
static void tlb_flush_page_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu);
        return;
    }

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

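/* Flush a single page from this vCPU's TLB, deferring to async work
 * when called from another thread.
 */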
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr));
    } else {
        tlb_flush_page_async_work(cpu, RUN_ON_CPU_TARGET_PTR(addr));
    }
}

/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);

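/* Async work item: data.target_ptr carries a page-aligned address with
 * an MMU index bitmap packed into its low bits.  Flush that page from
 * the main and victim TLBs of each selected MMU mode.
 */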
static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int page = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    int mmu_idx;
    int i;

    assert_cpu_is_self(cpu);

    tlb_debug("page:%d addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
              page, addr, mmu_idx_bitmap);

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_entry(&env->tlb_table[mmu_idx][page], addr);

            /* check whether there are vtlb entries that need to be flushed */
            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_flush_entry(&env->tlb_v_table[mmu_idx][i], addr);
            }
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

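/* As above, but first check the large-page region: if the page falls
 * inside it, the per-page flush is widened to a flush of the whole TLB
 * for the selected MMU indexes.
 */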
static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                          run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;

    tlb_debug("addr:"TARGET_FMT_lx" mmu_idx: %04lx\n", addr, mmu_idx_bitmap);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush_by_mmuidx_async_work(cpu,
                                       RUN_ON_CPU_HOST_INT(mmu_idx_bitmap));
    } else {
        tlb_flush_page_by_mmuidx_async_work(cpu, data);
    }
}

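/* Flush one page for the MMU indexes in idxmap on this vCPU.  The page
 * address and the index bitmap are packed into a single target_ulong
 * so they can be passed as one run_on_cpu_data argument.
 */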
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_check_page_and_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_check_page_and_flush_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_check_page_and_flush_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    fn(src, RUN_ON_CPU_TARGET_PTR(addr));
}

void tlb_flush_page_all_cpus_synced(CPUState *src,
                                    target_ulong addr)
{
    const run_on_cpu_func fn = tlb_flush_page_async_work;

    flush_all_helper(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
    async_safe_run_on_cpu(src, fn, RUN_ON_CPU_TARGET_PTR(addr));
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Because we want other vCPUs to respond to changes straight away we
 * update the te->addr_write field atomically. If the TLB entry has
 * been changed by the vCPU in the mean time we skip the update.
 *
 * As this function uses atomic accesses we also need to ensure
 * updates to tlb_entries follow the same access rules. We don't need
 * to worry about this for oversized guests as MTTCG is disabled for
 * them.
 */

static void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                                  uintptr_t length)
{
#if TCG_OVERSIZED_GUEST
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
#else
    /* paired with atomic_mb_set in tlb_set_page_with_attrs */
    uintptr_t orig_addr = atomic_mb_read(&tlb_entry->addr_write);
    uintptr_t addr = orig_addr;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += atomic_read(&tlb_entry->addend);
        if ((addr - start) < length) {
            uintptr_t notdirty_addr = orig_addr | TLB_NOTDIRTY;
            atomic_cmpxchg(&tlb_entry->addr_write, orig_addr, notdirty_addr);
        }
    }
#endif
}

/* For atomic correctness when running MTTCG we need to use the right
 * primitives when copying entries */
static inline void copy_tlb_helper(CPUTLBEntry *d, CPUTLBEntry *s,
                                   bool atomic_set)
{
#if TCG_OVERSIZED_GUEST
    *d = *s;
#else
    if (atomic_set) {
        d->addr_read = s->addr_read;
        d->addr_code = s->addr_code;
        atomic_set(&d->addend, atomic_read(&s->addend));
        /* Pairs with flag setting in tlb_reset_dirty_range */
        atomic_mb_set(&d->addr_write, atomic_read(&s->addr_write));
    } else {
        d->addr_read = s->addr_read;
        d->addr_write = atomic_read(&s->addr_write);
        d->addr_code = s->addr_code;
        d->addend = atomic_read(&s->addend);
    }
#endif
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU). As such care needs to be taken that we don't
 * dangerously race with another vCPU update. The only thing actually
 * updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

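/* Re-enable fast-path stores to vaddr by clearing TLB_NOTDIRTY from a
 * matching entry.
 */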
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, *tv, tn;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert_cpu_is_self(cpu);
    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];
    /* do not discard the translation in te, evict it into a victim tlb */
    tv = &env->tlb_v_table[mmu_idx][vidx];

    /* addr_write can race with tlb_reset_dirty_range */
    copy_tlb_helper(tv, te, true);

    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
    }

    /* Pairs with flag setting in tlb_reset_dirty_range */
    copy_tlb_helper(te, &tn, true);
    /* atomic_mb_set(&te->addr_write, write_address); */
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

static void report_bad_exec(CPUState *cpu, target_ulong addr)
{
    /* Accidentally executing outside RAM or ROM is quite common for
     * several user-error situations, so report it in a way that
     * makes it clear that this isn't a QEMU bug and provide suggestions
     * about what a user could do to fix things.
     */
    error_report("Trying to execute code outside RAM or ROM at 0x"
                 TARGET_FMT_lx, addr);
    error_printf("This usually means one of the following happened:\n\n"
                 "(1) You told QEMU to execute a kernel for the wrong machine "
                 "type, and it crashed on startup (eg trying to run a "
                 "raspberry pi kernel on a versatilepb QEMU machine)\n"
                 "(2) You didn't give QEMU a kernel or BIOS filename at all, "
                 "and QEMU executed a ROM full of no-op instructions until "
                 "it fell off the end\n"
                 "(3) Your guest kernel has a bug and crashed by jumping "
                 "off into nowhere\n\n"
                 "This is almost always one of the first two, so check your "
                 "command line and that you are using the right type of kernel "
                 "for this machine.\n"
                 "If you think option (3) is likely then you can try debugging "
                 "your guest with the -d debug options; in particular "
                 "-d guest_errors will cause the log to include a dump of the "
                 "guest register state at this point.\n\n"
                 "Execution cannot continue; stopping here.\n\n");

    /* Report also to the logs, with more detail including register dump */
    qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
                  "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
    log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

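/* Slow-path load from an I/O (or otherwise non-RAM-backed) region:
 * dispatch the access through the memory API, taking the iothread lock
 * for regions that require the global lock and reporting any
 * transaction failure back to the CPU.
 */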
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, physaddr,
                                    &val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}

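/* Slow-path store counterpart of io_readx. */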
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx,
                      uint64_t val, target_ulong addr,
                      uintptr_t retaddr, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
    bool locked = false;
    MemTxResult r;

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, physaddr,
                                     val, size, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
        target_ulong cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

            copy_tlb_helper(&tmptlb, tlb, false);
            copy_tlb_helper(tlb, vtlb, true);
            copy_tlb_helper(vtlb, &tmptlb, true);

            CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
            CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}

/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    int mmu_idx, index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env);
    CPUIOTLBEntry *iotlbentry;
    hwaddr physaddr;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env, true);
    if (unlikely(env->tlb_table[mmu_idx][index].addr_code !=
                 (addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))) {
        if (!VICTIM_TLB_HIT(addr_read, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
        }
    }
    iotlbentry = &env->iotlb[mmu_idx][index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        qemu_mutex_lock_iothread();
        if (memory_region_request_mmio_ptr(mr, addr)) {
            qemu_mutex_unlock_iothread();
            /* A MemoryRegion is potentially added so re-run the
             * get_page_addr_code.
             */
            return get_page_addr_code(env, addr);
        }
        qemu_mutex_unlock_iothread();

        /* Give the new-style cpu_transaction_failed() hook first chance
         * to handle this.
         * This is not the ideal place to detect and generate CPU
         * exceptions for instruction fetch failure (for instance
         * we don't know the length of the access that the CPU would
         * use, and it would be better to go ahead and try the access
         * and use the MemTXResult it produced). However it is the
         * simplest place we have currently available for the check.
         */
        physaddr = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
        cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
                               iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);

        cpu_unassigned_access(cpu, addr, false, true, 0, 4);
        /* The CPU's unassigned access hook might have longjumped out
         * with an exception. If it didn't (or there was no hook) then
         * we can't proceed further.
         */
        report_bad_exec(cpu, addr);
        exit(1);
    }
    p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    size_t index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *tlbe = &env->tlb_table[mmu_idx][index];
    target_ulong tlb_addr = tlbe->addr_write;
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
        tlb_addr = tlbe->addr_write & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access  */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS     , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#ifdef CONFIG_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"