qemu/include/exec/cpu-all.h
/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "exec/tswap.h"
#include "qemu/thread.h"
#include "hw/core/cpu.h"
#include "qemu/rcu.h"

#define EXCP_INTERRUPT  0x10000 /* async interruption */
#define EXCP_HLT        0x10001 /* hlt instruction reached */
#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC     0x10005 /* stop-the-world and emulate atomic */

/* some important defines:
 *
 * HOST_BIG_ENDIAN : whether the host cpu is big endian (and
 * otherwise little endian).
 *
 * TARGET_BIG_ENDIAN : same for the target cpu
 */

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
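
/*
 * Illustrative sketch (editor's example, not part of this header):
 * tswapl() converts a target_long-sized value between host and target
 * byte order, and compiles to a no-op when BSWAP_NEEDED is undefined.
 * For a 32-bit target on an opposite-endian host:
 *
 *     target_ulong v = tswapl(0x12345678);    yields 0x78563412
 *
 * tswapls() performs the same swap in place through a pointer.
 */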

/* Target-endianness CPU memory access functions. These fit into the
 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
 */
#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
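
/*
 * Illustrative sketch (editor's example, not part of this header):
 * these wrappers select the target byte order at compile time so that
 * common code can stay endian-agnostic, e.g. loading and storing a
 * 32-bit guest value through a host pointer:
 *
 *     uint8_t buf[4];
 *     uint32_t v = ldl_p(buf);    load in target byte order
 *     stl_p(buf, v + 1);          store in target byte order
 */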

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"
#include "exec/user/guest-base.h"

extern bool have_guest_base;

/*
 * If non-zero, the guest virtual address space is a contiguous subset
 * of the host virtual address space, i.e. '-R reserved_va' is in effect
 * either from the command-line or by default.  The value is the last
 * byte of the guest address space e.g. UINT32_MAX.
 *
 * If zero, the host and guest virtual address spaces are intermingled.
 */
extern unsigned long reserved_va;

/*
 * Limit the guest addresses as best we can.
 *
 * When not using -R reserved_va, we cannot really limit the guest
 * to less address space than the host.  For 32-bit guests, this
 * acts as a sanity check that we're not giving the guest an address
 * that it cannot even represent.  For 64-bit guests... the address
 * might not be what the real kernel would give, but it is at least
 * representable in the guest.
 *
 * TODO: Improve address allocation to avoid this problem, and to
 * avoid setting bits at the top of guest addresses that might need
 * to be used for tags.
 */
#define GUEST_ADDR_MAX_                                                 \
    ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ?  \
     UINT32_MAX : ~0ul)
#define GUEST_ADDR_MAX    (reserved_va ? : GUEST_ADDR_MAX_)
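
/*
 * Illustrative note (editor's example, not part of this header): with
 * '-R 0xffffffff' reserved_va is UINT32_MAX and GUEST_ADDR_MAX picks
 * it up via the GNU 'x ? : y' extension (which reuses the non-zero
 * condition as the result); without -R, reserved_va is 0 and the
 * compile-time GUEST_ADDR_MAX_ bound applies instead.
 */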

#else

#include "exec/hwaddr.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}
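
/*
 * Illustrative note (editor's example, not part of this header):
 * stl_phys_notdirty() lets the emulator itself update guest RAM
 * without going through the dirty-bitmap update, e.g. a target MMU
 * helper setting accessed/dirty bits in a guest page table entry
 * ('pte_addr' and 'new_pte' are hypothetical):
 *
 *     stl_phys_notdirty(as, pte_addr, new_pte);
 */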

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast path for direct RAM access.  */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
#endif

/* page related stuff */

#ifdef TARGET_PAGE_BITS_VARY
# include "exec/page-vary.h"
extern const TargetPageBits target_page;
#ifdef CONFIG_DEBUG_TCG
#define TARGET_PAGE_BITS   ({ assert(target_page.decided); target_page.bits; })
#define TARGET_PAGE_MASK   ({ assert(target_page.decided); \
                              (target_long)target_page.mask; })
#else
#define TARGET_PAGE_BITS   target_page.bits
#define TARGET_PAGE_MASK   ((target_long)target_page.mask)
#endif
#define TARGET_PAGE_SIZE   (-(int)TARGET_PAGE_MASK)
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#define TARGET_PAGE_SIZE   (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK   ((target_long)-1 << TARGET_PAGE_BITS)
#endif

#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
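
/*
 * Illustrative sketch (editor's example, not part of this header):
 * TARGET_PAGE_MASK sign-extends over all bits above the page offset,
 * so with 4KiB pages (TARGET_PAGE_BITS == 12):
 *
 *     addr & TARGET_PAGE_MASK        rounds down to the page base
 *     TARGET_PAGE_ALIGN(0x1001)      rounds up, here to 0x2000
 *     TARGET_PAGE_SIZE               0x1000
 */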

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/*
 * Original state of the write flag (used when tracking self-modifying code)
 */
#define PAGE_WRITE_ORG 0x0010
/*
 * Invalidate the TLB entry immediately, helpful for s390x
 * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs()
 */
#define PAGE_WRITE_INV 0x0020
/* For use with page_set_flags: page is being replaced; target_data cleared. */
#define PAGE_RESET     0x0040
/* For linux-user, indicates that the page is MAP_ANON. */
#define PAGE_ANON      0x0080

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away.  */
#define PAGE_RESERVED  0x0100
#endif
/* Target-specific bits that will be used via page_get_flags().  */
#define PAGE_TARGET_1  0x0200
#define PAGE_TARGET_2  0x0400

/*
 * For linux-user, indicates that the page is mapped with the same semantics
 * in both guest and host.
 */
#define PAGE_PASSTHROUGH 0x0800
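
/*
 * Illustrative note (editor's example, not part of this header): a
 * private read/write anonymous mapping in linux-user mode typically
 * carries
 *
 *     PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_WRITE_ORG | PAGE_ANON
 *
 * and PAGE_WRITE is temporarily dropped (with PAGE_WRITE_ORG
 * preserved) while translated code from that page is protected
 * against self-modification.
 */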

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong last, int flags);
void page_reset_target_data(target_ulong start, target_ulong last);

/**
 * page_check_range
 * @start: first byte of range
 * @len: length of range
 * @flags: flags required for each page
 *
 * Return true if every page in [@start, @start+@len) has @flags set.
 * Return false if any page is unmapped.  Thus testing flags == 0 is
 * equivalent to testing for flags == PAGE_VALID.
 */
bool page_check_range(target_ulong start, target_ulong len, int flags);

/**
 * page_check_range_empty:
 * @start: first byte of range
 * @last: last byte of range
 * Context: holding mmap lock
 *
 * Return true if the entire range [@start, @last] is unmapped.
 * The memory lock must be held so that the caller can ensure
 * the result stays true until a new mapping can be installed.
 */
bool page_check_range_empty(target_ulong start, target_ulong last);

/**
 * page_find_range_empty
 * @min: first byte of search range
 * @max: last byte of search range
 * @len: size of the hole required
 * @align: alignment of the hole required (power of 2)
 *
 * If there is a range [x, x+@len) within [@min, @max] such that
 * x % @align == 0, then return x.  Otherwise return -1.
 * The memory lock must be held, as the caller will want to ensure
 * the returned range stays empty until a new mapping can be installed.
 */
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
                                   target_ulong len, target_ulong align);

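/*
 * Illustrative sketch (editor's example, not part of this header): an
 * mmap(NULL, ...)-style allocation could search the whole guest range
 * for a page-aligned hole ('len' is a hypothetical length here):
 *
 *     target_ulong addr = page_find_range_empty(0, GUEST_ADDR_MAX,
 *                                               len, TARGET_PAGE_SIZE);
 *     if (addr == (target_ulong)-1) {
 *         ... no suitable hole ...
 *     }
 */
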
/**
 * page_get_target_data(address)
 * @address: guest virtual address
 *
 * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
 * with the guest page at @address, allocating it if necessary.  The
 * caller should already have verified that the address is valid.
 *
 * The memory will be freed when the guest page is deallocated,
 * e.g. with the munmap system call.
 */
void *page_get_target_data(target_ulong address)
    __attribute__((returns_nonnull));
#endif

CPUArchState *cpu_copy(CPUArchState *env);

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping, e.g. an A20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Reset signal.  */
#define CPU_INTERRUPT_RESET       0x0400

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0800
#define CPU_INTERRUPT_TGT_INT_2   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
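
/*
 * Illustrative note (editor's example, not part of this header): each
 * target/cpu.h maps these generic bits onto named sources, e.g. a
 * hypothetical target might define
 *
 *     #define CPU_INTERRUPT_NMI  CPU_INTERRUPT_TGT_EXT_3
 *
 * The TGT_INT_* bits stay deliverable during single-stepping because
 * they are excluded from CPU_INTERRUPT_SSTEP_MASK.
 */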

#ifdef CONFIG_USER_ONLY

/*
 * Allow some level of source compatibility with softmmu.  We do not
 * support any of the more exotic features, so only invalid pages may
 * be signaled by probe_access_flags().
 */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 2))
#define TLB_WATCHPOINT      0

#else

/*
 * Flags stored in the low bits of the TLB virtual address.
 * These are defined so that fast path ram access is all zeros.
 * The flags all must be between TARGET_PAGE_BITS and
 * maximum address alignment bit.
 *
 * Use TARGET_PAGE_BITS_MIN so that these bits are constant
 * when TARGET_PAGE_BITS_VARY is in effect.
 *
 * The count, if not the placement, of these bits is known
 * to tcg/tcg-op-ldst.c, check_max_alignment().
 */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if TLB entry writes are ignored.  */
#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))

/*
 * Use this mask to check interception with an alignment mask
 * in a TCG backend.
 */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
    | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)

/*
 * Flags stored in CPUTLBEntryFull.slow_flags[x].
 * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
 */
/* Set if TLB entry requires byte swap.  */
#define TLB_BSWAP            (1 << 0)
/* Set if TLB entry contains a watchpoint.  */
#define TLB_WATCHPOINT       (1 << 1)

#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT)

/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);

/**
 * tlb_hit_page: return true if page aligned @addr is a hit against the
 * TLB entry @tlb_addr
 *
 * @addr: virtual address to test (must be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/**
 * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
 *
 * @addr: virtual address to test (need not be page aligned)
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 */
static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}
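
/*
 * Illustrative sketch (editor's example, not part of this header): a
 * fast-path probe compares the access address with a cached entry and
 * falls back to the slow path on a miss ('entry' is hypothetical):
 *
 *     if (tlb_hit(entry->addr_read, addr)) {
 *         ... direct RAM access ...
 *     } else {
 *         ... refill via the slow path, e.g. tlb_fill() ...
 *     }
 */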

#ifdef CONFIG_TCG
/* accel/tcg/translate-all.c */
void dump_exec_info(GString *buf);
#endif /* CONFIG_TCG */

#endif /* !CONFIG_USER_ONLY */

/* accel/tcg/cpu-exec.c */
int cpu_exec(CPUState *cpu);
void tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);

/**
 * cpu_set_cpustate_pointers(cpu)
 * @cpu: The cpu object
 *
 * Set the generic pointers in CPUState into the outer object.
 */
static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
{
    cpu->parent_obj.env_ptr = &cpu->env;
    cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
}

/**
 * env_archcpu(env)
 * @env: The architecture environment
 *
 * Return the ArchCPU associated with the environment.
 */
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
    return container_of(env, ArchCPU, env);
}

/**
 * env_cpu(env)
 * @env: The architecture environment
 *
 * Return the CPUState associated with the environment.
 */
static inline CPUState *env_cpu(CPUArchState *env)
{
    return &env_archcpu(env)->parent_obj;
}

/**
 * env_neg(env)
 * @env: The architecture environment
 *
 * Return the CPUNegativeOffsetState associated with the environment.
 */
static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
{
    ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
    return &arch_cpu->neg;
}

/**
 * cpu_neg(cpu)
 * @cpu: The generic CPUState
 *
 * Return the CPUNegativeOffsetState associated with the cpu.
 */
static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
{
    ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
    return &arch_cpu->neg;
}

/**
 * env_tlb(env)
 * @env: The architecture environment
 *
 * Return the CPUTLB state associated with the environment.
 */
static inline CPUTLB *env_tlb(CPUArchState *env)
{
    return &env_neg(env)->tlb;
}

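/*
 * Illustrative sketch (editor's example, not part of this header):
 * because ArchCPU embeds CPUState, CPUNegativeOffsetState and
 * CPUArchState contiguously, all of these accessors reduce to
 * container_of() pointer arithmetic:
 *
 *     CPUState *cs = env_cpu(env);    outer CPUState of 'env'
 *     CPUTLB *tlb = env_tlb(env);     same as &env_neg(env)->tlb
 */
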
#endif /* CPU_ALL_H */