1/* 2 * QEMU CPU model 3 * 4 * Copyright (c) 2012 SUSE LINUX Products GmbH 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 2 9 * of the License, or (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see 18 * <http://www.gnu.org/licenses/gpl-2.0.html> 19 */ 20#ifndef QEMU_CPU_H 21#define QEMU_CPU_H 22 23#include "hw/qdev-core.h" 24#include "disas/bfd.h" 25#include "exec/hwaddr.h" 26#include "exec/memattrs.h" 27#include "qemu/queue.h" 28#include "qemu/thread.h" 29 30typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size, 31 void *opaque); 32 33/** 34 * vaddr: 35 * Type wide enough to contain any #target_ulong virtual address. 36 */ 37typedef uint64_t vaddr; 38#define VADDR_PRId PRId64 39#define VADDR_PRIu PRIu64 40#define VADDR_PRIo PRIo64 41#define VADDR_PRIx PRIx64 42#define VADDR_PRIX PRIX64 43#define VADDR_MAX UINT64_MAX 44 45/** 46 * SECTION:cpu 47 * @section_id: QEMU-cpu 48 * @title: CPU Class 49 * @short_description: Base class for all CPUs 50 */ 51 52#define TYPE_CPU "cpu" 53 54/* Since this macro is used a lot in hot code paths and in conjunction with 55 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using 56 * an unchecked cast. 
 */
#define CPU(obj) ((CPUState *)(obj))

#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)

typedef struct CPUWatchpoint CPUWatchpoint;

typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

struct TranslationBlock;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 * instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset: Callback to reset the #CPUState to its initial state.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @do_interrupt: Callback for interrupt handling.
 * @do_unassigned_access: Callback for unassigned access handling.
 * @do_unaligned_access: Callback for unaligned access handling, if
 * the target defines #ALIGNED_ONLY.
 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
 * runtime configurable endianness is currently big-endian. Non-configurable
 * CPUs can use the default implementation of this method. This method should
 * not be used by any callers other than the pre-1.0 virtio devices.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @dump_statistics: Callback for dumping statistics.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
 * @get_memory_mapping: Callback for obtaining the memory mappings.
 * @set_pc: Callback for setting the Program Counter register.
 * @get_pc: Callback for getting the Program Counter register.
 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 * #TranslationBlock.
 * @handle_mmu_fault: Callback for handling an MMU fault.
 * @get_phys_page_debug: Callback for obtaining a physical address.
 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
 * associated memory transaction attributes to use for the access.
 * CPUs which use memory transaction attributes should implement this
 * instead of get_phys_page_debug.
 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
 * a memory access with the specified memory transaction attributes.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @debug_check_watchpoint: Callback: return true if the architectural
 * watchpoint whose address has matched should really fire.
 * @debug_excp_handler: Callback for handling debug exceptions.
 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 64-bit VM coredump.
 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
 * 32-bit VM coredump.
 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 32-bit VM coredump.
 * @set_debug_context: Optional callback to switch the CPU to debug context
 * @ctx (NOTE(review): local extension; exact semantics are target-defined —
 * confirm against the targets that implement it).
 * @debug_contexts: Names of the available debug contexts (NOTE(review):
 * local extension; termination convention not visible from this header).
 * @vmsd: State description for migration.
 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 * before the insn which triggers a watchpoint rather than after it.
 * @gdb_arch_name: Optional callback that returns the architecture name known
 * to GDB. The caller must free the returned string with g_free.
 * @cpu_exec_enter: Callback for cpu_exec preparation.
 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 * @disas_set_info: Setup architecture specific components of disassembly info
 *
 * Represents a CPU family or model.
 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(CPUState *cpu, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                int is_write, int is_user, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    vaddr (*get_pc)(CPUState *cpu);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
    int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    /* Local extension: target-defined debug context switching. */
    void (*set_debug_context)(CPUState *cpu, unsigned int ctx);
    const char **debug_contexts;
    const struct VMStateDescription *vmsd;
    int gdb_num_core_regs;
    const char *gdb_core_xml_file;
    gchar * (*gdb_arch_name)(CPUState *cpu);
    bool gdb_stop_before_watchpoint;

    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
} CPUClass;

/* 16-bit halves of icount_decr.u32, laid out so that "high" overlays the
 * sign/interrupt bit of the 32-bit value on either host endianness.
 */
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif

typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    MemTxAttrs hitattrs;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
};

struct KVMState;
struct kvm_run;

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU.
 * @numa_node: NUMA node this CPU is belonging to.
 * @host_tid: Host thread ID.
 * @running: #true if CPU is currently running (usermode).
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
 *           CPU and return to its top level loop.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 * requires that IO only be performed on the last instruction of a TB
 * so that interrupts take effect immediately.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 *            AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 *      only have a single AddressSpace
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @current_tb: Currently executing TB.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @next_cpu: Next CPU sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 * @queued_work_first: First asynchronous work pending.
 *
 * State of one CPU core or thread.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;
    int numa_node;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    uint32_t host_tid;
    bool running;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool crash_occurred;
    bool exit_request;
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */
    struct TranslationBlock *current_tb;
    /* Per-CPU cache of recently-executed TranslationBlocks. */
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    bool kvm_vcpu_dirty;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index; /* used by alpha TCG */
    uint32_t halted; /* used by alpha, cris, ppc TCG */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;
    uint32_t can_do_io;
    int32_t exception_index; /* used by m68k TCG */

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets.  */
    uint32_t tcg_exit_req;

    /* NOTE(review): local extension — external pin states driven by the
     * cpu_halt_gpio()/cpu_reset_gpio() handlers declared below.
     */
    bool reset_pin; /* state of reset pin */
    bool halt_pin; /* state of halt pin */
    bool arch_halt_pin;

    char *gdb_id;
};

QTAILQ_HEAD(CPUTailQ, CPUState);
extern struct CPUTailQ cpus;
#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
#define CPU_FOREACH_REVERSE(cpu) \
    QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
#define first_cpu QTAILQ_FIRST(&cpus)

extern __thread CPUState *current_cpu;

/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 */
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE:
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags what to dump.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags);

/**
 * cpu_dump_statistics:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags what to dump.
 *
 * Dumps CPU statistics.
 */
void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_attrs_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 * @attrs: Updated on return with the memory transaction attributes to use
 *         for this access.
 *
 * Obtains the physical page corresponding to a virtual one, together
 * with the corresponding memory transaction attributes to use for the access.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                                   MemTxAttrs *attrs)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->get_phys_page_attrs_debug) {
        return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
    }
    /* Fallback for CPUs which don't implement the _attrs_ hook */
    *attrs = MEMTXATTRS_UNSPECIFIED;
    return cc->get_phys_page_debug(cpu, addr);
}

/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
{
    /* Caller does not care about attributes; use a scratch value. */
    MemTxAttrs attrs = {};

    return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
}

/** cpu_asidx_from_attrs:
 * @cpu: CPU
 * @attrs: memory transaction attributes
 *
 * Returns the address space index specifying the CPU AddressSpace
 * to use for a memory access with the given transaction attributes.
 */
static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->asidx_from_attrs) {
        return cc->asidx_from_attrs(cpu, attrs);
    }
    /* Default to address space 0 when the target provides no hook. */
    return 0;
}
#endif

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_generic_init:
 * @typename: The CPU base type.
 * @cpu_model: The model string including optional parameters.
 *
 * Instantiates a CPU, processes optional parameters and realizes the CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_generic_init(const char *typename, const char *cpu_model);

/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* has_work is a mandatory hook for every CPU class. */
    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * qemu_get_cpu:
 * @index: The CPUState@cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to lookup.
 *
 * Search for CPU with specified ID.
 *
 * Returns: %true - CPU is found, %false - CPU isn't found.
 */
bool cpu_exists(int64_t id);

/**
 * cpu_throttle_set:
 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
 *
 * Throttles all vcpus by forcing them to sleep for the given percentage of
 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
 * (example: 10ms sleep for every 30ms awake).
 *
 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
 * is called.
 */
void cpu_throttle_set(int new_throttle_pct);

/**
 * cpu_throttle_stop:
 *
 * Stops the vcpu throttling started by cpu_throttle_set.
 */
void cpu_throttle_stop(void);

/**
 * cpu_throttle_active:
 *
 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
 */
bool cpu_throttle_active(void);

/**
 * cpu_throttle_get_percentage:
 *
 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
 *
 * Returns: The throttle percentage in range 1 to 99.
 */
int cpu_throttle_get_percentage(void);

#ifndef CONFIG_USER_ONLY

typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu_interrupt_handler(cpu, mask);
}

#else /* USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask);

#endif /* USER_ONLY */

#ifdef CONFIG_SOFTMMU
/* Forward an unassigned-access event to the CPU's hook, if it has one. */
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                         bool is_write, bool is_exec,
                                         int opaque, unsigned size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->do_unassigned_access) {
        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
    }
}

/* Forward an unaligned-access fault to the CPU's hook.
 * NOTE(review): unlike cpu_unassigned_access() this call is unconditional,
 * so targets that can reach it must install do_unaligned_access.
 */
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        int is_write, int is_user,
                                        uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
}
#endif

/**
 * cpu_set_pc:
 * @cpu: The CPU to set the program counter for.
 * @addr: Program counter value.
 *
 * Sets the program counter for a CPU.
 */
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->set_pc(cpu, addr);
}

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use Timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_ANY                (BP_GDB | BP_CPU)
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);

/* Return true if PC matches an installed breakpoint.
 */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
    CPUBreakpoint *bp;

    /* Fast path: most CPUs have no breakpoints installed. */
    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & mask)) {
                return true;
            }
        }
    }
    return false;
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);

/* Report a fatal CPU error (printf-style message) and abort; never returns. */
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
void cpu_exec_exit(CPUState *cpu);

#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
#define vmstate_cpu_common vmstate_dummy
#endif

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}

/* GPIO line handlers driving the CPU's halt/reset pins
 * (NOTE(review): local extension; level semantics defined in the .c file).
 */
void cpu_halt_gpio(void *opaque, int irq, int level);
void cpu_reset_gpio(void *opaque, int irq, int level);

void cpu_halt_reset_common(CPUState *cpu, bool *change, bool val, bool force);

#endif /* QEMU_CPU_H */