1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#ifndef CPU_ALL_H
20#define CPU_ALL_H
21
22#include "exec/cpu-common.h"
23#include "exec/memory.h"
24#include "qemu/thread.h"
25#include "hw/core/cpu.h"
26#include "qemu/rcu.h"
27
/*
 * Generic exception codes returned by the execution loop (cpu_exec()).
 * Values at or above 0x10000 are reserved for these non-target-specific
 * events; smaller values are target exception numbers.
 */
#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT 0x10001 /* CPU halted (e.g. a halt instruction) */
#define EXCP_DEBUG 0x10002 /* CPU stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* CPU is halted, waiting for an external event */
#define EXCP_YIELD 0x10004 /* CPU wants to yield its timeslice */
#define EXCP_ATOMIC 0x10005 /* stop-the-world to emulate an atomic op */
34
35
36
37
38
39
40
41
42
/*
 * Target/host byte-order conversion helpers.
 *
 * tswapN() converts an N-bit value between host and target byte order
 * (the operation is its own inverse); tswapNs() does the same in place.
 * A swap is only needed when exactly one of host and target is
 * big-endian; otherwise all helpers are no-ops.
 */
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

/* Host and target endianness differ: byte-swap. */

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

/* Host and target endianness match: everything is a no-op. */

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
109
/*
 * tswapl/tswapls operate on a value of the target's natural word size
 * (target_long); bswaptls always swaps, sized to the target long.
 */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
119
120
121
122
/*
 * Target-endian memory access helpers: map the generic ld*_p/st*_p
 * accessors onto the explicit big- or little-endian variants that
 * match the target's endianness.
 */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
144
145
146
#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"

/*
 * guest_base: host address offset at which guest address 0 is mapped.
 * have_guest_base: true if guest_base was fixed explicitly rather than
 * chosen automatically.
 * reserved_va: if non-zero, size of the guest address space reserved
 * up front.
 * NOTE(review): definitions live elsewhere (user-mode loader) — confirm
 * semantics there.
 */
extern uintptr_t guest_base;
extern bool have_guest_base;
extern unsigned long reserved_va;

/*
 * Largest valid guest address: bounded by the narrower of the guest
 * virtual address width and the ABI pointer width, and clamped to the
 * reserved VA region when one is in use.
 */
#define GUEST_ADDR_MAX_ \
    ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
     UINT32_MAX : ~0ul)
#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
175
176#else
177
#include "exec/hwaddr.h"

/* Declare the target-endian address_space_ld*/st* accessors. */
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

/* Same accessor set, but operating on a MemoryRegionCache (slow path). */
#define SUFFIX _cached_slow
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"
191
/*
 * Convenience wrapper around address_space_stl_notdirty(): store a
 * 32-bit value at @addr in @as with unspecified memory attributes and
 * no transaction-result reporting ("notdirty" = dirty tracking is not
 * re-triggered for this store).
 */
static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}
197
/* Declare the target-endian ld*_phys/st*_phys helpers for an AddressSpace. */
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline cached ld/st accessors operating on a MemoryRegionCache. */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

/* ld*_phys_cached/st*_phys_cached wrappers over the cached accessors. */
#define SUFFIX _cached
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
213#endif
214
215
216
/* Target page size: runtime-variable for some targets, else fixed. */
#ifdef TARGET_PAGE_BITS_VARY
# include "exec/page-vary.h"
extern const TargetPageBits target_page;
#ifdef CONFIG_DEBUG_TCG
/* Debug builds assert that the page size has been finalized before use. */
#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
#define TARGET_PAGE_MASK ({ assert(target_page.decided); \
                            (target_long)target_page.mask; })
#else
#define TARGET_PAGE_BITS target_page.bits
#define TARGET_PAGE_MASK ((target_long)target_page.mask)
#endif
/* The mask is all-ones above the page bits, so its negation is the size. */
#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#endif

/* Round @addr up to the next target page boundary. */
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
236
237
238
239
/*
 * Host page size/mask as used by QEMU.
 * NOTE(review): may differ from the real host page size — confirm against
 * the definitions and qemu_real_host_page_size elsewhere.
 */
extern uintptr_t qemu_host_page_size;
extern intptr_t qemu_host_page_mask;

/* Round @addr up to the next (QEMU/real) host page boundary. */
#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)
245
246
/* Per-guest-page flags, mmap-style protection bits first. */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008

/*
 * Original state of the write flag, preserved while a page is made
 * read-only (e.g. for self-modifying-code tracking).
 * NOTE(review): confirm against page_set_flags()/tb invalidation code.
 */
#define PAGE_WRITE_ORG 0x0010

/* Invalidate the corresponding TLB entry immediately.
 * NOTE(review): exact lifetime defined by the flag's users — confirm. */
#define PAGE_WRITE_INV 0x0020

/* For page_set_flags(): the mapping is being replaced; reset old state. */
#define PAGE_RESET 0x0040

/* The page is anonymously mapped (not file-backed). */
#define PAGE_ANON 0x0080

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* NOTE(review): BSD user-only; semantics defined by bsd-user mmap code. */
#define PAGE_RESERVED 0x0100
#endif
/* Target-specific bits passed through to/from the guest page flags. */
#define PAGE_TARGET_1 0x0200
#define PAGE_TARGET_2 0x0400
273
#if defined(CONFIG_USER_ONLY)
/* Dump the guest memory map to @f (user-mode debugging aid). */
void page_dump(FILE *f);

/* Callback invoked per contiguous region: (opaque, start, end, flags). */
typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

/* Query and update the PAGE_* flags recorded for guest pages. */
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);

/*
 * page_alloc_target_data(address, size)
 * @address: guest virtual address of the page
 * @size: number of bytes of out-of-band data to allocate
 *
 * Allocate target-specific per-page data to associate with the guest
 * page at @address and return it.
 * NOTE(review): behavior when data already exists (NULL vs existing
 * pointer) to be confirmed against the implementation.
 */
void *page_alloc_target_data(target_ulong address, size_t size);

/*
 * page_get_target_data(address)
 * @address: guest virtual address of the page
 *
 * Return the per-page data previously set up with
 * page_alloc_target_data(), or NULL if none was allocated.
 */
void *page_get_target_data(target_ulong address);
#endif
309
/* Duplicate @env into a newly created CPU and return its environment.
 * NOTE(review): presumably used by user-mode clone()/fork paths — confirm. */
CPUArchState *cpu_copy(CPUArchState *env);
311
312
313
314
315
316
317
318
319
320
/*
 * Interrupt-request flags.  The values are a bitmap; the generic bits
 * below coexist with the target-specific TGT_EXT_*/TGT_INT_* slots.
 */

/* External hardware interrupt pending (typically device interrupts). */
#define CPU_INTERRUPT_HARD 0x0002

/* Exit the current translation block, e.g. after a change that
 * invalidates the current memory mapping. */
#define CPU_INTERRUPT_EXITTB 0x0004

/* Halt the CPU. */
#define CPU_INTERRUPT_HALT 0x0020

/* Debug event pending. */
#define CPU_INTERRUPT_DEBUG 0x0080

/* Reset signal. */
#define CPU_INTERRUPT_RESET 0x0400

/* Target-specific external hardware events; each target defines its
 * own meaningful names on top of these slots. */
#define CPU_INTERRUPT_TGT_EXT_0 0x0008
#define CPU_INTERRUPT_TGT_EXT_1 0x0010
#define CPU_INTERRUPT_TGT_EXT_2 0x0040
#define CPU_INTERRUPT_TGT_EXT_3 0x0200
#define CPU_INTERRUPT_TGT_EXT_4 0x1000

/* Target-specific internal events (originating from within the CPU
 * itself rather than from external hardware). */
#define CPU_INTERRUPT_TGT_INT_0 0x0100
#define CPU_INTERRUPT_TGT_INT_1 0x0800
#define CPU_INTERRUPT_TGT_INT_2 0x2000

/* Hardware interrupts (generic and target external) that must be
 * masked while single-stepping. */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD \
     | CPU_INTERRUPT_TGT_EXT_0 \
     | CPU_INTERRUPT_TGT_EXT_1 \
     | CPU_INTERRUPT_TGT_EXT_2 \
     | CPU_INTERRUPT_TGT_EXT_3 \
     | CPU_INTERRUPT_TGT_EXT_4)
363
#ifdef CONFIG_USER_ONLY
/*
 * User-only: keep source compatibility with the softmmu flag checks.
 * Only invalid pages need to be signalled; the other flags are zero
 * so the corresponding tests compile away.
 */
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO 0
#define TLB_WATCHPOINT 0

#else

/*
 * Flags stored in the low bits of the TLB comparator value.  They are
 * chosen so that a fast-path RAM access has all flag bits zero; every
 * flag must fit below TARGET_PAGE_BITS_MIN.
 */

/* Zero if the TLB entry is valid. */
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))

/* Set if the entry references a clean RAM page (a write must go
 * through the dirty-tracking slow path). */
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if the entry is an I/O (MMIO) callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if the entry contains a watchpoint. */
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if accesses through the entry require a byte swap. */
#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))
/* Set if writes through the entry are silently discarded. */
#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))

/* Union of all flag bits above, for slow-path detection. */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
     | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
406
407
408
409
410
411
412
413
414static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
415{
416 return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
417}
418
419
420
421
422
423
424
425static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
426{
427 return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
428}
429
#ifdef CONFIG_TCG
/* Append instruction-counter drift statistics to @buf. */
void dump_drift_info(GString *buf);
/* Append translation/JIT statistics to @buf. */
void dump_exec_info(GString *buf);
/* Append per-opcode usage counts to @buf. */
void dump_opcount_info(GString *buf);
#endif

#endif /* !CONFIG_USER_ONLY */
439
#ifdef CONFIG_TCG
/* Run the TCG execution loop for @cpu; returns one of the EXCP_*
 * codes defined above (or a target exception number). */
int cpu_exec(CPUState *cpu);
/* Set up / tear down per-vCPU TCG state at (un)realize time. */
void tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);
#endif

/* Debugger access to @len bytes of guest memory at @addr.
 * NOTE(review): return convention (0 on success?) to be confirmed
 * against the implementation. */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        void *ptr, target_ulong len, bool is_write);
450
451
452
453
454
455
456
/**
 * cpu_set_cpustate_pointers(cpu)
 * @cpu: The cpu object
 *
 * Set the generic pointers in CPUState into the outer object, so that
 * common code can reach the arch-specific env and the icount
 * decrementer without knowing the concrete ArchCPU layout.
 */
static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
{
    cpu->parent_obj.env_ptr = &cpu->env;
    cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
}
462
463
464
465
466
467
468
/**
 * env_archcpu(env)
 * @env: The architecture environment
 *
 * Return the ArchCPU associated with the environment.  @env is
 * embedded as the 'env' member of ArchCPU, so this is a plain
 * container_of().
 */
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
    return container_of(env, ArchCPU, env);
}
473
474
475
476
477
478
479
480static inline CPUState *env_cpu(CPUArchState *env)
481{
482 return &env_archcpu(env)->parent_obj;
483}
484
485
486
487
488
489
490
491static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
492{
493 ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
494 return &arch_cpu->neg;
495}
496
497
498
499
500
501
502
503static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
504{
505 ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
506 return &arch_cpu->neg;
507}
508
509
510
511
512
513
514
515static inline CPUTLB *env_tlb(CPUArchState *env)
516{
517 return &env_neg(env)->tlb;
518}
519
520#endif
521