1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "qemu/thread.h"
#include "hw/core/cpu.h"
#include "qemu/rcu.h"

/*
 * Special return codes for cpu_exec(): values above 0x10000 so they
 * cannot collide with target-architecture exception numbers.
 */
#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT 0x10001 /* hlt instruction reached */
#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
34
35
36
37
38
39
40
41
42
/*
 * If host and target byte order differ, data moved between them must be
 * byte-swapped; the tswap* helpers below key off this define.
 */
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif
46
47#ifdef BSWAP_NEEDED
48
/* Convert a 16-bit value between host and target byte order (real swap). */
static inline uint16_t tswap16(uint16_t x)
{
    uint16_t swapped = bswap16(x);
    return swapped;
}
53
/* Convert a 32-bit value between host and target byte order (real swap). */
static inline uint32_t tswap32(uint32_t x)
{
    uint32_t swapped = bswap32(x);
    return swapped;
}
58
/* Convert a 64-bit value between host and target byte order (real swap). */
static inline uint64_t tswap64(uint64_t x)
{
    uint64_t swapped = bswap64(x);
    return swapped;
}
63
/* In-place variant of tswap16(). */
static inline void tswap16s(uint16_t *p)
{
    uint16_t v = *p;
    *p = bswap16(v);
}
68
/* In-place variant of tswap32(). */
static inline void tswap32s(uint32_t *p)
{
    uint32_t v = *p;
    *p = bswap32(v);
}
73
/* In-place variant of tswap64(). */
static inline void tswap64s(uint64_t *p)
{
    uint64_t v = *p;
    *p = bswap64(v);
}
78
#else

/*
 * Host and target share the same byte order: all tswap* helpers are
 * identity operations.
 */

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
109
/* Swap helpers sized to the target "long" (TARGET_LONG_SIZE bytes). */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
119
120
121
122
/*
 * Unaligned load/store helpers in *target* byte order, mapped onto the
 * explicit big-/little-endian accessors (lduw_be_p etc.).
 */
#if TARGET_BIG_ENDIAN
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
144
145
146
#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"

/*
 * In user-mode emulation, guest addresses map into the host address
 * space at a fixed offset (guest_base).
 */
extern uintptr_t guest_base;
extern bool have_guest_base;

/*
 * If non-zero, the guest address space is restricted to a reservation of
 * this size; it then also acts as the maximum guest address.
 * NOTE(review): exact reservation semantics live in the user-mode mmap
 * code — confirm there before relying on details.
 */
extern unsigned long reserved_va;

/*
 * Largest valid guest address: limited by the narrower of the guest
 * virtual address space and the guest ABI pointer width, capped at
 * UINT32_MAX for 32-bit guests.
 */
#define GUEST_ADDR_MAX_ \
    ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
     UINT32_MAX : ~0ul)
/* reserved_va, when set, overrides the static bound (GNU ?: extension). */
#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
184
#else

#include "exec/hwaddr.h"

/*
 * Instantiate the target-endian address_space load/store declarations;
 * the generated prototypes come from exec/memory_ldst.h.inc.
 */
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

/* Same, for the MemoryRegionCache slow path. */
#define SUFFIX _cached_slow
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"
200
201static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
202{
203 address_space_stl_notdirty(as, addr, val,
204 MEMTXATTRS_UNSPECIFIED, NULL);
205}
206
/* Target-endian physical-address load/store helpers (ld*_phys, st*_phys). */
#define SUFFIX
#define ARG1 as
#define ARG1_DECL AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast-path accessors for MemoryRegionCache. */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

/* Target-endian cached physical-address helpers (ld*_phys_cached, ...). */
#define SUFFIX _cached
#define ARG1 cache
#define ARG1_DECL MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"
#endif
223
224
225
/*
 * Target page size handling. Some targets decide the page size at
 * runtime (TARGET_PAGE_BITS_VARY); others fix it at compile time.
 */
#ifdef TARGET_PAGE_BITS_VARY
# include "exec/page-vary.h"
extern const TargetPageBits target_page;
#ifdef CONFIG_DEBUG_TCG
/* Debug builds assert that the page size has been decided before use. */
#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
#define TARGET_PAGE_MASK ({ assert(target_page.decided); \
                            (target_long)target_page.mask; })
#else
#define TARGET_PAGE_BITS target_page.bits
#define TARGET_PAGE_MASK ((target_long)target_page.mask)
#endif
/* Size recovered from the mask: -mask == low bits set + 1. */
#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
#endif

/* Round @addr up to the next target page boundary. */
#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
245
246
/* Page protection/state flags used by page_get_flags()/page_set_flags(). */
#define PAGE_READ 0x0001 /* page can be read */
#define PAGE_WRITE 0x0002 /* page can be written */
#define PAGE_EXEC 0x0004 /* page can be executed */
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008 /* page has a valid mapping */

/*
 * Original write permission, kept while the page is write-protected for
 * self-modifying-code tracking — NOTE(review): inferred from upstream
 * usage; confirm against translate-all.c.
 */
#define PAGE_WRITE_ORG 0x0010

/* Invalidate translations for the page before making it writable. */
#define PAGE_WRITE_INV 0x0020

/* Page is being replaced by a fresh mapping (page_set_flags). */
#define PAGE_RESET 0x0040

/* Page belongs to an anonymous mapping. */
#define PAGE_ANON 0x0080

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* BSD user-mode only: page is reserved and must not be remapped. */
#define PAGE_RESERVED 0x0100
#endif

/* Bits free for target-specific use. */
#define PAGE_TARGET_1 0x0200
#define PAGE_TARGET_2 0x0400

/*
 * Page is mapped with identical semantics in guest and host, so guest
 * accesses may go straight through — TODO confirm against linux-user.
 */
#define PAGE_PASSTHROUGH 0x0800
279
#if defined(CONFIG_USER_ONLY)
/* Dump the guest page mappings to @f. */
void page_dump(FILE *f);

/* Callback signature: (opaque, region start, region end, flags). */
typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
/* Invoke @fn for every contiguous mapped guest region. */
int walk_memory_regions(void *, walk_memory_regions_fn);

/* Query/modify PAGE_* flags over guest address ranges ([start, last]). */
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong last, int flags);
void page_reset_target_data(target_ulong start, target_ulong last);
int page_check_range(target_ulong start, target_ulong len, int flags);

/*
 * Return the per-page target-specific data for the page containing
 * @address; guaranteed non-NULL (declared returns_nonnull).
 */
void *page_get_target_data(target_ulong address)
    __attribute__((returns_nonnull));
#endif

/* Duplicate a CPU state (used by user-mode fork emulation). */
CPUArchState *cpu_copy(CPUArchState *env);
308
309
310
311
312
313
314
315
316
317
/*
 * Flags for cpu->interrupt_request. TGT_EXT_*/TGT_INT_* bits are free
 * for target-specific use; numeric values are interleaved so targets can
 * alias them onto hardware interrupt lines.
 */

/* External hardware interrupt pending. */
#define CPU_INTERRUPT_HARD 0x0002

/* Exit the current translation block (force pending work to be seen). */
#define CPU_INTERRUPT_EXITTB 0x0004

/* Halt request. */
#define CPU_INTERRUPT_HALT 0x0020

/* Debug event request. */
#define CPU_INTERRUPT_DEBUG 0x0080

/* Reset request. */
#define CPU_INTERRUPT_RESET 0x0400

/* Target-specific "external" interrupt bits. */
#define CPU_INTERRUPT_TGT_EXT_0 0x0008
#define CPU_INTERRUPT_TGT_EXT_1 0x0010
#define CPU_INTERRUPT_TGT_EXT_2 0x0040
#define CPU_INTERRUPT_TGT_EXT_3 0x0200
#define CPU_INTERRUPT_TGT_EXT_4 0x1000

/* Target-specific "internal" interrupt bits. */
#define CPU_INTERRUPT_TGT_INT_0 0x0100
#define CPU_INTERRUPT_TGT_INT_1 0x0800
#define CPU_INTERRUPT_TGT_INT_2 0x2000

/*
 * Hardware interrupt sources that must be suppressed while
 * single-stepping under the debugger.
 */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
360
#ifdef CONFIG_USER_ONLY
/*
 * User-mode emulation has no softmmu TLB; only the invalid bit is
 * meaningful, and the MMIO/watchpoint bits compile away to zero.
 */
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
#define TLB_MMIO 0
#define TLB_WATCHPOINT 0

#else

/*
 * Softmmu TLB flag bits, stored in the low bits of a TLB entry address.
 * They sit just below TARGET_PAGE_BITS_MIN so they can never collide
 * with the page-aligned address part of the entry.
 */

/* Entry is invalid: the address comparison always fails. */
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))

/* Writes must take the slow path for dirty-page tracking. */
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))

/* Page maps to memory-mapped I/O. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))

/* A watchpoint covers this page. */
#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))

/* Accesses to this page must be byte-swapped. */
#define TLB_BSWAP (1 << (TARGET_PAGE_BITS_MIN - 5))

/* Writes to this page are silently discarded (e.g. ROM). */
#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))

/* Union of all flag bits that a TLB address compare must mask out. */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
     | TLB_WATCHPOINT | TLB_BSWAP | TLB_DISCARD_WRITE)
403
404
405
406
407
408
409
410
411static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
412{
413 return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
414}
415
416
417
418
419
420
421
422static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
423{
424 return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
425}
426
#ifdef CONFIG_TCG
/* Append TCG translation/execution statistics to @buf. */
void dump_exec_info(GString *buf);
#endif

#endif /* !CONFIG_USER_ONLY */

/* Main CPU execution loop; returns one of the EXCP_* codes above. */
int cpu_exec(CPUState *cpu);
/* Attach/detach the TCG accelerator state for @cpu. */
void tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);
438
439
440
441
442
443
444
445static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
446{
447 cpu->parent_obj.env_ptr = &cpu->env;
448 cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
449}
450
451
452
453
454
455
456
457static inline ArchCPU *env_archcpu(CPUArchState *env)
458{
459 return container_of(env, ArchCPU, env);
460}
461
462
463
464
465
466
467
468static inline CPUState *env_cpu(CPUArchState *env)
469{
470 return &env_archcpu(env)->parent_obj;
471}
472
473
474
475
476
477
478
479static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
480{
481 ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
482 return &arch_cpu->neg;
483}
484
485
486
487
488
489
490
491static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
492{
493 ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
494 return &arch_cpu->neg;
495}
496
497
498
499
500
501
502
503static inline CPUTLB *env_tlb(CPUArchState *env)
504{
505 return &env_neg(env)->tlb;
506}
507
508#endif
509