1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#ifndef CPU_ALL_H
20#define CPU_ALL_H
21
22#include "exec/cpu-common.h"
23#include "exec/memory.h"
24#include "exec/tswap.h"
25#include "qemu/thread.h"
26#include "hw/core/cpu.h"
27#include "qemu/rcu.h"
28
29#define EXCP_INTERRUPT 0x10000
30#define EXCP_HLT 0x10001
31#define EXCP_DEBUG 0x10002
32#define EXCP_HALTED 0x10003
33#define EXCP_YIELD 0x10004
34#define EXCP_ATOMIC 0x10005
35
36
37
38
39
40
41
42
43
44#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
45#define BSWAP_NEEDED
46#endif
47
48#if TARGET_LONG_SIZE == 4
49#define tswapl(s) tswap32(s)
50#define tswapls(s) tswap32s((uint32_t *)(s))
51#define bswaptls(s) bswap32s(s)
52#else
53#define tswapl(s) tswap64(s)
54#define tswapls(s) tswap64s((uint64_t *)(s))
55#define bswaptls(s) bswap64s(s)
56#endif
57
58
59
60
61#if TARGET_BIG_ENDIAN
62#define lduw_p(p) lduw_be_p(p)
63#define ldsw_p(p) ldsw_be_p(p)
64#define ldl_p(p) ldl_be_p(p)
65#define ldq_p(p) ldq_be_p(p)
66#define stw_p(p, v) stw_be_p(p, v)
67#define stl_p(p, v) stl_be_p(p, v)
68#define stq_p(p, v) stq_be_p(p, v)
69#define ldn_p(p, sz) ldn_be_p(p, sz)
70#define stn_p(p, sz, v) stn_be_p(p, sz, v)
71#else
72#define lduw_p(p) lduw_le_p(p)
73#define ldsw_p(p) ldsw_le_p(p)
74#define ldl_p(p) ldl_le_p(p)
75#define ldq_p(p) ldq_le_p(p)
76#define stw_p(p, v) stw_le_p(p, v)
77#define stl_p(p, v) stl_le_p(p, v)
78#define stq_p(p, v) stq_le_p(p, v)
79#define ldn_p(p, sz) ldn_le_p(p, sz)
80#define stn_p(p, sz, v) stn_le_p(p, sz, v)
81#endif
82
83
84
85#if defined(CONFIG_USER_ONLY)
86#include "exec/user/abitypes.h"
87#include "exec/user/guest-base.h"
88
89extern bool have_guest_base;
90
91
92
93
94
95
96
97
98
99extern unsigned long reserved_va;
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115#define GUEST_ADDR_MAX_ \
116 ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
117 UINT32_MAX : ~0ul)
118#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
119
120#else
121
122#include "exec/hwaddr.h"
123
124#define SUFFIX
125#define ARG1 as
126#define ARG1_DECL AddressSpace *as
127#define TARGET_ENDIANNESS
128#include "exec/memory_ldst.h.inc"
129
130#define SUFFIX _cached_slow
131#define ARG1 cache
132#define ARG1_DECL MemoryRegionCache *cache
133#define TARGET_ENDIANNESS
134#include "exec/memory_ldst.h.inc"
135
136static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
137{
138 address_space_stl_notdirty(as, addr, val,
139 MEMTXATTRS_UNSPECIFIED, NULL);
140}
141
142#define SUFFIX
143#define ARG1 as
144#define ARG1_DECL AddressSpace *as
145#define TARGET_ENDIANNESS
146#include "exec/memory_ldst_phys.h.inc"
147
148
149#define ENDIANNESS
150#include "exec/memory_ldst_cached.h.inc"
151
152#define SUFFIX _cached
153#define ARG1 cache
154#define ARG1_DECL MemoryRegionCache *cache
155#define TARGET_ENDIANNESS
156#include "exec/memory_ldst_phys.h.inc"
157#endif
158
159
160
161#ifdef TARGET_PAGE_BITS_VARY
162# include "exec/page-vary.h"
163extern const TargetPageBits target_page;
164#ifdef CONFIG_DEBUG_TCG
165#define TARGET_PAGE_BITS ({ assert(target_page.decided); target_page.bits; })
166#define TARGET_PAGE_MASK ({ assert(target_page.decided); \
167 (target_long)target_page.mask; })
168#else
169#define TARGET_PAGE_BITS target_page.bits
170#define TARGET_PAGE_MASK ((target_long)target_page.mask)
171#endif
172#define TARGET_PAGE_SIZE (-(int)TARGET_PAGE_MASK)
173#else
174#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
175#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
176#define TARGET_PAGE_MASK ((target_long)-1 << TARGET_PAGE_BITS)
177#endif
178
179#define TARGET_PAGE_ALIGN(addr) ROUND_UP((addr), TARGET_PAGE_SIZE)
180
181
182#define PAGE_READ 0x0001
183#define PAGE_WRITE 0x0002
184#define PAGE_EXEC 0x0004
185#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
186#define PAGE_VALID 0x0008
187
188
189
190#define PAGE_WRITE_ORG 0x0010
191
192
193
194
195#define PAGE_WRITE_INV 0x0020
196
197#define PAGE_RESET 0x0040
198
199#define PAGE_ANON 0x0080
200
201#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
202
203#define PAGE_RESERVED 0x0100
204#endif
205
206#define PAGE_TARGET_1 0x0200
207#define PAGE_TARGET_2 0x0400
208
209
210
211
212
213#define PAGE_PASSTHROUGH 0x0800
214
215#if defined(CONFIG_USER_ONLY)
216void page_dump(FILE *f);
217
218typedef int (*walk_memory_regions_fn)(void *, target_ulong,
219 target_ulong, unsigned long);
220int walk_memory_regions(void *, walk_memory_regions_fn);
221
222int page_get_flags(target_ulong address);
223void page_set_flags(target_ulong start, target_ulong last, int flags);
224void page_reset_target_data(target_ulong start, target_ulong last);
225
226
227
228
229
230
231
232
233
234
235
236bool page_check_range(target_ulong start, target_ulong last, int flags);
237
238
239
240
241
242
243
244
245
246
247
248bool page_check_range_empty(target_ulong start, target_ulong last);
249
250
251
252
253
254
255
256
257
258
259
260
261
262target_ulong page_find_range_empty(target_ulong min, target_ulong max,
263 target_ulong len, target_ulong align);
264
265
266
267
268
269
270
271
272
273
274
275
276void *page_get_target_data(target_ulong address)
277 __attribute__((returns_nonnull));
278#endif
279
280CPUArchState *cpu_copy(CPUArchState *env);
281
282
283
284
285
286
287
288
289
290
291#define CPU_INTERRUPT_HARD 0x0002
292
293
294
295#define CPU_INTERRUPT_EXITTB 0x0004
296
297
298#define CPU_INTERRUPT_HALT 0x0020
299
300
301#define CPU_INTERRUPT_DEBUG 0x0080
302
303
304#define CPU_INTERRUPT_RESET 0x0400
305
306
307
308#define CPU_INTERRUPT_TGT_EXT_0 0x0008
309#define CPU_INTERRUPT_TGT_EXT_1 0x0010
310#define CPU_INTERRUPT_TGT_EXT_2 0x0040
311#define CPU_INTERRUPT_TGT_EXT_3 0x0200
312#define CPU_INTERRUPT_TGT_EXT_4 0x1000
313
314
315
316
317
318
319#define CPU_INTERRUPT_TGT_INT_0 0x0100
320#define CPU_INTERRUPT_TGT_INT_1 0x0800
321#define CPU_INTERRUPT_TGT_INT_2 0x2000
322
323
324
325
326#define CPU_INTERRUPT_SSTEP_MASK \
327 (CPU_INTERRUPT_HARD \
328 | CPU_INTERRUPT_TGT_EXT_0 \
329 | CPU_INTERRUPT_TGT_EXT_1 \
330 | CPU_INTERRUPT_TGT_EXT_2 \
331 | CPU_INTERRUPT_TGT_EXT_3 \
332 | CPU_INTERRUPT_TGT_EXT_4)
333
334#ifdef CONFIG_USER_ONLY
335
336
337
338
339
340
341#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
342#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2))
343#define TLB_WATCHPOINT 0
344
345#else
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
361
362
363#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
364
365#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
366
367#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4))
368
369#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
370
371
372
373
374
375#define TLB_FLAGS_MASK \
376 (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
377 | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
378
379
380
381
382
383
384#define TLB_BSWAP (1 << 0)
385
386#define TLB_WATCHPOINT (1 << 1)
387
388#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT)
389
390
391QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
392
393
394
395
396
397
398
399
400static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
401{
402 return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
403}
404
405
406
407
408
409
410
411static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
412{
413 return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
414}
415
416#ifdef CONFIG_TCG
417
418void dump_exec_info(GString *buf);
419#endif
420
421#endif
422
423
424int cpu_exec(CPUState *cpu);
425void tcg_exec_realizefn(CPUState *cpu, Error **errp);
426void tcg_exec_unrealizefn(CPUState *cpu);
427
428
429
430
431
432
433
434static inline void cpu_set_cpustate_pointers(ArchCPU *cpu)
435{
436 cpu->parent_obj.env_ptr = &cpu->env;
437 cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr;
438}
439
440
441
442
443
444
445
446static inline ArchCPU *env_archcpu(CPUArchState *env)
447{
448 return container_of(env, ArchCPU, env);
449}
450
451
452
453
454
455
456
457static inline CPUState *env_cpu(CPUArchState *env)
458{
459 return &env_archcpu(env)->parent_obj;
460}
461
462
463
464
465
466
467
468static inline CPUNegativeOffsetState *env_neg(CPUArchState *env)
469{
470 ArchCPU *arch_cpu = container_of(env, ArchCPU, env);
471 return &arch_cpu->neg;
472}
473
474
475
476
477
478
479
480static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu)
481{
482 ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj);
483 return &arch_cpu->neg;
484}
485
486
487
488
489
490
491
492static inline CPUTLB *env_tlb(CPUArchState *env)
493{
494 return &env_neg(env)->tlb;
495}
496
497#endif
498