1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include "qemu/osdep.h"
22#include "cpu.h"
23#include "disas/disas.h"
24#include "exec/exec-all.h"
25#include "tcg-op.h"
26#include "exec/helper-proto.h"
27#include "microblaze-decode.h"
28#include "exec/cpu_ldst.h"
29#include "exec/helper-gen.h"
30
31#include "trace-tcg.h"
32#include "exec/log.h"
33
34
35#define SIM_COMPAT 0
36#define DISAS_GNU 1
37#define DISAS_MB 1
38#if DISAS_MB && !SIM_COMPAT
39# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40#else
41# define LOG_DIS(...) do { } while (0)
42#endif
43
44#define D(x)
45
46#define EXTRACT_FIELD(src, start, end) \
47 (((src) >> start) & ((1 << (end - start + 1)) - 1))
48
49static TCGv env_debug;
50static TCGv_env cpu_env;
51static TCGv cpu_R[32];
52static TCGv cpu_SR[18];
53static TCGv env_imm;
54static TCGv env_btaken;
55static TCGv env_btarget;
56static TCGv env_iflags;
57static TCGv env_res_addr;
58static TCGv env_res_val;
59
60#include "exec/gen-icount.h"
61
62
63typedef struct DisasContext {
64 MicroBlazeCPU *cpu;
65 target_ulong pc;
66
67
68 int type_b;
69 uint32_t ir;
70 uint8_t opcode;
71 uint8_t rd, ra, rb;
72 uint16_t imm;
73
74 unsigned int cpustate_changed;
75 unsigned int delayed_branch;
76 unsigned int tb_flags, synced_flags;
77 unsigned int clear_imm;
78 int is_jmp;
79
80#define JMP_NOJMP 0
81#define JMP_DIRECT 1
82#define JMP_DIRECT_CC 2
83#define JMP_INDIRECT 3
84 unsigned int jmp;
85 uint32_t jmp_pc;
86
87 int abort_at_next_insn;
88 int nr_nops;
89 struct TranslationBlock *tb;
90 int singlestep_enabled;
91} DisasContext;
92
/* Printable names of the 32 general-purpose registers, for disassembly.  */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
100
/* Printable names of the special registers.
   NOTE(review): 19 entries here vs. cpu_SR[18] above -- "sr18" looks like
   one past the cpu_SR array; confirm against the code that indexes this.  */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
107
/* Flush the lazily-tracked iflags out to the env_iflags TCG global,
   but only if they have changed since the last sync.  */
static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}
116
/* Emit code that raises guest exception @index at the current PC.
   Syncs iflags and SR_PC first so the helper sees consistent state,
   and marks the translation as ended (DISAS_UPDATE).  */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
127
128static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
129{
130#ifndef CONFIG_USER_ONLY
131 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
132#else
133 return true;
134#endif
135}
136
/* Emit a jump to @dest, chaining TBs via slot @n when allowed,
   otherwise falling back to a plain exit after updating SR_PC.  */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        /* Encode the TB pointer plus the chain slot index.  */
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
148
/* Extract the carry flag into @d.  The copy kept in MSR bit 31 (MSR_CC)
   is used so a plain shift suffices.  */
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
153
154
155
156
157
/*
 * write_carry
 *
 * v    Carry input, only bit 0 is significant.
 *
 * Replicates bit 0 of @v into both carry copies in the MSR
 * (MSR_C and the MSR_CC shadow at bit 31), leaving all other
 * MSR bits untouched.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    /* Smear bit 0 of v across the whole word...  */
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    /* ...then keep only the two carry bit positions.  */
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
169
170static void write_carryi(DisasContext *dc, bool carry)
171{
172 TCGv t0 = tcg_temp_new();
173 tcg_gen_movi_tl(t0, carry);
174 write_carry(dc, t0);
175 tcg_temp_free(t0);
176}
177
178
179
180static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
181{
182
183 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
184}
185
/* Return a pointer to the TCGv holding operand B of an ALU op:
   either register rb, or env_imm materialized from the instruction's
   immediate (combined with a preceding imm prefix when IMM_FLAG is set).  */
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            /* env_imm already holds the prefix in its high half.  */
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            /* Sign-extend the 16-bit immediate.  */
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
197
/*
 * Decode add/addc/addk/addkc (and their type-b "i" immediate forms).
 * Opcode bit 2 (k) = keep carry (do not update MSR carry),
 * opcode bit 1 (c) = add the current carry into the result.
 */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry: MSR is not written, so no helper needed.
           rd == r0 makes the whole insn a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - add the carry-in on top.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From here on the MSR carry must be updated.
       cf holds the carry-in (0 unless the c bit is set).  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        /* Compute the carry-out before the destination is clobbered
           (rd may alias ra or rb).  */
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: result is discarded, only the carry flag changes.  */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
251
/*
 * Decode rsub variants and cmp/cmpu.  Subtraction is implemented as
 * b + ~a + carry-in (carry-in defaults to 1 for plain rsub).
 * Opcode bit 2 (k) = keep carry, bit 1 (c) = use carry-in from MSR;
 * cmp/cmpu are the register-form k-variants with imm bit 0 set.
 */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry: MSR is not written.
           rd == r0 makes the whole insn a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - add the carry-in on top.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From here on the MSR carry must be updated.
       Without the c bit, the implicit carry-in of a subtract is 1.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c.  carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        /* Carry-out computed before rd (possibly aliasing an input)
           is written.  */
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: only the carry flag changes.  */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
322
/* Decode the pattern-compare group: pcmpbf / pcmpeq / pcmpne
   (mode = low two opcode bits).  Raises an illegal-opcode exception
   when the core is configured without pattern-compare support.
   NOTE(review): unlike e.g. dec_mul, there is no "return" after
   raising the exception here, so decode continues -- confirm whether
   that is intentional.  */
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf: byte-find helper.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}
362
/* Decode and/andn (register forms with imm bit 10 set are actually
   pattern-compare insns and are redirected to dec_pattern).  */
static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    /* rd == r0: nop.  */
    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
383
/* Decode or (register forms with imm bit 10 set are pattern-compare).  */
static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
395
/* Decode xor (register forms with imm bit 10 set are pattern-compare).  */
static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
407
/* Copy the current MSR into @d.  */
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
412
413static inline void msr_write(DisasContext *dc, TCGv v)
414{
415 TCGv t;
416
417 t = tcg_temp_new();
418 dc->cpustate_changed = 1;
419
420 tcg_gen_andi_tl(t, v, ~MSR_PVR);
421 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
422 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
423 tcg_temp_free(t);
424}
425
/*
 * Decode the MSR/special-register access group:
 *  - msrset/msrclr (imm bit 15 clear): set/clear MSR bits from the imm;
 *  - mts/mfs (imm bit 15 set): move to/from special register "sr"
 *    (imm bits 13:0), including the MMU regs (0x1000 block) and PVR
 *    (0x2000 block, read-only).
 * to = imm bit 14: 1 for "move to" (mts/msr writes), 0 for "move from".
 */
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* Core configured without msrset/msrclr: treated as a nop.  */
            return;
        }

        /* In user mode only MSR[C]-only accesses (imm 0 or 4) are
           allowed; anything else is a privileged-insn exception.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        /* rd receives the pre-modification MSR value.  */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        /* MSR changed: end the TB after this insn.  */
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    /* mts is privileged.  */
    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block (sr 0x1000..0x10ff).  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        /* mts: write ra into special register sr.  */
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                /* PC is not writable via mts.  */
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                /* Only the low 5 bits of FSR are writable.  */
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        /* mfs: read special register sr into rd.  */
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
             case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                /* PVR registers 0..12.  */
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    /* r0 must always read as zero.  */
    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}
583
584
/* 64-bit signed multiply of @a and @b; low 32 bits go to @d,
   high 32 bits to @d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* Sign-extend the 32-bit operands to 64 bits.  */
    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
603
604
/* 64-bit unsigned multiply of @a and @b; low 32 bits go to @d,
   high 32 bits to @d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* Zero-extend the 32-bit operands to 64 bits.  */
    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_extrl_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_extrl_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
623
624
/* Decode mul/muli and the mulh/mulhsu/mulhu high-half variants
   (subcode = low two imm bits).  Raises illegal-opcode if the core
   lacks hardware multiply.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    /* Scratch destinations for the unused half of each product.  */
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu require the 64-bit multiplier option.
       NOTE(review): this check has an empty body -- confirm whether a
       nop or an exception was intended when MUL64 is absent.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop??? */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}
679
680
/* Decode idiv/idivu (u = imm bit 1).  Division itself is done in a
   helper (handles divide-by-zero etc. at run time).  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    /* NOTE(review): unlike dec_mul, this does not check MSR_EE_FLAG and
       does not return after raising the exception -- confirm.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    /* Keep r0 zero if it was the destination.  */
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}
703
/*
 * Decode the barrel-shift group: bsll/bsra/bsrl (register or immediate
 * shift amount), plus the immediate extract (bsefi, e bit) and insert
 * (bsifi, i bit) forms.  Raises illegal-opcode when the core has no
 * barrel shifter.
 */
static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = 0, i = 0;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);      /* shift left vs right */
    t = extract32(dc->imm, 9, 1);       /* arithmetic vs logical right */
    imm_w = extract32(dc->imm, 6, 5);   /* field width / end bit */
    imm_s = extract32(dc->imm, 0, 5);   /* shift amount / start bit */

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        /* bsefi: extract imm_w bits starting at imm_s.  */
        uint32_t mask = (1UL << imm_w) - 1;

        tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], imm_s);
        tcg_gen_andi_tl(cpu_R[dc->rd], cpu_R[dc->rd], mask);
    } else if (i) {
        /* bsifi: insert a (imm_w - imm_s + 1)-bit field at imm_s.
           NOTE(review): "width == 0" can't be true when imm_w > imm_s,
           so the first disjunct looks redundant -- confirm the intended
           validity condition against the ISA manual.  */
        int width = imm_w - imm_s + 1;

        if (width == 0 || imm_w <= imm_s) {
            /* Unsupported/ill-formed field: log and do nothing.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new();

        /* Only the low 5 bits of the shift amount are used.  */
        tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_tl(t0, t0, 31);

        if (s) {
            tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free(t0);
    }
}
766
/* Decode the "bit" group (op = low 9 bits of the insn): shifts through
   carry (src/srl/sra), sign extensions, cache ops (wdc/wic), clz and
   byte/halfword swaps.  */
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src: rotate right through carry.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            /* Capture the old carry (in the MSR_CC shadow position,
               bit 31) before write_carry clobbers it.  */
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl (0x41, logical) / sra (0x1, arithmetic).  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* The bit shifted out becomes the new carry.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc: data cache write -- privileged, otherwise a nop here.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic: instruction cache write -- privileged, otherwise a nop.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz -- needs the pattern-compare option.  */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb: byte-swap the word.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph: swap the two halfwords (rotate by 16).  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
862
/* Demote a direct branch to an indirect one by materializing the
   branch target (and, for unconditional branches, the taken flag)
   into the TCG globals.  Needed before ops that may raise exceptions
   mid-delay-slot, e.g. loads/stores.  */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
873
/* Decode the imm prefix: load the upper 16 bits of env_imm and flag
   the next insn (IMM_FLAG) so its 16-bit immediate is OR'ed in.  */
static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    /* Keep IMM_FLAG alive across exactly one following insn.  */
    dc->clear_imm = 0;
}
881
/*
 * Compute the effective address of a load/store.
 *
 * Returns either a pointer directly to a register TCGv (when one
 * operand is zero and no temp is needed) or to *t, in which case the
 * caller owns the temp and must free it (callers test "addr == &t").
 * Emits a stack-protection helper call when r1 is involved and the
 * core is configured with stackprot.
 */
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should we use the exception vectors provided by stackprot?  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
932
/*
 * Decode loads: lbu/lhu/lw (+ immediate, reversed-endian "r" and
 * exclusive "x" variants).  Reversed sub-word accesses are modelled by
 * tweaking the address (byte: invert low address bits; halfword: xor
 * with 2) combined with the MO_BSWAP memop flag.
 */
static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /* Address manipulation for the reversed sub-word accesses.  */
    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
        switch (size) {
            case 1:
            {
                /* Byte: force the address's low two bits to select the
                   mirror byte within the word (addr' = (addr & ~3) |
                   (3 - (addr & 3))).  */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                /* NOTE(review): env_imm is clobbered with the munged
                   address here -- confirm this side effect is intended.  */
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* Halfword: flip which half of the word is accessed.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx: word-align the address used for the reservation.  */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Load into a scratch first so rd is untouched if the alignment
       check below faults.  */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        /* Record the reservation for a following swx.  */
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) {
        /* lwx clears the carry.  */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}
1061
/*
 * Decode stores: sb/sh/sw (+ immediate, reversed-endian "r" and
 * exclusive "x" variants).  swx implements the store-conditional half
 * of lwx/swx: the store only happens if the reserved address matches
 * and the memory still holds the reserved value; carry reports failure.
 */
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    /* swx needs a local (branch-surviving) temp for the address.  */
    swx_addr = tcg_temp_local_new();
    if (ex) {
        TCGv tval;

        /* swx does not throw unaligned access errors, so force
           alignment.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        /* Pessimistically flag failure; cleared below on success.  */
        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        /* Skip the store if the reservation address doesn't match.  */
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the current memory value against the reserved one;
           only an unchanged value lets the store proceed.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    /* Address manipulation for the reversed sub-word accesses
       (same scheme as dec_load).  */
    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
        switch (size) {
            case 1:
            {
                /* addr' = (addr & ~3) | (3 - (addr & 3)).  */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                /* NOTE(review): env_imm clobbered, as in dec_load.  */
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* Halfword: flip which half of the word is accessed.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed (after the store has been emitted).  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
1188
/* Emit d = (a <cc> b) for a branch condition code, as a 0/1 setcond.  */
static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}
1216
/* Set SR_PC to pc_true or pc_false depending on env_btaken.  */
static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();

    /* Conditional jmp.  Write the fall-through PC, then overwrite it
       with the taken target unless btaken == 0.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
1226
/* Decode conditional branches (beq/bne/blt/... and their d-slot
   variants).  The condition tests ra against zero; the target is
   PC-relative.  */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Record whether the delay-slot insn carries an imm prefix, so
           exceptions in the slot can restart correctly.  */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
1257
/*
 * Decode unconditional branches: br/bra/brl/bral (+ d-slot forms),
 * plus the insns that share this encoding: mbar/sleep and the
 * brk/brki special targets (break, user-mode doorbell, debug).
 */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar 16 (rd bit 4) is "sleep until interrupt".  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            /* Halt the CPU: store 1 into CPUState.halted, addressed
               relative to env via the negative container offset.  */
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            tcg_temp_free_i32(tmp_1);
            dc->is_jmp = DISAS_UPDATE;
            gen_helper_sleep(cpu_env);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* mbar is a no-op for TCG, but end the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Record whether the delay-slot insn carries an imm prefix.  */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki targets: 8/0x18 = break, 0 = user-mode doorbell
               (privileged) or debug when allowed.  */
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            /* Known target at translate time: can be chained directly.  */
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
1335
/* Return-from-interrupt MSR update: set IE and restore VM/UM from
   their shadow copies (one bit to the left in the MSR).  */
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
1352
/* Return-from-break MSR update: clear BIP and restore VM/UM from
   their shadow copies.  */
static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}
1369
/* Return-from-exception MSR update: set EE, clear EIP, and restore
   VM/UM from their shadow copies.  */
static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
1388
/*
 * Decode the return-style branches: rtsd (plain), rtid (interrupt),
 * rtbd (break) and rted (exception), distinguished by bits 21-23.
 *
 * All of these have a delay slot, so the branch is armed here
 * (delayed_branch = 2, D_FLAG) and the MSR restore work is deferred:
 * DRTI/DRTB/DRTE is set and later consumed by do_rti/do_rtb/do_rte
 * from the main translation loop once the slot insn has executed.
 * rtid/rtbd/rted are privileged; from user mode they raise a
 * privileged-instruction HW exception when MSR_EE is set.
 */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);   /* rtid */
    b_bit = dc->ir & (1 << 22);   /* rtbd */
    e_bit = dc->ir & (1 << 23);   /* rted */

    /* Branch takes effect after the delay-slot instruction.  */
    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    /* Record whether the branch insn itself was imm-extended, so the
       delay slot can be handled correctly on exceptions.  */
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            /* Privileged insn executed from user mode.  */
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            /* Privileged insn executed from user mode.  */
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            /* Privileged insn executed from user mode.  */
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    /* Branch target is rA plus the (possibly imm-extended) operand.  */
    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
1434
1435static int dec_check_fpuv2(DisasContext *dc)
1436{
1437 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1438 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1439 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1440 }
1441 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
1442}
1443
1444static void dec_fpu(DisasContext *dc)
1445{
1446 unsigned int fpu_insn;
1447
1448 if ((dc->tb_flags & MSR_EE_FLAG)
1449 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1450 && (dc->cpu->cfg.use_fpu != 1)) {
1451 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1452 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1453 return;
1454 }
1455
1456 fpu_insn = (dc->ir >> 7) & 7;
1457
1458 switch (fpu_insn) {
1459 case 0:
1460 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1461 cpu_R[dc->rb]);
1462 break;
1463
1464 case 1:
1465 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1466 cpu_R[dc->rb]);
1467 break;
1468
1469 case 2:
1470 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1471 cpu_R[dc->rb]);
1472 break;
1473
1474 case 3:
1475 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1476 cpu_R[dc->rb]);
1477 break;
1478
1479 case 4:
1480 switch ((dc->ir >> 4) & 7) {
1481 case 0:
1482 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1483 cpu_R[dc->ra], cpu_R[dc->rb]);
1484 break;
1485 case 1:
1486 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1487 cpu_R[dc->ra], cpu_R[dc->rb]);
1488 break;
1489 case 2:
1490 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1491 cpu_R[dc->ra], cpu_R[dc->rb]);
1492 break;
1493 case 3:
1494 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1495 cpu_R[dc->ra], cpu_R[dc->rb]);
1496 break;
1497 case 4:
1498 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1499 cpu_R[dc->ra], cpu_R[dc->rb]);
1500 break;
1501 case 5:
1502 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1503 cpu_R[dc->ra], cpu_R[dc->rb]);
1504 break;
1505 case 6:
1506 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1507 cpu_R[dc->ra], cpu_R[dc->rb]);
1508 break;
1509 default:
1510 qemu_log_mask(LOG_UNIMP,
1511 "unimplemented fcmp fpu_insn=%x pc=%x"
1512 " opc=%x\n",
1513 fpu_insn, dc->pc, dc->opcode);
1514 dc->abort_at_next_insn = 1;
1515 break;
1516 }
1517 break;
1518
1519 case 5:
1520 if (!dec_check_fpuv2(dc)) {
1521 return;
1522 }
1523 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1524 break;
1525
1526 case 6:
1527 if (!dec_check_fpuv2(dc)) {
1528 return;
1529 }
1530 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1531 break;
1532
1533 case 7:
1534 if (!dec_check_fpuv2(dc)) {
1535 return;
1536 }
1537 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1538 break;
1539
1540 default:
1541 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1542 " opc=%x\n",
1543 fpu_insn, dc->pc, dc->opcode);
1544 dc->abort_at_next_insn = 1;
1545 break;
1546 }
1547}
1548
1549static void dec_null(DisasContext *dc)
1550{
1551 if ((dc->tb_flags & MSR_EE_FLAG)
1552 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1553 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1554 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1555 return;
1556 }
1557 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1558 dc->abort_at_next_insn = 1;
1559}
1560
1561
/*
 * Decode the stream (FSL) get/put instructions.
 *
 * rd == 0 selects "put" (send rA to the link), otherwise "get"
 * (receive into rd).  The link id comes from the immediate (type_b)
 * or from rB; the remaining control bits are passed to the helper.
 * Stream insns are privileged: from user mode with MSR_EE set they
 * raise a privileged-instruction HW exception.
 */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
    /* NOTE(review): declared TCGv_i32 but created with tcg_temp_new()
       (TCGv); these coincide on a 32-bit target — confirm if reused.  */
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        /* Immediate form: link id in imm[3:0], control in imm[15:10].  */
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        /* Dynamic form: link id from rB, control in imm[15:5].  */
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}
1596
/*
 * Top-level decode dispatch table.  decode() scans this in order and
 * calls the handler of the first entry whose pattern matches:
 * (opcode & mask) == bits.  The DEC_* macros expand to bits/mask
 * pairs; the trailing {0, 0} entry matches everything and routes
 * unrecognised opcodes to dec_null.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1624
/*
 * Decode and translate one 32-bit instruction word.
 *
 * Extracts the common field layout (opcode, rd, ra, rb, imm), handles
 * the all-zeroes word specially (illegal-opcode trap when the core is
 * configured that way, otherwise nop counting with a sanity abort on
 * long nop runs), then dispatches through the decinfo[] table.
 */
static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        /* Zero word: trap it as an illegal opcode if both the
           illegal-opcode and opcode-0x0-illegal PVR bits are set.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        /* Long runs of nops usually mean we are fetching garbage.  */
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }

    /* Type B insns carry a 16-bit immediate instead of rB.  */
    dc->type_b = ir & (1 << 29);

    /* Common MicroBlaze field layout.  */
    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* First matching pattern wins; {0,0} catch-all ends the scan.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}
1666
1667
/*
 * Translate a block of guest code starting at tb->pc into TCG ops.
 *
 * Instructions are translated one at a time until the TB has to end:
 * a jump was emitted, translator-visible CPU state changed, the op
 * buffer filled up, a page boundary was reached, the insn budget ran
 * out, or we are single-stepping.  Delay slots are handled by counting
 * down dc->delayed_branch and emitting the pending branch (and any
 * deferred rti/rtb/rte MSR update) once the slot insn is translated.
 */
void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* If the previous TB ended inside a delay slot, resume it here.  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included
               in [tb->pc, tb->pc + tb->size), so advance the PC here
               to make the tb->size computation below come out right.  */
            dc->pc += 4;
            break;
        }

        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        /* Decoders set clear_imm = 0 to keep IMM_FLAG alive across an
           imm prefix insn.  */
        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* Apply any MSR update deferred by dec_rts.  */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* Emit the pending branch and end the TB.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp: fall through when not taken.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
         && !tcg_op_buf_full()
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* TB ended between a branch and its delay slot: fall back
               to an indirect continuation so the slot runs next TB.  */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();

    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* Indicate that the hash table must be used
                   to find the next TB.  */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate; goto_tb was emitted.  */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1861
1862void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1863 int flags)
1864{
1865 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1866 CPUMBState *env = &cpu->env;
1867 int i;
1868
1869 if (!env || !f)
1870 return;
1871
1872 cpu_fprintf(f, "IN: PC=%x %s\n",
1873 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1874 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1875 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1876 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1877 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1878 env->btaken, env->btarget,
1879 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1880 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1881 (env->sregs[SR_MSR] & MSR_EIP),
1882 (env->sregs[SR_MSR] & MSR_IE));
1883
1884 for (i = 0; i < 32; i++) {
1885 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1886 if ((i + 1) % 4 == 0)
1887 cpu_fprintf(f, "\n");
1888 }
1889 cpu_fprintf(f, "\n\n");
1890}
1891
1892MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
1893{
1894 MicroBlazeCPU *cpu;
1895
1896 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
1897
1898 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1899
1900 return cpu;
1901}
1902
1903void mb_tcg_init(void)
1904{
1905 int i;
1906
1907 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1908
1909 env_debug = tcg_global_mem_new(cpu_env,
1910 offsetof(CPUMBState, debug),
1911 "debug0");
1912 env_iflags = tcg_global_mem_new(cpu_env,
1913 offsetof(CPUMBState, iflags),
1914 "iflags");
1915 env_imm = tcg_global_mem_new(cpu_env,
1916 offsetof(CPUMBState, imm),
1917 "imm");
1918 env_btarget = tcg_global_mem_new(cpu_env,
1919 offsetof(CPUMBState, btarget),
1920 "btarget");
1921 env_btaken = tcg_global_mem_new(cpu_env,
1922 offsetof(CPUMBState, btaken),
1923 "btaken");
1924 env_res_addr = tcg_global_mem_new(cpu_env,
1925 offsetof(CPUMBState, res_addr),
1926 "res_addr");
1927 env_res_val = tcg_global_mem_new(cpu_env,
1928 offsetof(CPUMBState, res_val),
1929 "res_val");
1930 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1931 cpu_R[i] = tcg_global_mem_new(cpu_env,
1932 offsetof(CPUMBState, regs[i]),
1933 regnames[i]);
1934 }
1935 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1936 cpu_SR[i] = tcg_global_mem_new(cpu_env,
1937 offsetof(CPUMBState, sregs[i]),
1938 special_regnames[i]);
1939 }
1940}
1941
/*
 * Restore CPU state after a TB fault/interrupt: data[0] is the guest
 * PC recorded by tcg_gen_insn_start() for the faulting insn.
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}
1947