1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include "qemu/osdep.h"
22#include "cpu.h"
23#include "disas/disas.h"
24#include "exec/exec-all.h"
25#include "tcg-op.h"
26#include "exec/helper-proto.h"
27#include "microblaze-decode.h"
28#include "exec/cpu_ldst.h"
29#include "exec/helper-gen.h"
30
31#include "trace-tcg.h"
32#include "exec/log.h"
33
34
35#define SIM_COMPAT 0
36#define DISAS_GNU 1
37#define DISAS_MB 1
38#if DISAS_MB && !SIM_COMPAT
39# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40#else
41# define LOG_DIS(...) do { } while (0)
42#endif
43
44#define D(x)
45
46#define EXTRACT_FIELD(src, start, end) \
47 (((src) >> start) & ((1 << (end - start + 1)) - 1))
48
49static TCGv env_debug;
50static TCGv_env cpu_env;
51static TCGv cpu_R[32];
52static TCGv cpu_SR[18];
53static TCGv env_imm;
54static TCGv env_btaken;
55static TCGv env_btarget;
56static TCGv env_iflags;
57static TCGv env_res_addr;
58static TCGv env_res_val;
59
60#include "exec/gen-icount.h"
61
62
/* Per-instruction translator state, rebuilt for each guest insn.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;            /* Guest PC of the insn being translated.  */

    /* Decoder: fields extracted from the instruction word.  */
    int type_b;                 /* Nonzero for type-B (immediate) format.  */
    uint32_t ir;                /* Raw 32-bit instruction word.  */
    uint8_t opcode;
    uint8_t rd, ra, rb;         /* Destination / source register numbers.  */
    uint16_t imm;               /* 16-bit immediate field.  */

    unsigned int cpustate_changed;  /* Force TB end; cpu state was touched.  */
    unsigned int delayed_branch;    /* Countdown for delay-slot handling.  */
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;     /* Clear IMM_FLAG after this insn.  */
    int is_jmp;

/* Branch bookkeeping states for dc->jmp.  */
#define JMP_NOJMP 0
#define JMP_DIRECT 1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT 3
    unsigned int jmp;
    uint32_t jmp_pc;            /* Target for direct jumps.  */

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
92
/* Names of the 32 general purpose registers, for disassembly/logging.  */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
100
/* Names of the special registers backing cpu_SR[], for logging.  */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
107
108static inline void t_sync_flags(DisasContext *dc)
109{
110
111 if (dc->tb_flags != dc->synced_flags) {
112 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
113 dc->synced_flags = dc->tb_flags;
114 }
115}
116
/*
 * Emit code that raises exception INDEX at the current insn.  Synchronizes
 * the lazy iflags and PC first, then marks the TB as ended (DISAS_UPDATE).
 */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    t_sync_flags(dc);
    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}
127
128static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
129{
130#ifndef CONFIG_USER_ONLY
131 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
132#else
133 return true;
134#endif
135}
136
/*
 * Jump to DEST, chaining through goto_tb slot N when the target is on the
 * same page; otherwise fall back to a plain exit with PC updated.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)dc->tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}
148
/* Extract the carry flag (MSR bit 31, the MSR_CC copy) into d as 0 or 1.  */
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}
153
154
155
156
157
/*
 * Write bit 0 of v into both carry copies in the MSR (MSR_C and the
 * mirrored MSR_CC).  Bit 0 is replicated across the word (shl/sar pair)
 * and then masked so only the two carry bits are modified.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}
169
170static void write_carryi(DisasContext *dc, bool carry)
171{
172 TCGv t0 = tcg_temp_new();
173 tcg_gen_movi_tl(t0, carry);
174 write_carry(dc, t0);
175 tcg_temp_free(t0);
176}
177
178
179
180static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
181{
182
183 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
184}
185
/*
 * Return a pointer to the TCGv holding ALU operand B: for type-B insns
 * the immediate, materialized into env_imm (OR-ed into the high half set
 * up by a preceding "imm", otherwise sign-extended); for type-A insns,
 * register rb.
 */
static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    } else
        return &cpu_R[dc->rb];
}
197
/*
 * add/addc/addk/addkc and immediate forms: rd = ra + op_b [+ carry].
 * k (opcode bit 2): keep carry, MSR[C] is not updated.
 * c (opcode bit 1): add the current carry into the result.
 */
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Easy case first: carry is kept, so MSR need not be touched.  */
    if (k) {
        /* With rd == r0 the whole insn is a nop (r0 is hardwired to 0).  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - add the incoming carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From here on k is zero, so MSR[C] must be updated.
       Extract the incoming carry (or 0) into cf.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        /* Compute the new carry with the helper before clobbering rd.  */
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: only the carry flag is produced.  */
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}
251
/*
 * rsub family: rd = op_b - ra, implemented as rd = ~ra + op_b + carry-in
 * (carry-in forced to 1 for plain subtract).  Also decodes cmp/cmpu,
 * which are type-A rsubk variants with imm bit 0 set.
 */
static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
             k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Easy case first: carry is kept, so MSR need not be touched.  */
    if (k) {
        /* With rd == r0 the whole insn is a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - add the incoming carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From here on k is zero, so MSR[C] must be updated.
       Carry-in is the MSR carry for rsubc, else 1 (two's complement).  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c.  carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        /* Compute the new carry with the helper before clobbering rd.  */
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        /* rd == r0: only the carry flag is produced.  */
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}
322
/*
 * Pattern-compare group: pcmpbf/pcmpeq/pcmpne, selected by the low two
 * opcode bits.  Raises an illegal-opcode hw exception when the core is
 * configured without pcmp support.
 */
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_pcmp_instr) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf: byte-find, done in a helper.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
                                   cpu_R[dc->ra], cpu_R[dc->rb]);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}
362
363static void dec_and(DisasContext *dc)
364{
365 unsigned int not;
366
367 if (!dc->type_b && (dc->imm & (1 << 10))) {
368 dec_pattern(dc);
369 return;
370 }
371
372 not = dc->opcode & (1 << 1);
373 LOG_DIS("and%s\n", not ? "n" : "");
374
375 if (!dc->rd)
376 return;
377
378 if (not) {
379 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
380 } else
381 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
382}
383
384static void dec_or(DisasContext *dc)
385{
386 if (!dc->type_b && (dc->imm & (1 << 10))) {
387 dec_pattern(dc);
388 return;
389 }
390
391 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
392 if (dc->rd)
393 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
394}
395
396static void dec_xor(DisasContext *dc)
397{
398 if (!dc->type_b && (dc->imm & (1 << 10))) {
399 dec_pattern(dc);
400 return;
401 }
402
403 LOG_DIS("xor r%d\n", dc->rd);
404 if (dc->rd)
405 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
406}
407
/* Copy the current MSR value into d.  */
static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}
412
413static inline void msr_write(DisasContext *dc, TCGv v)
414{
415 TCGv t;
416
417 t = tcg_temp_new();
418 dc->cpustate_changed = 1;
419
420 tcg_gen_andi_tl(t, v, ~MSR_PVR);
421 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
422 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
423 tcg_temp_free(t);
424}
425
/*
 * Decode the MSR-access group: msrclr/msrset (imm bit 15 clear) and
 * mts/mfs special register moves (imm bit 15 set, "to" = imm bit 14).
 * Privileged accesses from user mode raise a privileged-insn exception.
 */
static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;             /* Force immediate decoding for op_b.  */
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!dc->cpu->cfg.use_msr_instr) {
            /* nop on cores without the msr instructions.  */
            return;
        }

        /* Only the C (and no-op) masks are allowed from user mode.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        /* rd receives the pre-modification MSR value.  */
        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        /* MSR changed: end the TB after this insn.  */
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        /* mts is privileged.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block (sr 0x1000..0x10ff).  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        /* mts: move ra into special register sr.  */
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 0x7:
                /* Only the low 5 FSR flag bits are writable.  */
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        /* mfs: move special register sr into rd.  */
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 0x3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 0x5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 0x7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000:
            case 0x2001:
            case 0x2002:
            case 0x2003:
            case 0x2004:
            case 0x2005:
            case 0x2006:
            case 0x2007:
            case 0x2008:
            case 0x2009:
            case 0x200a:
            case 0x200b:
            case 0x200c:
                /* PVR registers 0..12.  */
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    /* Writes into r0 are discarded.  */
    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}
583
584
/* Multiplier unit: mul/mulh/mulhsu/mulhu (and muli).  */
static void dec_mul(DisasContext *dc)
{
    TCGv tmp;
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !dc->cpu->cfg.use_hw_mul) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        return;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.
       NOTE(review): this case is deliberately left unhandled here.  */
    if (subcode >= 1 && subcode <= 3 && dc->cpu->cfg.use_hw_mul < 2) {
        /* nop (?) */
    }

    /* tmp receives the discarded half of the 64-bit product.  */
    tmp = tcg_temp_new();
    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
    tcg_temp_free(tmp);
}
635
636
/* Divider unit: idiv/idivu — note rd = op_b / ra (operands reversed).  */
static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;            /* Unsigned divide.  */
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_div) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    /* Keep r0 hardwired to zero.  */
    if (!dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
}
659
/*
 * Barrel shifter group: bsll/bsra/bsrl (register or immediate), plus the
 * immediate-only bsefi (extract field) and bsifi (insert field) forms.
 */
static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int imm_w, imm_s;
    bool s, t, e = false, i = false;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !dc->cpu->cfg.use_barrel) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    if (dc->type_b) {
        /* Insert and extract are only available in immediate mode.  */
        i = extract32(dc->imm, 15, 1);
        e = extract32(dc->imm, 14, 1);
    }
    s = extract32(dc->imm, 10, 1);  /* Shift left.  */
    t = extract32(dc->imm, 9, 1);   /* Arithmetic (vs logical) right.  */
    imm_w = extract32(dc->imm, 6, 5);   /* Field width.  */
    imm_s = extract32(dc->imm, 0, 5);   /* Shift amount / field start.  */

    LOG_DIS("bs%s%s%s r%d r%d r%d\n",
            e ? "e" : "",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    if (e) {
        if (imm_w + imm_s > 32 || imm_w == 0) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_extract_i32(cpu_R[dc->rd], cpu_R[dc->ra], imm_s, imm_w);
        }
    } else if (i) {
        int width = imm_w - imm_s + 1;

        if (imm_w < imm_s) {
            /* These inputs have an undefined behavior.  */
            qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                          imm_w, imm_s);
        } else {
            tcg_gen_deposit_i32(cpu_R[dc->rd], cpu_R[dc->rd], cpu_R[dc->ra],
                                imm_s, width);
        }
    } else {
        t0 = tcg_temp_new();

        /* Shift count comes from op_b, masked to 0..31.  */
        tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
        tcg_gen_andi_tl(t0, t0, 31);

        if (s) {
            tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        } else {
            if (t) {
                tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            } else {
                tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
            }
        }
        tcg_temp_free(t0);
    }
}
725
/*
 * Bit-field group: src/srl/sra single-bit shifts through carry, sign
 * extensions, cache ops (wdc/wic), clz, and byte/halfword swaps.
 */
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src: shift right, old carry into bit 31, bit 0 into carry.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            /* Save old carry before write_carry clobbers it.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl (0x41) / sra (0x1): shift right by one into carry.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry from bit 0 of ra.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc: data cache op — a nop here, but privileged.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic: instruction cache op — a nop here, but privileged.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            /* clz: count leading zeros (needs the pcmp unit).  */
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->cfg.use_pcmp_instr) {
                tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
            }
            break;
        case 0x1e0:
            /* swapb: byte-reverse the word.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph: swap the two halfwords.  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}
821
/*
 * Demote a pending direct branch to the indirect form (env_btaken /
 * env_btarget) so state is consistent if the next insn can fault.
 */
static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            /* Unconditional direct jumps are always taken.  */
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}
832
833static void dec_imm(DisasContext *dc)
834{
835 LOG_DIS("imm %x\n", dc->imm << 16);
836 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
837 dc->tb_flags |= IMM_FLAG;
838 dc->clear_imm = 0;
839}
840
/*
 * Compute the effective address for a load/store.  Returns either a
 * pointer to an existing register TCGv (no temp needed) or to *t, which
 * the function allocates; the caller frees *t iff the return value is t.
 * Also emits the stack-protection check when r1 is involved.
 */
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should we use the exclusive stack access check?  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}
891
/*
 * Loads: lbu/lhu/lw and the reversed (lbur/lhur/lwr) and exclusive (lwx)
 * variants.  Reversed sub-word accesses are implemented by address
 * munging plus a byteswapped memop; lwx records the reservation.
 */
static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things:
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core
     *    (handled via the MO_BSWAP flip above).
     */
    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx requires a naturally aligned (word) address.  */
    if (ex) {
        /* Force addr into the temp so we can mask it.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /*
     * Load into a temp first, so rd is untouched if the access faults
     * or the alignment check below raises an exception.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        /* Record the reservation for a later swx.  */
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) {
        /* lwx clears the carry (success indicator protocol).  */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}
1020
/*
 * Stores: sb/sh/sw and the reversed and exclusive (swx) variants.  swx
 * only stores when the reservation address matches and the memory word
 * still equals the reserved value; carry reports failure (1) or success
 * (0).
 */
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    TCGLabel *swx_skip = NULL;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    /* Local temp: it survives the brconds below.  */
    swx_addr = tcg_temp_local_new();
    if (ex) {
        TCGv tval;

        /* swx needs a word-aligned address.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        /* Pessimistically assume failure; cleared below on success.  */
        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /*
         * Compare the current memory value against the reserved one.
         * This is not an atomic compare-and-swap; it only approximates
         * the reservation semantics.
         */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address.  t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /*
         * The helper runs after the store was already emitted; if it
         * raises, the unaligned store has nevertheless been performed.
         * NOTE(review): accepted inaccuracy of this implementation.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}
1147
1148static inline void eval_cc(DisasContext *dc, unsigned int cc,
1149 TCGv d, TCGv a, TCGv b)
1150{
1151 switch (cc) {
1152 case CC_EQ:
1153 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
1154 break;
1155 case CC_NE:
1156 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
1157 break;
1158 case CC_LT:
1159 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
1160 break;
1161 case CC_LE:
1162 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
1163 break;
1164 case CC_GE:
1165 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
1166 break;
1167 case CC_GT:
1168 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
1169 break;
1170 default:
1171 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1172 break;
1173 }
1174}
1175
/* Set PC to pc_true if env_btaken is nonzero, otherwise pc_false.  */
static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    TCGLabel *l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}
1185
/* Conditional branches: beq/bne/blt/ble/bge/bgt (with optional dslot).  */
static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Remember whether the branch is preceded by an "imm".  */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    /* Condition tests ra against zero.  */
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}
1216
/*
 * Unconditional branches: br/bra/brl (absolute/link/dslot variants),
 * plus mbar/sleep which share the encoding, and the brki-based
 * break/syscall/debug entry points.
 */
static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        /* Remember whether the branch is preceded by an "imm".  */
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            /* brki 8/0x18 are the break vectors.  */
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                /* brki 0 is the debug entry; privileged.  */
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}
1295
/*
 * rtid epilogue: re-enable interrupts (MSR_IE) and restore VM/UM from
 * their saved copies one bit up in the MSR.
 */
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTI_FLAG;
}
1312
/*
 * rtbd epilogue: clear break-in-progress (MSR_BIP) and restore VM/UM
 * from their saved copies one bit up in the MSR.
 */
static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTB_FLAG;
}
1329
/*
 * rted epilogue: re-enable exceptions (MSR_EE), clear exception-in-
 * progress (MSR_EIP) and restore VM/UM from their saved copies.
 */
static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    dc->tb_flags &= ~DRTE_FLAG;
}
1348
/*
 * Returns: rtsd/rtid/rtbd/rted — always executed with a delay slot.
 * The i/b/e bits select the interrupt/break/exception flavor, whose MSR
 * fixup is deferred to the delay-slot epilogue via DRT?_FLAG.
 */
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    /* Remember whether the return is preceded by an "imm".  */
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        /* Privileged from user mode.  */
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    /* Target is ra + op_b.  */
    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
1394
1395static int dec_check_fpuv2(DisasContext *dc)
1396{
1397 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1398 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1399 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1400 }
1401 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
1402}
1403
/*
 * Decode the floating point instruction group.  Raises an illegal-opcode
 * exception when the CPU is configured without an FPU, otherwise
 * dispatches on bits [9:7] of the instruction word to the per-op helper.
 * The fcmp sub-group additionally dispatches on bits [6:4].
 */
static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    /* No FPU configured: trap as an illegal opcode if that is enabled.  */
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && (dc->cpu->cfg.use_fpu != 1)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            /* fadd: rd = ra + rb  */
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            /* frsub: rd = rb - ra (reverse subtract)  */
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            /* fmul: rd = ra * rb  */
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            /* fdiv: rd = rb / ra  */
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            /* fcmp: comparison variant selected by bits [6:4].  */
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            /* flt requires the v2 FPU extensions.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            /* fint requires the v2 FPU extensions.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            /* fsqrt requires the v2 FPU extensions.  */
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}
1508
1509static void dec_null(DisasContext *dc)
1510{
1511 if ((dc->tb_flags & MSR_EE_FLAG)
1512 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1513 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1514 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1515 return;
1516 }
1517 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1518 dc->abort_at_next_insn = 1;
1519}
1520
1521
1522static void dec_stream(DisasContext *dc)
1523{
1524 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1525 TCGv_i32 t_id, t_ctrl;
1526 int ctrl;
1527
1528 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1529 dc->type_b ? "" : "d", dc->imm);
1530
1531 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1532 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1533 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1534 return;
1535 }
1536
1537 t_id = tcg_temp_new();
1538 if (dc->type_b) {
1539 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1540 ctrl = dc->imm >> 10;
1541 } else {
1542 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1543 ctrl = dc->imm >> 5;
1544 }
1545
1546 t_ctrl = tcg_const_tl(ctrl);
1547
1548 if (dc->rd == 0) {
1549 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1550 } else {
1551 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1552 }
1553 tcg_temp_free(t_id);
1554 tcg_temp_free(t_ctrl);
1555}
1556
/*
 * Instruction decoder dispatch table.  An entry matches when
 * (opcode & mask) == bits; decode() calls the first match.  The final
 * catch-all entry ({0, 0}) matches everything and routes undecoded
 * opcodes to dec_null.
 */
static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};
1584
1585static inline void decode(DisasContext *dc, uint32_t ir)
1586{
1587 int i;
1588
1589 dc->ir = ir;
1590 LOG_DIS("%8.8x\t", dc->ir);
1591
1592 if (dc->ir)
1593 dc->nr_nops = 0;
1594 else {
1595 if ((dc->tb_flags & MSR_EE_FLAG)
1596 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1597 && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1598 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1599 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1600 return;
1601 }
1602
1603 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1604 dc->nr_nops++;
1605 if (dc->nr_nops > 4) {
1606 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
1607 }
1608 }
1609
1610 dc->type_b = ir & (1 << 29);
1611
1612 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1613 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1614 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1615 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1616 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1617
1618
1619 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1620 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1621 decinfo[i].dec(dc);
1622 break;
1623 }
1624 }
1625}
1626
1627
/*
 * Translate a block of guest instructions starting at tb->pc into TCG
 * ops.  Translation stops at a branch, a CPU-state change, a page
 * boundary, the insn budget, or single-stepping.  Delay slots are
 * handled by the delayed_branch countdown: the branch is emitted one
 * insn after it was decoded.
 */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPUMBState *env = cs->env_ptr;
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint32_t pc_start;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    /* Resuming inside a delay slot: the branch is still pending.  */
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do
    {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /*
             * Advance past the breakpoint insn so that the TB covers a
             * non-zero guest range; execution will stop at the debug
             * exception before reaching it.
             */
            dc->pc += 4;
            break;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        /* decode() clears clear_imm when it sees an imm prefix insn.  */
        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                /* The delay slot has been emitted; finish the branch.  */
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    TCGLabel *l1 = gen_new_label();
                    t_sync_flags(dc);
                    /* Conditional jmp: fall through when not taken.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && !tcg_op_buf_full()
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            /* We stopped inside a delay slot; resume from here.  */
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();

    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch(dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* Indicate that the hash table must be used
                   to find the next TB.  */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("--------------\n");
#if DISAS_GNU
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
        qemu_log_unlock();
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}
1817
1818void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1819 int flags)
1820{
1821 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1822 CPUMBState *env = &cpu->env;
1823 int i;
1824
1825 if (!env || !f)
1826 return;
1827
1828 cpu_fprintf(f, "IN: PC=%x %s\n",
1829 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1830 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1831 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1832 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1833 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1834 env->btaken, env->btarget,
1835 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1836 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1837 (env->sregs[SR_MSR] & MSR_EIP),
1838 (env->sregs[SR_MSR] & MSR_IE));
1839
1840 for (i = 0; i < 32; i++) {
1841 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1842 if ((i + 1) % 4 == 0)
1843 cpu_fprintf(f, "\n");
1844 }
1845 cpu_fprintf(f, "\n\n");
1846}
1847
1848MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
1849{
1850 MicroBlazeCPU *cpu;
1851
1852 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
1853
1854 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1855
1856 return cpu;
1857}
1858
1859void mb_tcg_init(void)
1860{
1861 int i;
1862
1863 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1864 tcg_ctx.tcg_env = cpu_env;
1865
1866 env_debug = tcg_global_mem_new(cpu_env,
1867 offsetof(CPUMBState, debug),
1868 "debug0");
1869 env_iflags = tcg_global_mem_new(cpu_env,
1870 offsetof(CPUMBState, iflags),
1871 "iflags");
1872 env_imm = tcg_global_mem_new(cpu_env,
1873 offsetof(CPUMBState, imm),
1874 "imm");
1875 env_btarget = tcg_global_mem_new(cpu_env,
1876 offsetof(CPUMBState, btarget),
1877 "btarget");
1878 env_btaken = tcg_global_mem_new(cpu_env,
1879 offsetof(CPUMBState, btaken),
1880 "btaken");
1881 env_res_addr = tcg_global_mem_new(cpu_env,
1882 offsetof(CPUMBState, res_addr),
1883 "res_addr");
1884 env_res_val = tcg_global_mem_new(cpu_env,
1885 offsetof(CPUMBState, res_val),
1886 "res_val");
1887 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1888 cpu_R[i] = tcg_global_mem_new(cpu_env,
1889 offsetof(CPUMBState, regs[i]),
1890 regnames[i]);
1891 }
1892 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1893 cpu_SR[i] = tcg_global_mem_new(cpu_env,
1894 offsetof(CPUMBState, sregs[i]),
1895 special_regnames[i]);
1896 }
1897}
1898
/*
 * Restore CPU state from the tcg_gen_insn_start() data recorded during
 * translation; data[0] holds the guest PC of the faulting insn.
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->sregs[SR_PC] = data[0];
}
1904