1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include "qemu/osdep.h"
22#include "cpu.h"
23#include "disas/disas.h"
24#include "exec/exec-all.h"
25#include "tcg/tcg-op.h"
26#include "exec/helper-proto.h"
27#include "exec/cpu_ldst.h"
28#include "exec/helper-gen.h"
29#include "exec/translator.h"
30#include "qemu/qemu-print.h"
31
32#include "exec/log.h"
33
/*
 * Extract the inclusive bit field [start, end] (LSB-first) from src.
 * All macro arguments are parenthesized so that expression arguments
 * (e.g. "base + 4") expand with the intended precedence; the original
 * left start/end bare, which mis-computes the mask width for such
 * arguments.  Field width (end - start + 1) must be < 32, as before.
 */
#define EXTRACT_FIELD(src, start, end) \
    (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
36
37
/*
 * Extra DisasContextBase.is_jmp states beyond the generic DISAS_* values.
 * NOTE(review): the consumers of DISAS_JUMP/DISAS_EXIT_JUMP are outside
 * this chunk (presumably tb_stop) -- confirm exact semantics there.
 */
#define DISAS_JUMP DISAS_TARGET_0
/* Exit the TB after dynamic CPU-state writes (e.g. trans_brk, trans_brki). */
#define DISAS_EXIT DISAS_TARGET_1


/* Exit the TB; pc must be advanced to the next insn (see trans_mbar). */
#define DISAS_EXIT_NEXT DISAS_TARGET_2

#define DISAS_EXIT_JUMP DISAS_TARGET_3
45
static TCGv_i32 cpu_R[32];      /* general purpose registers r0..r31 */
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_msr;
static TCGv_i32 cpu_msr_c;      /* MSR carry bit, kept separate (0 or 1) */
static TCGv_i32 cpu_imm;        /* latched upper halfword from an imm prefix */
static TCGv_i32 cpu_bvalue;     /* compared register value for conditional branches */
static TCGv_i32 cpu_btarget;    /* branch target address */
static TCGv_i32 cpu_iflags;     /* per-insn flag bits synced by t_sync_flags */
static TCGv cpu_res_addr;       /* lwx/swx reservation address (-1 = none) */
static TCGv_i32 cpu_res_val;    /* value loaded by lwx, checked by swx */
56
57#include "exec/gen-icount.h"
58
59
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    DisasContextBase base;
    const MicroBlazeCPUConfig *cfg;

    /* The insn_start op of the current instruction, so that
       record_unaligned_ess can patch its parameters. */
    TCGOp *insn_start;

    /* Lazily allocated scratch temp standing in for r0 (see reg_for_*). */
    TCGv_i32 r0;
    bool r0_set;

    /* Upper 16 immediate bits from a preceding "imm" instruction. */
    uint32_t ext_imm;
    unsigned int tb_flags;          /* current copy of the TB flag bits */
    unsigned int tb_flags_to_set;   /* flags to apply to the NEXT insn */
    int mem_index;                  /* MMU index for normal accesses */

    /* Condition under which the branch in flight is taken. */
    TCGCond jmp_cond;

    /* Known branch destination, or -1 when indirect (register target). */
    uint32_t jmp_dest;
} DisasContext;
82
83static int typeb_imm(DisasContext *dc, int x)
84{
85 if (dc->tb_flags & IMM_FLAG) {
86 return deposit32(dc->ext_imm, 0, 16, x);
87 }
88 return x;
89}
90
91
92#include "decode-insns.c.inc"
93
static void t_sync_flags(DisasContext *dc)
{
    /* Synchronize the TB-dependent flag bits with env, but only if they
       have diverged from the flags this TB was compiled with. */
    if ((dc->tb_flags ^ dc->base.tb->flags) & IFLAGS_TB_MASK) {
        tcg_gen_movi_i32(cpu_iflags, dc->tb_flags & IFLAGS_TB_MASK);
    }
}
101
/* Raise exception 'index' via helper; ends the TB (no return). */
static void gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->base.is_jmp = DISAS_NORETURN;
}
110
/* As gen_raise_exception, but first sync iflags and pc to env. */
static void gen_raise_exception_sync(DisasContext *dc, uint32_t index)
{
    t_sync_flags(dc);
    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_raise_exception(dc, index);
}
117
/* Raise a hardware exception with the given ESR exception class. */
static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec)
{
    TCGv_i32 tmp = tcg_const_i32(esr_ec);
    /* Store the cause into env->esr before taking the exception. */
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUMBState, esr));
    tcg_temp_free_i32(tmp);

    gen_raise_exception_sync(dc, EXCP_HW_EXCP);
}
126
/* Transfer control to 'dest', using TB chaining slot 'n' when possible. */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (dc->base.singlestep_enabled) {
        /* Single-stepping: stop with EXCP_DEBUG at the destination. */
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
        tcg_gen_movi_i32(cpu_pc, dest);
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else if (translator_use_goto_tb(&dc->base, dest)) {
        /* Direct chaining to slot n of this TB. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        /* Indirect: look up the destination TB by cpu_pc. */
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
144
145
146
147
148
/*
 * Raise an illegal-opcode exception when 'cond' holds, exceptions are
 * enabled (MSR_EE) and the core is configured to trap illegal opcodes.
 * Returns the raw 'cond' regardless of whether an exception was raised.
 */
static bool trap_illegal(DisasContext *dc, bool cond)
{
    if (cond && (dc->tb_flags & MSR_EE)
        && dc->cfg->illegal_opcode_exception) {
        gen_raise_hw_excp(dc, ESR_EC_ILLEGAL_OP);
    }
    return cond;
}
157
158
159
160
161
/*
 * Raise a privileged-insn exception when 'cond' holds and we are in user
 * mode with exceptions enabled.  Returns true whenever the access was a
 * user-mode violation, even if no exception could be raised (MSR_EE clear).
 */
static bool trap_userspace(DisasContext *dc, bool cond)
{
    bool cond_user = cond && dc->mem_index == MMU_USER_IDX;

    if (cond_user && (dc->tb_flags & MSR_EE)) {
        gen_raise_hw_excp(dc, ESR_EC_PRIVINSN);
    }
    return cond_user;
}
171
172
173
174
175
/*
 * Return true (and log) if we are currently inside a delay slot, where
 * 'insn_type' is architecturally not allowed to appear.
 */
static bool invalid_delay_slot(DisasContext *dc, const char *insn_type)
{
    if (dc->tb_flags & D_FLAG) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid insn in delay slot: %s at %08x\n",
                      insn_type, (uint32_t)dc->base.pc_next);
        return true;
    }
    return false;
}
186
/*
 * Return a TCG value for reading register 'reg'.  r0 always reads as
 * zero, so a lazily-allocated scratch temp is zeroed on first use and
 * reused for the rest of the instruction.
 */
static TCGv_i32 reg_for_read(DisasContext *dc, int reg)
{
    if (likely(reg != 0)) {
        return cpu_R[reg];
    }
    if (!dc->r0_set) {
        if (dc->r0 == NULL) {
            dc->r0 = tcg_temp_new_i32();
        }
        tcg_gen_movi_i32(dc->r0, 0);
        dc->r0_set = true;
    }
    return dc->r0;
}
201
202static TCGv_i32 reg_for_write(DisasContext *dc, int reg)
203{
204 if (likely(reg != 0)) {
205 return cpu_R[reg];
206 }
207 if (dc->r0 == NULL) {
208 dc->r0 = tcg_temp_new_i32();
209 }
210 return dc->r0;
211}
212
/*
 * Emit a type-A (register-register) operation rd = fn(ra, rb).
 * When rd is r0 and the op has no side effects (flags etc.), skip it.
 */
static bool do_typea(DisasContext *dc, arg_typea *arg, bool side_effects,
                     void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, rb;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    rb = reg_for_read(dc, arg->rb);
    fn(rd, ra, rb);
    return true;
}
228
/* Emit a one-operand type-A operation rd = fn(ra); same r0 skip rule. */
static bool do_typea0(DisasContext *dc, arg_typea0 *arg, bool side_effects,
                      void (*fn)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fn(rd, ra);
    return true;
}
243
/*
 * Emit a type-B operation rd = fni(ra, imm) with the immediate passed
 * as a host constant; same r0 skip rule as do_typea.
 */
static bool do_typeb_imm(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 rd, ra;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    fni(rd, ra, arg->imm);
    return true;
}
258
/*
 * Emit a type-B operation with the immediate materialized as a TCG
 * value, for ops whose generator only takes register operands.
 */
static bool do_typeb_val(DisasContext *dc, arg_typeb *arg, bool side_effects,
                         void (*fn)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 rd, ra, imm;

    if (arg->rd == 0 && !side_effects) {
        return true;
    }

    rd = reg_for_write(dc, arg->rd);
    ra = reg_for_read(dc, arg->ra);
    imm = tcg_const_i32(arg->imm);

    fn(rd, ra, imm);

    tcg_temp_free_i32(imm);
    return true;
}
277
/*
 * Boilerplate generators for trans_* entry points.  The *_CFG variants
 * additionally gate the insn on a MicroBlazeCPUConfig feature flag;
 * when the feature is absent the trans function returns false so the
 * decoder falls through to illegal-insn handling.
 */
#define DO_TYPEA(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return do_typea(dc, a, SE, FN); }

#define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
    { return dc->cfg->CFG && do_typea(dc, a, SE, FN); }

#define DO_TYPEA0(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return do_typea0(dc, a, SE, FN); }

#define DO_TYPEA0_CFG(NAME, CFG, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typea0 *a) \
    { return dc->cfg->CFG && do_typea0(dc, a, SE, FN); }

#define DO_TYPEBI(NAME, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return dc->cfg->CFG && do_typeb_imm(dc, a, SE, FNI); }

#define DO_TYPEBV(NAME, SE, FN) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
    { return do_typeb_val(dc, a, SE, FN); }

/* Wrap helpers that take cpu_env as their hidden first argument. */
#define ENV_WRAPPER2(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina) \
    { HELPER(out, cpu_env, ina); }

#define ENV_WRAPPER3(NAME, HELPER) \
    static void NAME(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb) \
    { HELPER(out, cpu_env, ina, inb); }
313
314
/* No input carry, but output carry: C = carry-out of ina + inb. */
static void gen_add(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);

    tcg_gen_add2_i32(out, cpu_msr_c, ina, zero, inb, zero);

    tcg_temp_free_i32(zero);
}

/* Input and output carry: out = ina + inb + C, with C updated. */
static void gen_addc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Two add2 steps so both partial carries are accumulated into C. */
    tcg_gen_add2_i32(tmp, cpu_msr_c, ina, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(zero);
}

/* Input carry, but no output carry: out = ina + inb + C, C unchanged. */
static void gen_addkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_add_i32(out, ina, inb);
    tcg_gen_add_i32(out, out, cpu_msr_c);
}

DO_TYPEA(add, true, gen_add)
DO_TYPEA(addc, true, gen_addc)
DO_TYPEA(addk, false, tcg_gen_add_i32)
DO_TYPEA(addkc, true, gen_addkc)

DO_TYPEBV(addi, true, gen_add)
DO_TYPEBV(addic, true, gen_addc)
DO_TYPEBI(addik, false, tcg_gen_addi_i32)
DO_TYPEBV(addikc, true, gen_addkc)
353
/* andni: and with the complement of the immediate. */
static void gen_andni(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    tcg_gen_andi_i32(out, ina, ~imm);
}

DO_TYPEA(and, false, tcg_gen_and_i32)
DO_TYPEBI(andi, false, tcg_gen_andi_i32)
DO_TYPEA(andn, false, tcg_gen_andc_i32)
DO_TYPEBI(andni, false, gen_andni)
363
/* Barrel shifts: only the low 5 bits of the shift amount are used. */
static void gen_bsra(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_sar_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsrl(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shr_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_bsll(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, inb, 31);
    tcg_gen_shl_i32(out, ina, tmp);
    tcg_temp_free_i32(tmp);
}
387
/* bsefi: extract a bit field; imm packs width (bits 9:5) and start (4:0). */
static void gen_bsefi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);

    if (imm_w + imm_s > 32 || imm_w == 0) {
        /* These inputs have undefined behavior; leave 'out' unmodified. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsefi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        tcg_gen_extract_i32(out, ina, imm_s, imm_w);
    }
}

/* bsifi: insert a bit field; imm packs end bit (9:5) and start bit (4:0). */
static void gen_bsifi(TCGv_i32 out, TCGv_i32 ina, int32_t imm)
{
    int imm_w = extract32(imm, 5, 5);
    int imm_s = extract32(imm, 0, 5);
    int width = imm_w - imm_s + 1;

    if (imm_w < imm_s) {
        /* These inputs have undefined behavior; leave 'out' unmodified. */
        qemu_log_mask(LOG_GUEST_ERROR, "bsifi: Bad input w=%d s=%d\n",
                      imm_w, imm_s);
    } else {
        /* Read-modify-write: deposit ina's low bits into the old rd. */
        tcg_gen_deposit_i32(out, out, ina, imm_s, width);
    }
}

DO_TYPEA_CFG(bsra, use_barrel, false, gen_bsra)
DO_TYPEA_CFG(bsrl, use_barrel, false, gen_bsrl)
DO_TYPEA_CFG(bsll, use_barrel, false, gen_bsll)

DO_TYPEBI_CFG(bsrai, use_barrel, false, tcg_gen_sari_i32)
DO_TYPEBI_CFG(bsrli, use_barrel, false, tcg_gen_shri_i32)
DO_TYPEBI_CFG(bslli, use_barrel, false, tcg_gen_shli_i32)

DO_TYPEBI_CFG(bsefi, use_barrel, false, gen_bsefi)
DO_TYPEBI_CFG(bsifi, use_barrel, false, gen_bsifi)
429
/* Count leading zeros; an all-zero input yields 32. */
static void gen_clz(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_clzi_i32(out, ina, 32);
}

DO_TYPEA0_CFG(clz, use_pcmp_instr, false, gen_clz)

/* cmp: out = inb - ina, with bit 31 forced to (inb < ina) signed. */
static void gen_cmp(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LT, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

/* cmpu: as cmp, but bit 31 is the unsigned comparison result. */
static void gen_cmpu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 lt = tcg_temp_new_i32();

    tcg_gen_setcond_i32(TCG_COND_LTU, lt, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
    tcg_gen_deposit_i32(out, out, lt, 31, 1);
    tcg_temp_free_i32(lt);
}

DO_TYPEA(cmp, false, gen_cmp)
DO_TYPEA(cmpu, false, gen_cmpu)
459
/* FPU ops go through helpers so they can set exception state in env. */
ENV_WRAPPER3(gen_fadd, gen_helper_fadd)
ENV_WRAPPER3(gen_frsub, gen_helper_frsub)
ENV_WRAPPER3(gen_fmul, gen_helper_fmul)
ENV_WRAPPER3(gen_fdiv, gen_helper_fdiv)
ENV_WRAPPER3(gen_fcmp_un, gen_helper_fcmp_un)
ENV_WRAPPER3(gen_fcmp_lt, gen_helper_fcmp_lt)
ENV_WRAPPER3(gen_fcmp_eq, gen_helper_fcmp_eq)
ENV_WRAPPER3(gen_fcmp_le, gen_helper_fcmp_le)
ENV_WRAPPER3(gen_fcmp_gt, gen_helper_fcmp_gt)
ENV_WRAPPER3(gen_fcmp_ne, gen_helper_fcmp_ne)
ENV_WRAPPER3(gen_fcmp_ge, gen_helper_fcmp_ge)

DO_TYPEA_CFG(fadd, use_fpu, true, gen_fadd)
DO_TYPEA_CFG(frsub, use_fpu, true, gen_frsub)
DO_TYPEA_CFG(fmul, use_fpu, true, gen_fmul)
DO_TYPEA_CFG(fdiv, use_fpu, true, gen_fdiv)
DO_TYPEA_CFG(fcmp_un, use_fpu, true, gen_fcmp_un)
DO_TYPEA_CFG(fcmp_lt, use_fpu, true, gen_fcmp_lt)
DO_TYPEA_CFG(fcmp_eq, use_fpu, true, gen_fcmp_eq)
DO_TYPEA_CFG(fcmp_le, use_fpu, true, gen_fcmp_le)
DO_TYPEA_CFG(fcmp_gt, use_fpu, true, gen_fcmp_gt)
DO_TYPEA_CFG(fcmp_ne, use_fpu, true, gen_fcmp_ne)
DO_TYPEA_CFG(fcmp_ge, use_fpu, true, gen_fcmp_ge)

ENV_WRAPPER2(gen_flt, gen_helper_flt)
ENV_WRAPPER2(gen_fint, gen_helper_fint)
ENV_WRAPPER2(gen_fsqrt, gen_helper_fsqrt)

/* Conversions and sqrt require the extended FPU (use_fpu >= 2). */
DO_TYPEA0_CFG(flt, use_fpu >= 2, true, gen_flt)
DO_TYPEA0_CFG(fint, use_fpu >= 2, true, gen_fint)
DO_TYPEA0_CFG(fsqrt, use_fpu >= 2, true, gen_fsqrt)

/* Division: note the operand order is swapped, passing (rb, ra). */
static void gen_idiv(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divs(out, cpu_env, inb, ina);
}

static void gen_idivu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    gen_helper_divu(out, cpu_env, inb, ina);
}

DO_TYPEA_CFG(idiv, use_div, true, gen_idiv)
DO_TYPEA_CFG(idivu, use_div, true, gen_idivu)
505
/*
 * imm prefix: latch the upper 16 bits for the following type-B insn,
 * both at translate time (ext_imm) and in env (cpu_imm, for unwind).
 */
static bool trans_imm(DisasContext *dc, arg_imm *arg)
{
    if (invalid_delay_slot(dc, "imm")) {
        return true;
    }
    dc->ext_imm = arg->imm << 16;
    tcg_gen_movi_i32(cpu_imm, dc->ext_imm);
    /* IMM_FLAG is applied to the NEXT instruction only. */
    dc->tb_flags_to_set = IMM_FLAG;
    return true;
}
516
/* mulh: signed high 32 bits of the 64-bit product. */
static void gen_mulh(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_muls2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* mulhu: unsigned high 32 bits of the product. */
static void gen_mulhu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* mulhsu: high 32 bits of signed(ina) * unsigned(inb). */
static void gen_mulhsu(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mulsu2_i32(tmp, out, ina, inb);
    tcg_temp_free_i32(tmp);
}

/* The high-half multiplies require the full multiplier (use_hw_mul >= 2). */
DO_TYPEA_CFG(mul, use_hw_mul, false, tcg_gen_mul_i32)
DO_TYPEA_CFG(mulh, use_hw_mul >= 2, false, gen_mulh)
DO_TYPEA_CFG(mulhu, use_hw_mul >= 2, false, gen_mulhu)
DO_TYPEA_CFG(mulhsu, use_hw_mul >= 2, false, gen_mulhsu)
DO_TYPEBI_CFG(muli, use_hw_mul, false, tcg_gen_muli_i32)

DO_TYPEA(or, false, tcg_gen_or_i32)
DO_TYPEBI(ori, false, tcg_gen_ori_i32)
546
/* pcmpeq/pcmpne: out = (ina == inb) / (ina != inb) as 0 or 1. */
static void gen_pcmpeq(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_EQ, out, ina, inb);
}

static void gen_pcmpne(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_NE, out, ina, inb);
}

DO_TYPEA_CFG(pcmpbf, use_pcmp_instr, false, gen_helper_pcmpbf)
DO_TYPEA_CFG(pcmpeq, use_pcmp_instr, false, gen_pcmpeq)
DO_TYPEA_CFG(pcmpne, use_pcmp_instr, false, gen_pcmpne)
560
561
/* No input carry, but output carry: C = !borrow of inb - ina. */
static void gen_rsub(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_msr_c, inb, ina);
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input and output carry: out = inb + ~ina + C, carries accumulated. */
static void gen_rsubc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_not_i32(tmp, ina);
    tcg_gen_add2_i32(tmp, cpu_msr_c, tmp, zero, cpu_msr_c, zero);
    tcg_gen_add2_i32(out, cpu_msr_c, tmp, cpu_msr_c, inb, zero);

    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(tmp);
}

/* No input or output carry. */
static void gen_rsubk(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    tcg_gen_sub_i32(out, inb, ina);
}

/* Input carry, no output carry: out = inb + ~ina + C, C unchanged. */
static void gen_rsubkc(TCGv_i32 out, TCGv_i32 ina, TCGv_i32 inb)
{
    TCGv_i32 nota = tcg_temp_new_i32();

    tcg_gen_not_i32(nota, ina);
    tcg_gen_add_i32(out, inb, nota);
    tcg_gen_add_i32(out, out, cpu_msr_c);

    tcg_temp_free_i32(nota);
}

DO_TYPEA(rsub, true, gen_rsub)
DO_TYPEA(rsubc, true, gen_rsubc)
DO_TYPEA(rsubk, false, gen_rsubk)
DO_TYPEA(rsubkc, true, gen_rsubkc)

DO_TYPEBV(rsubi, true, gen_rsub)
DO_TYPEBV(rsubic, true, gen_rsubc)
DO_TYPEBV(rsubik, false, gen_rsubk)
DO_TYPEBV(rsubikc, true, gen_rsubkc)

DO_TYPEA0(sext8, false, tcg_gen_ext8s_i32)
DO_TYPEA0(sext16, false, tcg_gen_ext16s_i32)
612
/* sra: arithmetic shift right by one; shifted-out bit goes to carry. */
static void gen_sra(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_sari_i32(out, ina, 1);
}

/* src: shift right by one, shifting the old carry into bit 31. */
static void gen_src(TCGv_i32 out, TCGv_i32 ina)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Save the old carry before updating it from bit 0. */
    tcg_gen_mov_i32(tmp, cpu_msr_c);
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    /* out = low bit of (tmp:ina) >> 1, i.e. (ina >> 1) | (old_C << 31). */
    tcg_gen_extract2_i32(out, ina, tmp, 1);

    tcg_temp_free_i32(tmp);
}

/* srl: logical shift right by one; shifted-out bit goes to carry. */
static void gen_srl(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_andi_i32(cpu_msr_c, ina, 1);
    tcg_gen_shri_i32(out, ina, 1);
}

DO_TYPEA0(sra, false, gen_sra)
DO_TYPEA0(src, false, gen_src)
DO_TYPEA0(srl, false, gen_srl)
639
/* swaph: swap the two halfwords (a rotate by 16 bits). */
static void gen_swaph(TCGv_i32 out, TCGv_i32 ina)
{
    tcg_gen_rotri_i32(out, ina, 16);
}

DO_TYPEA0(swapb, false, tcg_gen_bswap32_i32)
DO_TYPEA0(swaph, false, gen_swaph)

static bool trans_wdic(DisasContext *dc, arg_wdic *a)
{
    /* Cache operations are modeled as no-ops beyond the privilege check. */
    trap_userspace(dc, true);
    return true;
}

DO_TYPEA(xor, false, tcg_gen_xor_i32)
DO_TYPEBI(xori, false, tcg_gen_xori_i32)
657
/*
 * Compute the effective address ra + rb for a type-A load/store,
 * returning a new target-width temp the caller must free.  Accesses
 * involving r1 (the stack pointer) are checked by the stackprot helper
 * when that feature is configured.
 */
static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb)
{
    TCGv ret = tcg_temp_new();

    /* If any of the regs is r0, set t to the value of the other reg. */
    if (ra && rb) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_add_i32(tmp, cpu_R[ra], cpu_R[rb]);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else if (ra) {
        tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
    } else if (rb) {
        tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
    } else {
        tcg_gen_movi_tl(ret, 0);
    }

    if ((ra == 1 || rb == 1) && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
681
/*
 * Compute the effective address ra + imm for a type-B load/store,
 * returning a new target-width temp the caller must free.
 */
static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm)
{
    TCGv ret = tcg_temp_new();

    /* If ra is r0, the address is just the zero-extended immediate. */
    if (ra) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp, cpu_R[ra], imm);
        tcg_gen_extu_i32_tl(ret, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        tcg_gen_movi_tl(ret, (uint32_t)imm);
    }

    if (ra == 1 && dc->cfg->stackprot) {
        gen_helper_stackprot(cpu_env, ret);
    }
    return ret;
}
701
#ifndef CONFIG_USER_ONLY
/*
 * Compute the extended address for the *ea instructions: rb provides
 * the low 32 bits and ra the high bits when the core is configured
 * with addr_size > 32.  With a 32-bit address space (or ra == r0),
 * only rb is used.
 */
static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb)
{
    int addr_size = dc->cfg->addr_size;
    TCGv ret = tcg_temp_new();

    if (addr_size == 32 || ra == 0) {
        if (rb) {
            tcg_gen_extu_i32_tl(ret, cpu_R[rb]);
        } else {
            tcg_gen_movi_tl(ret, 0);
        }
    } else {
        if (rb) {
            tcg_gen_concat_i32_i64(ret, cpu_R[rb], cpu_R[ra]);
        } else {
            tcg_gen_extu_i32_tl(ret, cpu_R[ra]);
            tcg_gen_shli_tl(ret, ret, 32);
        }
        if (addr_size < 64) {
            /* Mask off out-of-range bits of the extended address. */
            tcg_gen_andi_i64(ret, ret, MAKE_64BIT_MASK(0, addr_size));
        }
    }
    return ret;
}
#endif
729
/*
 * Record exception-specific-status (ESS) bits for a potential unaligned
 * access by patching this insn's insn_start parameter; the unwinder can
 * then reconstruct ESR if the access actually faults.
 */
static void record_unaligned_ess(DisasContext *dc, int rd,
                                 MemOp size, bool store)
{
    uint32_t iflags = tcg_get_insn_start_param(dc->insn_start, 1);

    iflags |= ESR_ESS_FLAG;
    iflags |= rd << 5;           /* destination/source register */
    iflags |= store * ESR_S;     /* store (vs load) */
    iflags |= (size == MO_32) * ESR_W;  /* word (vs halfword) access */

    tcg_set_insn_start_param(dc->insn_start, 1, iflags);
}
742
/*
 * Common load path.  Consumes (frees) 'addr'.
 */
static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                    int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things:
     *
     * 1. Reverse the address wrt endianness (for sub-word sizes, by
     *    flipping the low address bits with 3 - size).
     * 2. Byteswap the data lanes on the way back into the CPU core
     *    (by toggling MO_BSWAP).
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* Enforce alignment when unaligned exceptions are configured,
       recording the ESS bits the unwinder needs on a fault. */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, false);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
775
/* Byte loads; "r" = byte-reversed, "ea" = extended (privileged) address,
   "i" = immediate offset. */
static bool trans_lbu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_lbur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_lbuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    /* lbuea bypasses the MMU, hence MMU_NOMMU_IDX. */
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lbui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
806
/* Halfword loads (zero-extending); same r/ea/i variants as the byte ops. */
static bool trans_lhu(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_lhur(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_lhuea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lhui(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
837
/* Word loads; same r/ea/i variants as above. */
static bool trans_lw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_lwr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_lwea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_load(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_lwi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_load(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
868
/* lwx: load word and acquire a reservation for a later swx. */
static bool trans_lwx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);

    /* lwx does not throw unaligned access errors, so force alignment. */
    tcg_gen_andi_tl(addr, addr, ~3);

    /* Record both the reservation address and the loaded value,
       for comparison by swx. */
    tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, MO_TEUL);
    tcg_gen_mov_tl(cpu_res_addr, addr);
    tcg_temp_free(addr);

    if (arg->rd) {
        tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val);
    }

    /* No support for AXI exclusive so always clear C to "succeed". */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    return true;
}
888
/*
 * Common store path; mirror of do_load.  Consumes (frees) 'addr'.
 */
static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop,
                     int mem_index, bool rev)
{
    MemOp size = mop & MO_SIZE;

    /*
     * When doing reverse accesses we need to do two things:
     *
     * 1. Reverse the address wrt endianness (sub-word sizes flip the
     *    low address bits with 3 - size).
     * 2. Byteswap the data lanes on the way out of the CPU core.
     */
    if (rev) {
        if (size > MO_8) {
            mop ^= MO_BSWAP;
        }
        if (size < MO_32) {
            tcg_gen_xori_tl(addr, addr, 3 - size);
        }
    }

    /* Enforce alignment when unaligned exceptions are configured,
       recording the ESS bits (with the store flag set). */
    if (size > MO_8 &&
        (dc->tb_flags & MSR_EE) &&
        dc->cfg->unaligned_exceptions) {
        record_unaligned_ess(dc, rd, size, true);
        mop |= MO_ALIGN;
    }

    tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop);

    tcg_temp_free(addr);
    return true;
}
921
/* Byte stores; same r/ea/i variants as the load side. */
static bool trans_sb(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}

static bool trans_sbr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true);
}

static bool trans_sbea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_UB, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_sbi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false);
}
952
/* Halfword stores; same r/ea/i variants. */
static bool trans_sh(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}

static bool trans_shr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, true);
}

static bool trans_shea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUW, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_shi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUW, dc->mem_index, false);
}
983
/* Word stores; same r/ea/i variants. */
static bool trans_sw(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}

static bool trans_swr(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, true);
}

static bool trans_swea(DisasContext *dc, arg_typea *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
#ifdef CONFIG_USER_ONLY
    return true;
#else
    TCGv addr = compute_ldst_addr_ea(dc, arg->ra, arg->rb);
    return do_store(dc, arg->rd, addr, MO_TEUL, MMU_NOMMU_IDX, false);
#endif
}

static bool trans_swi(DisasContext *dc, arg_typeb *arg)
{
    TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm);
    return do_store(dc, arg->rd, addr, MO_TEUL, dc->mem_index, false);
}
1014
/* swx: store-conditional against the reservation set up by lwx. */
static bool trans_swx(DisasContext *dc, arg_typea *arg)
{
    TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb);
    TCGLabel *swx_done = gen_new_label();
    TCGLabel *swx_fail = gen_new_label();
    TCGv_i32 tval;

    /* swx does not throw unaligned access errors, so force alignment. */
    tcg_gen_andi_tl(addr, addr, ~3);

    /*
     * Compare the address vs the one used during lwx; a mismatch means
     * the reservation does not cover this location, so fail.
     */
    tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail);
    tcg_temp_free(addr);

    /*
     * Atomically compare the current memory contents with the value
     * loaded by lwx; only if they match is the new value stored.
     */
    tval = tcg_temp_new_i32();

    tcg_gen_atomic_cmpxchg_i32(tval, cpu_res_addr, cpu_res_val,
                               reg_for_write(dc, arg->rd),
                               dc->mem_index, MO_TEUL);

    tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_val, tval, swx_fail);
    tcg_temp_free_i32(tval);

    /* Success: clear the carry flag. */
    tcg_gen_movi_i32(cpu_msr_c, 0);
    tcg_gen_br(swx_done);

    /* Failure: set the carry flag. */
    gen_set_label(swx_fail);
    tcg_gen_movi_i32(cpu_msr_c, 1);

    gen_set_label(swx_done);

    /*
     * Invalidate the reservation so that swx cannot succeed again
     * without an intervening lwx.
     */
    tcg_gen_movi_tl(cpu_res_addr, -1);
    return true;
}
1064
/*
 * Mark the NEXT instruction as a delay slot.  BIMM_FLAG additionally
 * records that a type-B branch was itself prefixed by "imm".
 */
static void setup_dslot(DisasContext *dc, bool type_b)
{
    dc->tb_flags_to_set |= D_FLAG;
    if (type_b && (dc->tb_flags & IMM_FLAG)) {
        dc->tb_flags_to_set |= BIMM_FLAG;
    }
}
1072
/*
 * Unconditional branch.  dest_rb >= 0 selects a register target
 * (dest_rb < 0 means immediate dest_imm); 'abs' selects absolute vs
 * pc-relative; 'link' (if nonzero) receives the branch insn's pc.
 */
static bool do_branch(DisasContext *dc, int dest_rb, int dest_imm,
                      bool delay, bool abs, int link)
{
    uint32_t add_pc;

    if (invalid_delay_slot(dc, "branch")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    if (link) {
        tcg_gen_movi_i32(cpu_R[link], dc->base.pc_next);
    }

    /* Store the branch taken destination into btarget.  */
    add_pc = abs ? 0 : dc->base.pc_next;
    if (dest_rb > 0) {
        /* Indirect target: not known at translate time. */
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], add_pc);
    } else {
        /* dest_rb == 0 (r0 reads as 0) falls here too, with dest_imm 0. */
        dc->jmp_dest = add_pc + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }
    dc->jmp_cond = TCG_COND_ALWAYS;
    return true;
}
1101
/* Generate the register-form and immediate-form trans functions for
   each unconditional branch variant. */
#define DO_BR(NAME, NAMEI, DELAY, ABS, LINK) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_br *arg) \
    { return do_branch(dc, arg->rb, 0, DELAY, ABS, LINK ? arg->rd : 0); } \
    static bool trans_##NAMEI(DisasContext *dc, arg_typeb_br *arg) \
    { return do_branch(dc, -1, arg->imm, DELAY, ABS, LINK ? arg->rd : 0); }

DO_BR(br, bri, false, false, false)
DO_BR(bra, brai, false, true, false)
DO_BR(brd, brid, true, false, false)
DO_BR(brad, braid, true, true, false)
DO_BR(brld, brlid, true, false, true)
DO_BR(brald, bralid, true, true, true)
1114
/*
 * Conditional branch: compare register ra against zero with 'cond'.
 * The final destination (taken target or fall-through pc) is selected
 * into cpu_btarget with a movcond.
 */
static bool do_bcc(DisasContext *dc, int dest_rb, int dest_imm,
                   TCGCond cond, int ra, bool delay)
{
    TCGv_i32 zero, next;

    if (invalid_delay_slot(dc, "bcc")) {
        return true;
    }
    if (delay) {
        setup_dslot(dc, dest_rb < 0);
    }

    dc->jmp_cond = cond;

    /* Cache the condition register value in cpu_bvalue.
       NOTE(review): consumed outside this chunk, presumably by the
       delay-slot commit path -- confirm. */
    tcg_gen_mov_i32(cpu_bvalue, reg_for_read(dc, ra));

    /* Store the branch taken destination into btarget.  */
    if (dest_rb > 0) {
        dc->jmp_dest = -1;
        tcg_gen_addi_i32(cpu_btarget, cpu_R[dest_rb], dc->base.pc_next);
    } else {
        dc->jmp_dest = dc->base.pc_next + dest_imm;
        tcg_gen_movi_i32(cpu_btarget, dc->jmp_dest);
    }

    /* Select taken target vs fall-through (skips the delay slot). */
    zero = tcg_const_i32(0);
    next = tcg_const_i32(dc->base.pc_next + (delay + 1) * 4);
    tcg_gen_movcond_i32(dc->jmp_cond, cpu_btarget,
                        reg_for_read(dc, ra), zero,
                        cpu_btarget, next);
    tcg_temp_free_i32(zero);
    tcg_temp_free_i32(next);

    return true;
}
1152
/* Generate the four variants (reg/imm x plain/delay) for each condition. */
#define DO_BCC(NAME, COND) \
    static bool trans_##NAME(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, false); } \
    static bool trans_##NAME##d(DisasContext *dc, arg_typea_bc *arg) \
    { return do_bcc(dc, arg->rb, 0, COND, arg->ra, true); } \
    static bool trans_##NAME##i(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, false); } \
    static bool trans_##NAME##id(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_bcc(dc, -1, arg->imm, COND, arg->ra, true); }

DO_BCC(beq, TCG_COND_EQ)
DO_BCC(bge, TCG_COND_GE)
DO_BCC(bgt, TCG_COND_GT)
DO_BCC(ble, TCG_COND_LE)
DO_BCC(blt, TCG_COND_LT)
DO_BCC(bne, TCG_COND_NE)
1169
/* brk: privileged break to the address in rb; sets MSR_BIP. */
static bool trans_brk(DisasContext *dc, arg_typea_br *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brk")) {
        return true;
    }

    tcg_gen_mov_i32(cpu_pc, reg_for_read(dc, arg->rb));
    if (arg->rd) {
        /* Link register receives the address of the brk itself. */
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP);
    /* Any outstanding lwx reservation is dropped. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

    dc->base.is_jmp = DISAS_EXIT;
    return true;
}
1189
/* brki: break to immediate vector; 0x8 = syscall, 0x18 = debug. */
static bool trans_brki(DisasContext *dc, arg_typeb_br *arg)
{
    uint32_t imm = arg->imm;

    /* Only the 0x8 and 0x18 vectors are reachable from user mode. */
    if (trap_userspace(dc, imm != 0x8 && imm != 0x18)) {
        return true;
    }
    if (invalid_delay_slot(dc, "brki")) {
        return true;
    }

    tcg_gen_movi_i32(cpu_pc, imm);
    if (arg->rd) {
        tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next);
    }
    /* Any outstanding lwx reservation is dropped. */
    tcg_gen_movi_tl(cpu_res_addr, -1);

#ifdef CONFIG_USER_ONLY
    switch (imm) {
    case 0x8:
        gen_raise_exception_sync(dc, EXCP_SYSCALL);
        break;
    case 0x18:
        gen_raise_exception_sync(dc, EXCP_DEBUG);
        break;
    default:
        g_assert_not_reached();
    }
#else
    uint32_t msr_to_set = 0;

    if (imm != 0x18) {
        msr_to_set |= MSR_BIP;
    }
    if (imm == 0x8 || imm == 0x18) {
        /* Save current UM/VM into the UMS/VMS shadow bits (<< 1),
           then clear all four; MSR_UM/MSR_VM are in tb_flags. */
        msr_to_set |= (dc->tb_flags & (MSR_UM | MSR_VM)) << 1;
        tcg_gen_andi_i32(cpu_msr, cpu_msr,
                         ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM));
    }
    tcg_gen_ori_i32(cpu_msr, cpu_msr, msr_to_set);
    dc->base.is_jmp = DISAS_EXIT;
#endif

    return true;
}
1236
/*
 * mbar: memory barrier.  Immediate bit 1 clear requests a data-side
 * barrier; immediate bit 4 decodes to "sleep" (privileged).
 */
static bool trans_mbar(DisasContext *dc, arg_mbar *arg)
{
    int mbar_imm = arg->imm;

    /* Note that mbar is never in a delay slot. */
    if (invalid_delay_slot(dc, "mbar")) {
        return true;
    }

    /* Data-side barrier: emit a full TCG memory barrier. */
    if ((mbar_imm & 2) == 0) {
        tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
    }

    /* Sleep variant: halt the CPU until woken. */
    if (mbar_imm & 16) {
        TCGv_i32 tmp_1;

        if (trap_userspace(dc, true)) {
            /* Sleep is a privileged instruction. */
            return true;
        }

        t_sync_flags(dc);

        /* Set cs->halted: reach CPUState from env via the negative
           offset of env within MicroBlazeCPU. */
        tmp_1 = tcg_const_i32(1);
        tcg_gen_st_i32(tmp_1, cpu_env,
                       -offsetof(MicroBlazeCPU, env)
                       +offsetof(CPUState, halted));
        tcg_temp_free_i32(tmp_1);

        /* Resume at the instruction after the mbar. */
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4);

        gen_raise_exception(dc, EXCP_HLT);
    }

    /*
     * Always end the TB here.  This covers the instruction-side
     * barrier (self-modifying code must be re-translated) and also
     * returns to the main loop so pending interrupts are recognized
     * promptly even for data barriers.
     */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
}
1287
/*
 * Common implementation of the return instructions (rtsd/rtid/rtbd/rted).
 * @to_set carries the DRTI/DRTB/DRTE iflag for the privileged variants
 * (0 for plain rtsd, which userspace may execute).  Opens a delay slot
 * and sets up an unconditional indirect jump to ra + imm.
 */
static bool do_rts(DisasContext *dc, arg_typeb_bc *arg, int to_set)
{
    if (trap_userspace(dc, to_set)) {
        return true;
    }
    if (invalid_delay_slot(dc, "rts")) {
        return true;
    }

    dc->tb_flags_to_set |= to_set;
    setup_dslot(dc, true);

    dc->jmp_cond = TCG_COND_ALWAYS;
    /* Destination is indirect, not known at translate time. */
    dc->jmp_dest = -1;
    tcg_gen_addi_i32(cpu_btarget, reg_for_read(dc, arg->ra), arg->imm);
    return true;
}
1305
/* Instantiate the return translators; IFLAG selects the return type. */
#define DO_RTS(NAME, IFLAG) \
    static bool trans_##NAME(DisasContext *dc, arg_typeb_bc *arg) \
    { return do_rts(dc, arg, IFLAG); }

DO_RTS(rtbd, DRTB_FLAG)
DO_RTS(rtid, DRTI_FLAG)
DO_RTS(rted, DRTE_FLAG)
DO_RTS(rtsd, 0)
1314
1315static bool trans_zero(DisasContext *dc, arg_zero *arg)
1316{
1317
1318 if (dc->cfg->opcode_0_illegal) {
1319 trap_illegal(dc, true);
1320 return true;
1321 }
1322
1323
1324
1325
1326 return false;
1327}
1328
/*
 * Compose the architectural MSR value into @d.  The carry bit is kept
 * out-of-line in cpu_msr_c (0 or 1); multiplying it by (MSR_C | MSR_CC)
 * replicates it into both the carry bit and the carry-copy bit at once.
 */
static void msr_read(DisasContext *dc, TCGv_i32 d)
{
    TCGv_i32 t;

    t = tcg_temp_new_i32();
    tcg_gen_muli_i32(t, cpu_msr_c, MSR_C | MSR_CC);
    tcg_gen_or_i32(d, cpu_msr, t);
    tcg_temp_free_i32(t);
}
1339
1340static bool do_msrclrset(DisasContext *dc, arg_type_msr *arg, bool set)
1341{
1342 uint32_t imm = arg->imm;
1343
1344 if (trap_userspace(dc, imm != MSR_C)) {
1345 return true;
1346 }
1347
1348 if (arg->rd) {
1349 msr_read(dc, cpu_R[arg->rd]);
1350 }
1351
1352
1353
1354
1355
1356 if (imm & MSR_C) {
1357 tcg_gen_movi_i32(cpu_msr_c, set);
1358 }
1359
1360
1361
1362
1363
1364 imm &= ~(MSR_C | MSR_CC | MSR_PVR);
1365
1366 if (imm != 0) {
1367 if (set) {
1368 tcg_gen_ori_i32(cpu_msr, cpu_msr, imm);
1369 } else {
1370 tcg_gen_andi_i32(cpu_msr, cpu_msr, ~imm);
1371 }
1372 dc->base.is_jmp = DISAS_EXIT_NEXT;
1373 }
1374 return true;
1375}
1376
1377static bool trans_msrclr(DisasContext *dc, arg_type_msr *arg)
1378{
1379 return do_msrclrset(dc, arg, false);
1380}
1381
1382static bool trans_msrset(DisasContext *dc, arg_type_msr *arg)
1383{
1384 return do_msrclrset(dc, arg, true);
1385}
1386
/*
 * mts: move from general register ra to a special register (privileged).
 * Unknown register numbers are logged and ignored.
 */
static bool trans_mts(DisasContext *dc, arg_mts *arg)
{
    if (trap_userspace(dc, true)) {
        return true;
    }

#ifdef CONFIG_USER_ONLY
    /* trap_userspace() above always traps in user emulation. */
    g_assert_not_reached();
#else
    /* Extended (e=1) writes are only accepted for register 0x1003. */
    if (arg->e && arg->rs != 0x1003) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid extended mts reg 0x%x\n", arg->rs);
        return true;
    }

    TCGv_i32 src = reg_for_read(dc, arg->ra);
    switch (arg->rs) {
    case SR_MSR:
        /* Install MSR[C] (bit 2) into the out-of-line carry flag. */
        tcg_gen_extract_i32(cpu_msr_c, src, 2, 1);
        /*
         * Clear MSR_C and MSR_CC (kept in cpu_msr_c instead);
         * MSR_PVR is not writable and is always clear.
         */
        tcg_gen_andi_i32(cpu_msr, src, ~(MSR_C | MSR_CC | MSR_PVR));
        break;
    case SR_FSR:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case 0x800:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_st_i32(src, cpu_env, offsetof(CPUMBState, shr));
        break;

    /* MMU registers go through the mmu_write helper. */
    case 0x1000:
    case 0x1001:
    case 0x1002:
    case 0x1003:
    case 0x1004:
    case 0x1005:
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_write(cpu_env, tmp_ext, tmp_reg, src);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mts reg 0x%x\n", arg->rs);
        return true;
    }
    /* Special-register writes can affect translation; end the TB. */
    dc->base.is_jmp = DISAS_EXIT_NEXT;
    return true;
#endif
}
1447
/*
 * mfs: move from a special register into rd.  With e=1, reads the
 * extended (upper) half of 64-bit registers where one exists.
 * Unknown register numbers are logged and leave rd unmodified.
 */
static bool trans_mfs(DisasContext *dc, arg_mfs *arg)
{
    TCGv_i32 dest = reg_for_write(dc, arg->rd);

    if (arg->e) {
        switch (arg->rs) {
        case SR_EAR:
            {
                /* Extended EAR read: high 32 bits of the 64-bit EAR. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
                tcg_gen_extrh_i64_i32(dest, t64);
                tcg_temp_free_i64(t64);
            }
            return true;
#ifndef CONFIG_USER_ONLY
        case 0x1003:
            /* Handled below together with the other MMU registers. */
            break;
#endif
        case 0x2006 ... 0x2009:
            /* High bits of PVR6-9 not implemented; read as zero. */
            tcg_gen_movi_i32(dest, 0);
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid extended mfs reg 0x%x\n", arg->rs);
            return true;
        }
    }

    switch (arg->rs) {
    case SR_PC:
        tcg_gen_movi_i32(dest, dc->base.pc_next);
        break;
    case SR_MSR:
        msr_read(dc, dest);
        break;
    case SR_EAR:
        {
            /* Non-extended read: low 32 bits of the 64-bit EAR. */
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_ld_i64(t64, cpu_env, offsetof(CPUMBState, ear));
            tcg_gen_extrl_i64_i32(dest, t64);
            tcg_temp_free_i64(t64);
        }
        break;
    case SR_ESR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, esr));
        break;
    case SR_FSR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, fsr));
        break;
    case SR_BTR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, btr));
        break;
    case SR_EDR:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, edr));
        break;
    case 0x800:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, slr));
        break;
    case 0x802:
        tcg_gen_ld_i32(dest, cpu_env, offsetof(CPUMBState, shr));
        break;

#ifndef CONFIG_USER_ONLY
    /* MMU registers go through the mmu_read helper. */
    case 0x1000:
    case 0x1001:
    case 0x1002:
    case 0x1003:
    case 0x1004:
    case 0x1005:
        {
            TCGv_i32 tmp_ext = tcg_const_i32(arg->e);
            TCGv_i32 tmp_reg = tcg_const_i32(arg->rs & 7);

            gen_helper_mmu_read(dest, cpu_env, tmp_ext, tmp_reg);
            tcg_temp_free_i32(tmp_reg);
            tcg_temp_free_i32(tmp_ext);
        }
        break;
#endif

    /* PVR registers live in the CPU config, reached via negative
       offset from env within MicroBlazeCPU. */
    case 0x2000 ... 0x200c:
        tcg_gen_ld_i32(dest, cpu_env,
                       offsetof(MicroBlazeCPU, cfg.pvr_regs[arg->rs - 0x2000])
                       - offsetof(MicroBlazeCPU, env));
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid mfs reg 0x%x\n", arg->rs);
        break;
    }
    return true;
}
1541
/*
 * Return-from-interrupt MSR update: re-enable interrupts (MSR_IE) and
 * restore UM/VM from their saved copies UMS/VMS (one bit to the left,
 * hence the shift right by 1).
 */
static void do_rti(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_IE);
    tcg_gen_andi_i32(tmp, tmp, MSR_VM | MSR_UM);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
1554
/*
 * Return-from-break MSR update: clear MSR_BIP and restore UM/VM from
 * the saved UMS/VMS bits (shift right by 1 aligns them).
 */
static void do_rtb(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_BIP));
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
1566
/*
 * Return-from-exception MSR update: re-enable exceptions (MSR_EE),
 * clear MSR_EIP, and restore UM/VM from the saved UMS/VMS bits.
 */
static void do_rte(DisasContext *dc)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_msr, 1);
    tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_EE);
    tcg_gen_andi_i32(tmp, tmp, (MSR_VM | MSR_UM));
    tcg_gen_andi_i32(cpu_msr, cpu_msr, ~(MSR_VM | MSR_UM | MSR_EIP));
    tcg_gen_or_i32(cpu_msr, cpu_msr, tmp);

    tcg_temp_free_i32(tmp);
}
1579
1580
/*
 * Common implementation of get/getd (stream/FSL port read, privileged).
 * The 4-bit port id comes from rb (dynamic) or the immediate (static);
 * the actual transfer is done by the "get" helper.
 */
static bool do_get(DisasContext *dc, int rd, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_get(reg_for_write(dc, rd), t_id, t_ctrl);
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}
1602
/* get: stream read with a static (immediate) port id. */
static bool trans_get(DisasContext *dc, arg_get *arg)
{
    return do_get(dc, arg->rd, 0, arg->imm, arg->ctrl);
}
1607
/* getd: stream read with a dynamic port id taken from rb. */
static bool trans_getd(DisasContext *dc, arg_getd *arg)
{
    return do_get(dc, arg->rd, arg->rb, 0, arg->ctrl);
}
1612
/*
 * Common implementation of put/putd (stream/FSL port write, privileged).
 * Mirrors do_get: 4-bit port id from rb or the immediate, transfer via
 * the "put" helper with the value read from ra.
 */
static bool do_put(DisasContext *dc, int ra, int rb, int imm, int ctrl)
{
    TCGv_i32 t_id, t_ctrl;

    if (trap_userspace(dc, true)) {
        return true;
    }

    t_id = tcg_temp_new_i32();
    if (rb) {
        tcg_gen_andi_i32(t_id, cpu_R[rb], 0xf);
    } else {
        tcg_gen_movi_i32(t_id, imm);
    }

    t_ctrl = tcg_const_i32(ctrl);
    gen_helper_put(t_id, t_ctrl, reg_for_read(dc, ra));
    tcg_temp_free_i32(t_id);
    tcg_temp_free_i32(t_ctrl);
    return true;
}
1634
/* put: stream write with a static (immediate) port id. */
static bool trans_put(DisasContext *dc, arg_put *arg)
{
    return do_put(dc, arg->ra, 0, arg->imm, arg->ctrl);
}
1639
/* putd: stream write with a dynamic port id taken from rb. */
static bool trans_putd(DisasContext *dc, arg_putd *arg)
{
    return do_put(dc, arg->ra, arg->rb, 0, arg->ctrl);
}
1644
/* Per-TB translator setup: seed DisasContext from the TB's flags. */
static void mb_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    int bound;

    dc->cfg = &cpu->cfg;
    dc->tb_flags = dc->base.tb->flags;
    /* cs_base carries the pending extended immediate (imm prefix). */
    dc->ext_imm = dc->base.tb->cs_base;
    dc->r0 = NULL;
    dc->r0_set = false;
    dc->mem_index = cpu_mmu_index(&cpu->env, false);
    /* If the TB starts inside a delay slot, the branch is pending. */
    dc->jmp_cond = dc->tb_flags & D_FLAG ? TCG_COND_ALWAYS : TCG_COND_NEVER;
    dc->jmp_dest = -1;

    /* Bound the number of insns to those remaining on this page:
       -(pc | TARGET_PAGE_MASK) is the byte count to the page end. */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
1663
/* No per-TB-start work needed for MicroBlaze. */
static void mb_tr_tb_start(DisasContextBase *dcb, CPUState *cs)
{
}
1667
/*
 * Record pc and iflags for this insn so restore_state_to_opc() can
 * recover them on an exception (MSR bits are excluded from iflags).
 */
static void mb_tr_insn_start(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->tb_flags & ~MSR_TB_MASK);
    dc->insn_start = tcg_last_op();
}
1675
/*
 * Translate one instruction, then handle per-insn flag bookkeeping and
 * any branch whose delay slot has just completed.
 */
static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUMBState *env = cs->env_ptr;
    uint32_t ir;

    /* TODO(review): this aborts QEMU on an unaligned PC rather than
       raising a guest exception. */
    if (dc->base.pc_next & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n",
                  (uint32_t)dc->base.pc_next);
    }

    dc->tb_flags_to_set = 0;

    ir = cpu_ldl_code(env, dc->base.pc_next);
    if (!decode(dc, ir)) {
        trap_illegal(dc, true);
    }

    /* Release the lazily-allocated r0 temporary, if any. */
    if (dc->r0) {
        tcg_temp_free_i32(dc->r0);
        dc->r0 = NULL;
        dc->r0_set = false;
    }

    /* Discard the imm global once its contents can no longer be used
       (IMM_FLAG was set but is not being carried forward). */
    if ((dc->tb_flags & ~dc->tb_flags_to_set) & IMM_FLAG) {
        tcg_gen_discard_i32(cpu_imm);
    }

    /* Single-insn flags expire now; flags this insn set take effect. */
    dc->tb_flags &= ~(IMM_FLAG | BIMM_FLAG | D_FLAG);
    dc->tb_flags |= dc->tb_flags_to_set;
    dc->base.pc_next += 4;

    /* A branch is pending and its delay slot (if any) just finished. */
    if (dc->jmp_cond != TCG_COND_NEVER && !(dc->tb_flags & D_FLAG)) {
        /*
         * Apply any deferred rti/rtb/rte MSR update before taking
         * the branch.
         */
        uint32_t rt_ibe = dc->tb_flags & (DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
        if (unlikely(rt_ibe != 0)) {
            dc->tb_flags &= ~(DRTI_FLAG | DRTB_FLAG | DRTE_FLAG);
            if (rt_ibe & DRTI_FLAG) {
                do_rti(dc);
            } else if (rt_ibe & DRTB_FLAG) {
                do_rtb(dc);
            } else {
                do_rte(dc);
            }
        }

        /* Decide how tb_stop should finish the branch. */
        switch (dc->base.is_jmp) {
        case DISAS_NORETURN:
            /*
             * The delay-slot insn already exited the TB
             * (e.g. it raised an exception); nothing more to do.
             */
            break;
        case DISAS_NEXT:
            /*
             * Normal case: complete the branch.  After an MSR change
             * via rti/rtb/rte we must leave via DISAS_EXIT_JUMP so
             * the main loop re-evaluates state; otherwise DISAS_JUMP
             * can chain to the destination TB.
             */
            dc->base.is_jmp = (rt_ibe ? DISAS_EXIT_JUMP : DISAS_JUMP);
            break;
        case DISAS_EXIT_NEXT:
            /*
             * The delay-slot insn requested a TB exit (e.g. an MSR
             * write); combine that with the branch.
             */
            dc->base.is_jmp = DISAS_EXIT_JUMP;
            break;
        default:
            g_assert_not_reached();
        }
    }
}
1754
/* Emit the TB epilogue according to how translation ended. */
static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        /* The last insn already left the TB (exception etc.). */
        return;
    }

    t_sync_flags(dc);

    switch (dc->base.is_jmp) {
    case DISAS_TOO_MANY:
        /* Fall through to the next insn via a direct TB link. */
        gen_goto_tb(dc, 0, dc->base.pc_next);
        return;

    case DISAS_EXIT:
        /* cpu_pc was already set by the instruction. */
        break;
    case DISAS_EXIT_NEXT:
        tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
        break;
    case DISAS_EXIT_JUMP:
        /* Indirect branch combined with a required exit to the loop. */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);
        break;

    case DISAS_JUMP:
        if (dc->jmp_dest != -1 && !cs->singlestep_enabled) {
            /* Direct jump with a known destination: chain TBs. */
            tcg_gen_discard_i32(cpu_btarget);

            if (dc->jmp_cond != TCG_COND_ALWAYS) {
                /* Conditional direct jump. */
                TCGLabel *taken = gen_new_label();
                TCGv_i32 tmp = tcg_temp_new_i32();

                /*
                 * Copy bvalue to a temp now, so we can discard the
                 * bvalue global before branching.
                 */
                tcg_gen_mov_i32(tmp, cpu_bvalue);
                tcg_gen_discard_i32(cpu_bvalue);

                tcg_gen_brcondi_i32(dc->jmp_cond, tmp, 0, taken);
                gen_goto_tb(dc, 1, dc->base.pc_next);
                gen_set_label(taken);
            }
            gen_goto_tb(dc, 0, dc->jmp_dest);
            return;
        }

        /* Indirect jump (or single-stepping): load pc from btarget. */
        tcg_gen_mov_i32(cpu_pc, cpu_btarget);
        tcg_gen_discard_i32(cpu_btarget);

        if (unlikely(cs->singlestep_enabled)) {
            gen_raise_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        return;

    default:
        g_assert_not_reached();
    }

    /* Finish DISAS_EXIT* by returning to the main loop. */
    if (unlikely(cs->singlestep_enabled)) {
        gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
}
1829
/* Log a disassembly of the translated guest code region. */
static void mb_tr_disas_log(const DisasContextBase *dcb, CPUState *cs)
{
    qemu_log("IN: %s\n", lookup_symbol(dcb->pc_first));
    log_target_disas(cs, dcb->pc_first, dcb->tb->size);
}
1835
/* Hooks consumed by the generic translator_loop(). */
static const TranslatorOps mb_tr_ops = {
    .init_disas_context = mb_tr_init_disas_context,
    .tb_start           = mb_tr_tb_start,
    .insn_start         = mb_tr_insn_start,
    .translate_insn     = mb_tr_translate_insn,
    .tb_stop            = mb_tr_tb_stop,
    .disas_log          = mb_tr_disas_log,
};
1844
/* Entry point from the core: translate one TB of guest code. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
}
1850
/* Dump CPU state (pc, msr, iflags, special and general registers). */
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t iflags;
    int i;

    qemu_fprintf(f, "pc=0x%08x msr=0x%05x mode=%s(saved=%s) eip=%d ie=%d\n",
                 env->pc, env->msr,
                 (env->msr & MSR_UM) ? "user" : "kernel",
                 (env->msr & MSR_UMS) ? "user" : "kernel",
                 (bool)(env->msr & MSR_EIP),
                 (bool)(env->msr & MSR_IE));

    /* Decode the internal iflags word into readable tags. */
    iflags = env->iflags;
    qemu_fprintf(f, "iflags: 0x%08x", iflags);
    if (iflags & IMM_FLAG) {
        qemu_fprintf(f, " IMM(0x%08x)", env->imm);
    }
    if (iflags & BIMM_FLAG) {
        qemu_fprintf(f, " BIMM");
    }
    if (iflags & D_FLAG) {
        qemu_fprintf(f, " D(btarget=0x%08x)", env->btarget);
    }
    if (iflags & DRTI_FLAG) {
        qemu_fprintf(f, " DRTI");
    }
    if (iflags & DRTE_FLAG) {
        qemu_fprintf(f, " DRTE");
    }
    if (iflags & DRTB_FLAG) {
        qemu_fprintf(f, " DRTB");
    }
    if (iflags & ESR_ESS_FLAG) {
        qemu_fprintf(f, " ESR_ESS(0x%04x)", iflags & ESR_ESS_MASK);
    }

    qemu_fprintf(f, "\nesr=0x%04x fsr=0x%02x btr=0x%08x edr=0x%x\n"
                 "ear=0x" TARGET_FMT_lx " slr=0x%x shr=0x%x\n",
                 env->esr, env->fsr, env->btr, env->edr,
                 env->ear, env->slr, env->shr);

    /* General registers, four per line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%08x%c",
                     i, env->regs[i], i % 4 == 3 ? '\n' : ' ');
    }
    qemu_fprintf(f, "\n");
}
1900
/* Create the TCG globals backing CPUMBState fields. */
void mb_tcg_init(void)
{
#define R(X)  { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
#define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }

    static const struct {
        TCGv_i32 *var; int ofs; char name[8];
    } i32s[] = {
        /*
         * Note that r0 is handled specially via dc->r0 in the
         * translator (see reg_for_read/reg_for_write).  cpu_R[0] is
         * deliberately left NULL here; any accidental use will
         * assert quickly inside the TCG generator functions.
         */
               R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
        R(8),  R(9),  R(10), R(11), R(12), R(13), R(14), R(15),
        R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
        R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),

        SP(pc),
        SP(msr),
        SP(msr_c),
        SP(imm),
        SP(iflags),
        SP(bvalue),
        SP(btarget),
        SP(res_val),
    };

#undef R
#undef SP

    for (int i = 0; i < ARRAY_SIZE(i32s); ++i) {
        *i32s[i].var =
            tcg_global_mem_new_i32(cpu_env, i32s[i].ofs, i32s[i].name);
    }

    /* res_addr is target_ulong sized, unlike the i32 globals above. */
    cpu_res_addr =
        tcg_global_mem_new(cpu_env, offsetof(CPUMBState, res_addr), "res_addr");
}
1941
/*
 * Restore state from the insn_start data recorded at translation time:
 * data[0] is pc and data[1] is iflags, matching the arguments passed
 * to tcg_gen_insn_start() in mb_tr_insn_start().
 */
void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->iflags = data[1];
}
1948