1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include "tcg-pool.inc.c"
26
#ifdef CONFIG_DEBUG_TCG
/* Human-readable register names, indexed by TCG register number.
   Used only for debug dumps.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif
63
#ifdef __arch64__
# define SPARC64 1
#else
# define SPARC64 0
#endif

/* Mask of registers usable for 64-bit values.  On a 64-bit host all 32
   integer registers qualify; otherwise only the low 16 (%g and %o).
   NOTE(review): the v8plus restriction to %g/%o is inferred from the
   mask values — confirm against the backend ABI notes.  */
#if SPARC64
# define ALL_64 0xffffffffu
#else
# define ALL_64 0xffffu
#endif

/* Scratch registers reserved for the backend's internal use.  */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_O7

#ifndef CONFIG_SOFTMMU
/* For user-only emulation, the register that holds guest_base.  */
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

/* Register holding the address of the current TB, used for TB-relative
   addressing on 64-bit hosts.  */
#define TCG_REG_TB TCG_REG_I1
#define USE_REG_TB (sizeof(void *) > 4)
92
/* Register allocation preference order: the call-saved windowed
   registers first, then the call-clobbered globals and outputs.  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
122
/* Registers used to pass integer arguments to called functions.  */
static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

/* Registers in which called functions return integer results.  */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};
138
/* Fields of a SPARC instruction word.  */
#define INSN_OP(x) ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x) ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

/* Immediate and branch displacement fields.  Bit 13 set selects the
   immediate form of an arithmetic instruction.  The OFF fields encode
   word (insn) displacements, hence the >> 2.  */
#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)

/* Integer condition codes for branches and conditional moves.  */
#define COND_N 0x0
#define COND_E 0x1
#define COND_LE 0x2
#define COND_L 0x3
#define COND_LEU 0x4
#define COND_CS 0x5
#define COND_NEG 0x6
#define COND_VS 0x7
#define COND_A 0x8
#define COND_NE 0x9
#define COND_G 0xa
#define COND_GE 0xb
#define COND_GU 0xc
#define COND_CC 0xd
#define COND_POS 0xe
#define COND_VC 0xf
/* Branch always (unconditional).  */
#define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

/* Register conditions, for branch/move on register contents.  */
#define RCOND_Z 1
#define RCOND_LEZ 2
#define RCOND_LZ 3
#define RCOND_NZ 5
#define RCOND_GZ 6
#define RCOND_GEZ 7

/* Condition-code selectors for MOVcc (icc vs xcc).  */
#define MOVCC_ICC (1 << 18)
#define MOVCC_XCC (1 << 18 | 1 << 12)

/* Condition-code selector, prediction, and annul bits for BPcc.  */
#define BPCC_ICC 0
#define BPCC_XCC (2 << 20)
#define BPCC_PT (1 << 19)
#define BPCC_PN 0
#define BPCC_A (1 << 29)

/* BPr shares the predict-taken bit position with BPcc.  */
#define BPR_PT BPCC_PT
/* Opcode definitions, named after the instruction mnemonics.  */
#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

/* VIS3 instructions.  */
#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

/* Shifts; bit 12 selects the 64-bit (X) variants.  */
#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

/* Special registers, control transfer, and register-window ops.  */
#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI (INSN_OP(0) | INSN_OP2(0x4))
#define CALL INSN_OP(1)

/* Loads and stores; the trailing-A forms take an explicit ASI.  */
#define LDUB (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW (INSN_OP(3) | INSN_OP3(0x08))
#define LDX (INSN_OP(3) | INSN_OP3(0x0b))
#define STB (INSN_OP(3) | INSN_OP3(0x05))
#define STH (INSN_OP(3) | INSN_OP3(0x06))
#define STW (INSN_OP(3) | INSN_OP3(0x04))
#define STX (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA (INSN_OP(3) | INSN_OP3(0x15))
#define STHA (INSN_OP(3) | INSN_OP3(0x16))
#define STWA (INSN_OP(3) | INSN_OP3(0x14))
#define STXA (INSN_OP(3) | INSN_OP3(0x1e))

/* MEMBAR, with bit 13 set to select the immediate (mask) form.  */
#define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

/* Canonical nop: "sethi 0, %g0".  */
#define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0)
258
#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

/* Little-endian memory operations, implemented with the alternate-space
   load/store instructions and the primary-little ASI.  */
#define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))

/* Fallback definition when configure did not hard-wire the feature.  */
#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif
276
277static inline int check_fit_i64(int64_t val, unsigned int bits)
278{
279 return val == sextract64(val, 0, bits);
280}
281
282static inline int check_fit_i32(int32_t val, unsigned int bits)
283{
284 return val == sextract32(val, 0, bits);
285}
286
/* check_fit_tl checks a target_long; check_fit_ptr a host pointer.  */
#define check_fit_tl check_fit_i64
#if SPARC64
# define check_fit_ptr check_fit_i64
#else
# define check_fit_ptr check_fit_i32
#endif
293
/* Patch the branch at CODE_PTR so it targets VALUE + ADDEND.
   TYPE selects which displacement field of the insn to rewrite.
   Returns true on success.  */
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn = *code_ptr;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr);

    switch (type) {
    case R_SPARC_WDISP16:
        assert(check_fit_ptr(pcrel >> 2, 16));
        /* Clear the old displacement field, then insert the new one.  */
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        assert(check_fit_ptr(pcrel >> 2, 19));
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    default:
        g_assert_not_reached();
    }

    *code_ptr = insn;
    return true;
}
321
322
/* Parse one target-specific operand constraint letter; returns the
   advanced string pointer, or NULL for an unknown letter.  */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':   /* any register (32-bit values) */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        break;
    case 'R':   /* any register able to hold a 64-bit value */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = ALL_64;
        break;
    case 'A':   /* guest address operand */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff;
    reserve_helpers:
        /* Exclude %o0..%o2: the memory-op slow path marshals its
           helper-call arguments in these registers.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 's':   /* 32-bit data operand, avoiding helper-arg registers */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        goto reserve_helpers;
    case 'S':   /* 64-bit data operand, avoiding helper-arg registers */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = ALL_64;
        goto reserve_helpers;
    case 'I':   /* signed 11-bit constant (MOVcc immediate) */
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':   /* signed 13-bit constant (arithmetic immediate) */
        ct->ct |= TCG_CT_CONST_S13;
        break;
    case 'Z':   /* the constant zero (can use %g0) */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}
365
366
367static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
368 const TCGArgConstraint *arg_ct)
369{
370 int ct = arg_ct->ct;
371
372 if (ct & TCG_CT_CONST) {
373 return 1;
374 }
375
376 if (type == TCG_TYPE_I32) {
377 val = (int32_t)val;
378 }
379
380 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
381 return 1;
382 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
383 return 1;
384 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
385 return 1;
386 } else {
387 return 0;
388 }
389}
390
391static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
392 TCGReg rs2, int op)
393{
394 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
395}
396
397static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
398 int32_t offset, int op)
399{
400 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
401}
402
403static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
404 int32_t val2, int val2const, int op)
405{
406 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
407 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
408}
409
410static inline void tcg_out_mov(TCGContext *s, TCGType type,
411 TCGReg ret, TCGReg arg)
412{
413 if (ret != arg) {
414 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
415 }
416}
417
418static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
419{
420 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
421}
422
/* Load a sign-extended 13-bit immediate into RET via "or %g0, arg".  */
static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}
427
/* Load an arbitrary constant ARG into RET using the shortest sequence
   found.  IN_PROLOGUE suppresses TB-relative addressing, since
   TCG_REG_TB is not yet valid while emitting the prologue.  */
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* Make sure we test 32-bit constants for imm13 properly.  */
    if (type == TCG_TYPE_I32) {
        arg = lo;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue && USE_REG_TB) {
        test = arg - (uintptr_t)s->code_gen_ptr;
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
        return;
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A shifted 13-bit or 21-bit constant.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (check_fit_tl(test, 13)) {
        tcg_out_movi_imm13(s, ret, test);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    } else if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
497
/* Public entry point for loading a constant outside the prologue.  */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi_int(s, type, ret, arg, false);
}
503
504static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
505 TCGReg a2, int op)
506{
507 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
508}
509
510static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
511 intptr_t offset, int op)
512{
513 if (check_fit_ptr(offset, 13)) {
514 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
515 INSN_IMM13(offset));
516 } else {
517 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
518 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
519 }
520}
521
522static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
523 TCGReg arg1, intptr_t arg2)
524{
525 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
526}
527
528static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
529 TCGReg arg1, intptr_t arg2)
530{
531 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
532}
533
534static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
535 TCGReg base, intptr_t ofs)
536{
537 if (val == 0) {
538 tcg_out_st(s, type, TCG_REG_G0, base, ofs);
539 return true;
540 }
541 return false;
542}
543
/* Load the pointer-sized value stored at absolute address ARG into RET,
   preferring a single TB-relative load when the displacement fits.  */
static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
{
    intptr_t diff = arg - (uintptr_t)s->code_gen_ptr;
    if (USE_REG_TB && check_fit_ptr(diff, 13)) {
        tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
        return;
    }
    /* Build the high bits of the address, then load with the low 10
       bits as the displacement.  */
    tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}
554
/* Write register RS into the Y register (high dividend for 32-bit division).  */
static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

/* Read the Y register into RD.  */
static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}
564
/* Emit a 32-bit division.  The SPARC divide instructions take the top
   32 bits of the dividend from Y: zero for unsigned division, the
   sign-extension of RS1 for signed.  */
static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
579
/* Emit a nop (typically to fill a branch delay slot).  */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}
584
/* Map TCG comparison conditions to SPARC integer branch conditions.  */
static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

/* Map signed TCG comparisons against zero to BPr/MOVr register
   conditions.  Unsigned conditions have no entry (value 0).  */
static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};
606
607static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
608{
609 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
610}
611
/* Emit a conditional branch (BPcc) to label L, recording a relocation
   for patch_reloc when the label has no value yet.  */
static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        /* The displacement field stays zero until relocated.  */
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}
623
/* Compare C1 against C2 (register, or constant if C2CONST) by
   subtracting into %g0: sets the condition codes, discards the result.  */
static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}
628
/* Emit a 32-bit compare-and-branch to L, with a nop delay slot.  */
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    /* delay slot */
    tcg_out_nop(s);
}
636
/* Emit MOVcc: conditionally move V1 (register, or 11-bit immediate if
   V1CONST) into RET.  CC selects icc or xcc.  The MOVcc condition
   field overlaps the rs1 bit positions, hence the INSN_RS1 reuse.  */
static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}
644
/* Emit a 32-bit conditional move: RET = V1 if (C1 cond C2).  */
static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}
652
/* Emit a 64-bit compare-and-branch.  Signed comparisons against zero
   can use the branch-on-register (BPr) form and skip the compare.  */
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            /* Displacement stays zero until relocated.  */
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    /* delay slot */
    tcg_out_nop(s);
}
673
/* Emit MOVr: conditionally move V1 (register, or 10-bit immediate if
   V1CONST) into RET, based on comparing register C1 against zero.  */
static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}
681
/* Emit a 64-bit conditional move: RET = V1 if (C1 cond C2).  */
static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller (10 vs 11), so
       we must check that constants fit MOVr's field.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}
697
/* Set RET to the 0/1 result of (C1 cond C2), 32-bit.  */
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        /* General case: materialize 0, then conditionally move 1.  */
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        /* 0 + 0 + carry = carry.  */
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        /* 0 - (-1) - carry = 1 - carry.  */
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
750
/* Set RET to the 0/1 result of (C1 cond C2), 64-bit.  */
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            /* Transform NE vs non-zero into LTU vs zero via subcc.  */
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            /* ADDXC %g0, %g0 materializes the xcc carry directly.  */
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}
782
/* Emit a 32-bit double-word add/sub: (RH:RL) = (AH:AL) op (BH:BL),
   with OPL setting the carry consumed by OPH.  */
static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
798
/* Emit a 64-bit double-word add/sub: (RH:RL) = (AH:AL) +/- (BH:BL).
   The low-part SUBCC/ADDCC sets the xcc carry; the high part then
   needs a carry-consuming operation, which plain SPARC v9 lacks for
   64-bit values, hence the MOVCC-based workarounds below.  */
static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /* Otherwise compute BH pre-adjusted for carry into T2 ...  */
        if (bhconst) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... and undo the adjustment if the carry is clear ...  */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... before performing the high-part operation.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
846
/* Emit a call to DEST without filling the delay slot.  Uses the direct
   CALL form when the 30-bit word displacement reaches; otherwise builds
   the address in T1 and jumps via JMPL (saving the return pc in %o7).  */
static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        uintptr_t desti = (uintptr_t)dest;
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                         desti & ~0xfff, in_prologue);
        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
    }
}
861
/* Emit a call to DEST with a nop in the delay slot.  */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}
867
/* Emit a memory barrier.  Note that the TCG memory order constants
   mirror the SPARC MEMBAR mask bits, so A0 can be used directly.  */
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}
873
#ifdef CONFIG_SOFTMMU
/* Entry points of the slow-path thunks generated below, indexed by MemOp.  */
static tcg_insn_unit *qemu_ld_trampoline[16];
static tcg_insn_unit *qemu_st_trampoline[16];

/* Emit a zero-extension of register R from the size given by OP,
   as required before calling the store helpers; 64-bit data (and
   32-bit data on a 32-bit host) needs no extension.  */
static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
        break;
    case MO_16:
        /* Clear the high 16 bits with a left/right shift pair.  */
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        break;
    case MO_32:
        if (SPARC64) {
            /* srl by 0 zero-extends the low 32 bits.  */
            tcg_out_arith(s, r, r, 0, SHIFT_SRL);
        }
        break;
    case MO_64:
        break;
    }
}
900
/* Generate thunks that marshal arguments from the code-gen register
   convention into the C calling convention and tail-call the softmmu
   load/store helpers.  Called once, at the end of the prologue.  */
static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[16] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEQ]  = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEQ]  = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[16] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEQ]  = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEQ]  = helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = s->code_ptr;

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = s->code_ptr;

        if (SPARC64) {
            emit_extend(s, TCG_REG_O2, i);
            ra = TCG_REG_O4;
        } else {
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                emit_extend(s, ra, i);
                ra += 1;
            }
            /* Skip the oi argument.  */
            ra += 1;
        }

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            /* No free argument register left: spill to the stack slot.  */
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
#endif
1006
1007
/* Generate the prologue that enters generated code (and, for softmmu,
   the helper trampolines and the goto_ptr epilogue).  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* The TB address arrives in %i1 (%o1 of the caller); we chose
       TCG_REG_TB to be that same register so no move is required.  */
    if (USE_REG_TB) {
        QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    }

    /* Jump to the entry point passed in %i1.  */
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr: return 0 to the translation loop.  */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
#endif
}
1054
1055static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
1056{
1057 int i;
1058 for (i = 0; i < count; ++i) {
1059 p[i] = NOP;
1060 }
1061}
1062
#if defined(CONFIG_SOFTMMU)
/* The fast-path TLB lookup below addresses env->tlb_mask and
   env->tlb_table from a single base register, so it depends on the
   relative placement of those two fields within CPUArchState.  */

/* We expect tlb_mask to be before tlb_table.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) <
                  offsetof(CPUArchState, tlb_mask));

/* We expect tlb_mask to be "near" tlb_table.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) -
                  offsetof(CPUArchState, tlb_mask) >= (1 << 13));

/* Perform the TLB load and compare for address ADDR, leaving the
   condition codes set for the hit/miss branch.  The page addend is
   left in %o1.  Returns the (possibly zero-extended) address register
   to use for the access.  WHICH selects addr_read vs addr_write.  */
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               TCGMemOp opc, int which)
{
    int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]);
    int table_off = offsetof(CPUArchState, tlb_table[mem_index]);
    TCGReg base = TCG_AREG0;
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    if (!check_fit_i32(table_off, 13)) {
        /* The table offset exceeds imm13: bias a copy of env so that
           both offsets come back into range.  */
        int table_hi;

        base = r1;
        if (table_off <= 2 * 0xfff) {
            table_hi = 0xfff;
            tcg_out_arithi(s, base, TCG_AREG0, table_hi, ARITH_ADD);
        } else {
            table_hi = table_off & ~0x3ff;
            tcg_out_sethi(s, base, table_hi);
            tcg_out_arith(s, base, TCG_AREG0, base, ARITH_ADD);
        }
        mask_off -= table_hi;
        table_off -= table_hi;
        tcg_debug_assert(check_fit_i32(mask_off, 13));
    }

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, base, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, base, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       An unaligned access will thus compare unequal and take the
       slow path.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif
1153
/* Host load opcodes for each guest MemOp.  Big-endian ops use the plain
   instructions; little-endian ops use the ASI_PRIMARY_LITTLE forms.  */
static const int qemu_ld_opc[16] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEQ]  = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEQ]  = LDX_LE,
};

/* Host store opcodes for each guest MemOp.  */
static const int qemu_st_opc[16] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEQ]  = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEQ]  = STX_LE,
};
1182
/* Emit a guest memory load of DATA from ADDR with memop/mmu info OI.
   IS_64 indicates a 64-bit destination operand.  */
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi, bool is_64)
{
    TCGMemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus.  */
    if (SPARC64) {
        /* We let the helper sign-extend SB and SW, but leave SL for here.  */
        if (is_64 && (memop & MO_SSIZE) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
        } else {
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        }
    } else {
        if ((memop & MO_SIZE) == MO_64) {
            /* Reassemble the 64-bit result from the O0/O1 pair.  */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
        } else if (is_64) {
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension.  */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
        }
    }

    /* Patch the miss branch to land here, past the slow path.  */
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    /* User-only: direct access, offset by guest_base if non-zero.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
#endif
}
1264
/* Emit a guest memory store of DATA to ADDR with memop/mmu info OI.  */
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi)
{
    TCGMemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    /* Patch the miss branch to land here, past the slow path.  */
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    /* User-only: direct access, offset by guest_base if non-zero.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
#endif
}
1319
/* Emit host code for one TCG opcode.  ARGS holds the operands as filled
   in by the register allocator; CONST_ARGS flags which of them are
   constants rather than registers.  */
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
            /* Return value fits in a simm13: RETURN with movi in the
               delay slot.  The delay slot executes after the window
               restore, hence it targets %o0 of the caller's window.  */
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
            break;
        } else if (USE_REG_TB) {
            intptr_t tb_diff = a0 - (uintptr_t)s->code_gen_ptr;
            if (check_fit_ptr(tb_diff, 13)) {
                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
                /* Note that TCG_REG_TB has been unwound to O1.  */
                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
                break;
            }
        }
        /* General case: build the value with sethi-style movi into %i0
           (this window), then OR in the low bits in the delay slot
           after the window restore (%o0 == old %i0).  */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            if (USE_REG_TB) {
                /* make sure the patch is 8-byte aligned.  */
                if ((intptr_t)s->code_ptr & 4) {
                    tcg_out_nop(s);
                }
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                /* sethi+or pair, patched later with the target offset */
                tcg_out_sethi(s, TCG_REG_T1, 0);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
                /* delay slot: advance TCG_REG_TB to the target TB */
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
            } else {
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                /* CALL placeholder, patched later; nop in the delay slot */
                tcg_out32(s, CALL);
                tcg_out_nop(s);
            }
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_TB,
                           (uintptr_t)(s->tb_jmp_target_addr + a0));
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
            tcg_out_nop(s);
        }
        set_jmp_reset_offset(s, a0);

        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB.  */
        if (USE_REG_TB) {
            c = -tcg_current_code_size(s);
            if (check_fit_i32(c, 13)) {
                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
            } else {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
                              TCG_REG_T1, ARITH_ADD);
            }
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        if (USE_REG_TB) {
            /* delay slot: copy a0 into TCG_REG_TB (OR with %g0) */
            tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR);
        } else {
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x) \
    glue(glue(case INDEX_op_, x), _i32): \
    glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        /* neg dst, src  ==  sub dst, %g0, src  */
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        /* not dst, src  ==  orn dst, %g0, src  */
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        /* sign-extend low 32 bits: sra by 0 */
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        /* zero-extend low 32 bits: srl by 0 */
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        /* Unary ops synthesized as binary ops with %g0 as first source.  */
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
1620
1621static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
1622{
1623 static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
1624 static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
1625 static const TCGTargetOpDef R_r = { .args_ct_str = { "R", "r" } };
1626 static const TCGTargetOpDef r_R = { .args_ct_str = { "r", "R" } };
1627 static const TCGTargetOpDef R_R = { .args_ct_str = { "R", "R" } };
1628 static const TCGTargetOpDef r_A = { .args_ct_str = { "r", "A" } };
1629 static const TCGTargetOpDef R_A = { .args_ct_str = { "R", "A" } };
1630 static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } };
1631 static const TCGTargetOpDef RZ_r = { .args_ct_str = { "RZ", "r" } };
1632 static const TCGTargetOpDef sZ_A = { .args_ct_str = { "sZ", "A" } };
1633 static const TCGTargetOpDef SZ_A = { .args_ct_str = { "SZ", "A" } };
1634 static const TCGTargetOpDef rZ_rJ = { .args_ct_str = { "rZ", "rJ" } };
1635 static const TCGTargetOpDef RZ_RJ = { .args_ct_str = { "RZ", "RJ" } };
1636 static const TCGTargetOpDef R_R_R = { .args_ct_str = { "R", "R", "R" } };
1637 static const TCGTargetOpDef r_rZ_rJ
1638 = { .args_ct_str = { "r", "rZ", "rJ" } };
1639 static const TCGTargetOpDef R_RZ_RJ
1640 = { .args_ct_str = { "R", "RZ", "RJ" } };
1641 static const TCGTargetOpDef r_r_rZ_rJ
1642 = { .args_ct_str = { "r", "r", "rZ", "rJ" } };
1643 static const TCGTargetOpDef movc_32
1644 = { .args_ct_str = { "r", "rZ", "rJ", "rI", "0" } };
1645 static const TCGTargetOpDef movc_64
1646 = { .args_ct_str = { "R", "RZ", "RJ", "RI", "0" } };
1647 static const TCGTargetOpDef add2_32
1648 = { .args_ct_str = { "r", "r", "rZ", "rZ", "rJ", "rJ" } };
1649 static const TCGTargetOpDef add2_64
1650 = { .args_ct_str = { "R", "R", "RZ", "RZ", "RJ", "RI" } };
1651
1652 switch (op) {
1653 case INDEX_op_goto_ptr:
1654 return &r;
1655
1656 case INDEX_op_ld8u_i32:
1657 case INDEX_op_ld8s_i32:
1658 case INDEX_op_ld16u_i32:
1659 case INDEX_op_ld16s_i32:
1660 case INDEX_op_ld_i32:
1661 case INDEX_op_neg_i32:
1662 case INDEX_op_not_i32:
1663 return &r_r;
1664
1665 case INDEX_op_st8_i32:
1666 case INDEX_op_st16_i32:
1667 case INDEX_op_st_i32:
1668 return &rZ_r;
1669
1670 case INDEX_op_add_i32:
1671 case INDEX_op_mul_i32:
1672 case INDEX_op_div_i32:
1673 case INDEX_op_divu_i32:
1674 case INDEX_op_sub_i32:
1675 case INDEX_op_and_i32:
1676 case INDEX_op_andc_i32:
1677 case INDEX_op_or_i32:
1678 case INDEX_op_orc_i32:
1679 case INDEX_op_xor_i32:
1680 case INDEX_op_shl_i32:
1681 case INDEX_op_shr_i32:
1682 case INDEX_op_sar_i32:
1683 case INDEX_op_setcond_i32:
1684 return &r_rZ_rJ;
1685
1686 case INDEX_op_brcond_i32:
1687 return &rZ_rJ;
1688 case INDEX_op_movcond_i32:
1689 return &movc_32;
1690 case INDEX_op_add2_i32:
1691 case INDEX_op_sub2_i32:
1692 return &add2_32;
1693 case INDEX_op_mulu2_i32:
1694 case INDEX_op_muls2_i32:
1695 return &r_r_rZ_rJ;
1696
1697 case INDEX_op_ld8u_i64:
1698 case INDEX_op_ld8s_i64:
1699 case INDEX_op_ld16u_i64:
1700 case INDEX_op_ld16s_i64:
1701 case INDEX_op_ld32u_i64:
1702 case INDEX_op_ld32s_i64:
1703 case INDEX_op_ld_i64:
1704 case INDEX_op_ext_i32_i64:
1705 case INDEX_op_extu_i32_i64:
1706 return &R_r;
1707
1708 case INDEX_op_st8_i64:
1709 case INDEX_op_st16_i64:
1710 case INDEX_op_st32_i64:
1711 case INDEX_op_st_i64:
1712 return &RZ_r;
1713
1714 case INDEX_op_add_i64:
1715 case INDEX_op_mul_i64:
1716 case INDEX_op_div_i64:
1717 case INDEX_op_divu_i64:
1718 case INDEX_op_sub_i64:
1719 case INDEX_op_and_i64:
1720 case INDEX_op_andc_i64:
1721 case INDEX_op_or_i64:
1722 case INDEX_op_orc_i64:
1723 case INDEX_op_xor_i64:
1724 case INDEX_op_shl_i64:
1725 case INDEX_op_shr_i64:
1726 case INDEX_op_sar_i64:
1727 case INDEX_op_setcond_i64:
1728 return &R_RZ_RJ;
1729
1730 case INDEX_op_neg_i64:
1731 case INDEX_op_not_i64:
1732 case INDEX_op_ext32s_i64:
1733 case INDEX_op_ext32u_i64:
1734 return &R_R;
1735
1736 case INDEX_op_extrl_i64_i32:
1737 case INDEX_op_extrh_i64_i32:
1738 return &r_R;
1739
1740 case INDEX_op_brcond_i64:
1741 return &RZ_RJ;
1742 case INDEX_op_movcond_i64:
1743 return &movc_64;
1744 case INDEX_op_add2_i64:
1745 case INDEX_op_sub2_i64:
1746 return &add2_64;
1747 case INDEX_op_muluh_i64:
1748 return &R_R_R;
1749
1750 case INDEX_op_qemu_ld_i32:
1751 return &r_A;
1752 case INDEX_op_qemu_ld_i64:
1753 return &R_A;
1754 case INDEX_op_qemu_st_i32:
1755 return &sZ_A;
1756 case INDEX_op_qemu_st_i64:
1757 return &SZ_A;
1758
1759 default:
1760 return NULL;
1761 }
1762}
1763
1764static void tcg_target_init(TCGContext *s)
1765{
1766
1767
1768#ifndef use_vis3_instructions
1769 {
1770 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1771 use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1772 }
1773#endif
1774
1775 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
1776 tcg_target_available_regs[TCG_TYPE_I64] = ALL_64;
1777
1778 tcg_target_call_clobber_regs = 0;
1779 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
1780 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
1781 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
1782 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
1783 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
1784 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
1785 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
1786 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
1787 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
1788 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
1789 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
1790 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
1791 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
1792 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
1793 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
1794
1795 s->reserved_regs = 0;
1796 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
1797 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6);
1798 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7);
1799 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
1800 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
1801 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
1802 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1);
1803 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2);
1804}
1805
/* ELF machine type reported for the JIT-generated code.  */
#if SPARC64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif

/* DWARF call-frame-information record describing the JIT prologue,
   registered with the debugger via tcg_register_jit below.  */
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];   /* CFA definition (with bias on V9) */
    uint8_t fde_win_save;                   /* window-save annotation */
    uint8_t fde_ret_save[3];                /* return-address column */
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* %o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
#if SPARC64
        12, 30,                         /* DW_CFA_def_cfa %i6, 2047 (stack bias) */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register %i6 */
#endif
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register %o7, %i7 */
};
1842
1843void tcg_register_jit(void *buf, size_t buf_size)
1844{
1845 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1846}
1847
/* Retarget the direct-jump patch site at JMP_ADDR so that it jumps to
   ADDR.  TC_PTR is the start of the translation block containing the
   jump, needed to keep TCG_REG_TB consistent on the linked path.  */
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
                              uintptr_t addr)
{
    intptr_t tb_disp = addr - tc_ptr;
    intptr_t br_disp = addr - jmp_addr;
    tcg_insn_unit i1, i2;

    /* Both displacements must fit in 32 bits (asserted below);
       the patch sequences encode at most a 32-bit offset.  */
    tcg_debug_assert(tb_disp == (int32_t)tb_disp);
    tcg_debug_assert(br_disp == (int32_t)br_disp);

    if (!USE_REG_TB) {
        /* The patch site is a single CALL insn: rewrite its 30-bit
           word displacement atomically.  */
        atomic_set((uint32_t *)jmp_addr, deposit32(CALL, 0, 30, br_disp >> 2));
        flush_icache_range(jmp_addr, jmp_addr + 4);
        return;
    }

    /* With TCG_REG_TB the patch site is an 8-byte-aligned insn pair:
       choose an encoding of (branch or sethi, add/or/xor) based on how
       tb_disp fits.  */
    if (check_fit_ptr(tb_disp, 13)) {
        /* ba,pt %icc, addr; with TB += tb_disp in the delay slot.  */
        i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
              | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
        i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
              | INSN_IMM13(tb_disp));
    } else if (tb_disp >= 0) {
        /* sethi %hi(tb_disp), %t1; or %t1, %lo(tb_disp), %t1.  */
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13(tb_disp & 0x3ff));
    } else {
        /* Negative displacement: sethi of the complemented high bits,
           then xor with the sign-extended low part.  */
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
    }

    /* Publish both insns with a single 64-bit atomic store, then make
       the update visible to the instruction stream.  */
    atomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1));
    flush_icache_range(jmp_addr, jmp_addr + 8);
}
1888