/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-pool.inc.c"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#ifdef __arch64__
# define SPARC64 1
#else
# define SPARC64 0
#endif

/* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
   registers.  These are saved manually by the kernel in full 64-bit
   slots.  The %i and %l registers are saved by the register window
   mechanism, which only allocates space for 32 bits.  Given that this
   window spill/fill can happen on any signal, we must consider the
   high bits of the %i and %l registers garbage at all times.  */
#if SPARC64
# define ALL_64  0xffffffffu
#else
# define ALL_64  0xffffu
#endif

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1
#define USE_REG_TB  (sizeof(void *) > 4)

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)
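
/*
 * Illustrative note on the macros above (per the SPARC V9 format-3
 * instruction layout): an instruction word is assembled by OR-ing
 * field macros into
 *
 *     op[31:30] | rd[29:25] | op3[24:19] | rs1[18:14] | i[13] | rs2/simm13
 *
 * so, for example, ARITH_ADD | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2)
 * encodes "add %rs1, %rs2, %rd", while substituting INSN_IMM13(imm) for
 * INSN_RS2 sets the i bit and selects the sign-extended 13-bit immediate.
 */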

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
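
/*
 * Illustrative note: SPARC has no dedicated little-endian load/store
 * opcodes.  The *_LE forms above are the "alternate space" instructions
 * (LDUHA, STWA, etc.) with the immediate ASI field set to
 * ASI_PRIMARY_LITTLE (0x88), which performs the same primary-space
 * access with the bytes swapped; e.g. LDUW_LE assembles roughly as
 * "lduwa [addr] 0x88, %rd".
 */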

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static inline int check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static inline int check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#if SPARC64
# define check_fit_ptr  check_fit_i64
#else
# define check_fit_ptr  check_fit_i32
#endif
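
/*
 * For example, check_fit_tl(val, 13) accepts exactly -4096 <= val <= 4095,
 * the values representable in the sign-extended 13-bit immediate field
 * built by INSN_IMM13(); likewise a width of 11 matches the MOVcc
 * immediate and 10 matches the MOVr immediate.
 */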

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn = *code_ptr;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr);

    switch (type) {
    case R_SPARC_WDISP16:
        assert(check_fit_ptr(pcrel >> 2, 16));
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        assert(check_fit_ptr(pcrel >> 2, 19));
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    default:
        g_assert_not_reached();
    }

    *code_ptr = insn;
    return true;
}
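
/*
 * Usage sketch: a forward branch is first emitted with a zero
 * displacement and registered via tcg_out_reloc(); once the label is
 * bound, patch_reloc() rewrites the 19-bit (BPcc) or 16-bit (BPr)
 * displacement in place.  Displacements are pc-relative and counted in
 * 32-bit words, hence the "pcrel >> 2" in the fit checks.
 */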

/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        break;
    case 'R':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = ALL_64;
        break;
    case 'A': /* qemu_ld/st address constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff;
    reserve_helpers:
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 's': /* qemu_st data 32-bit constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        goto reserve_helpers;
    case 'S': /* qemu_st data 64-bit constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = ALL_64;
        goto reserve_helpers;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                                 TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                                  int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static inline bool tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* Make sure we test 32-bit constants for imm13 properly.  */
    if (type == TCG_TYPE_I32) {
        arg = lo;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue && USE_REG_TB) {
        test = arg - (uintptr_t)s->code_gen_ptr;
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
        return;
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 21-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (check_fit_tl(test, 13)) {
        tcg_out_movi_imm13(s, ret, test);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    } else if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi_int(s, type, ret, arg, false);
}
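
/*
 * Worked example (illustrative): arg = 0x123456789a.  It fails the
 * 13-bit, TB-relative, 32-bit, and shifted-21-bit tests, and the low
 * part 0x3456789a does not fit in 13 bits, so the final path emits:
 * movi %ret, 0x12 ; movi %t2, 0x3456789a ; sllx %ret, 32, %ret ;
 * or %ret, %t2, %ret.
 */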

static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                                   TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
{
    intptr_t diff = arg - (uintptr_t)s->code_gen_ptr;
    if (USE_REG_TB && check_fit_ptr(diff, 13)) {
        tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
        return;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}

static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
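
/*
 * Background (illustrative): the 32-bit UDIV/SDIV instructions divide
 * the 64-bit quantity Y:rs1 by the second operand, so Y must be zeroed
 * for an unsigned divide, or set to rs1 >> 31 (all zeros or all ones)
 * to sign-extend the dividend for a signed divide, as done above.
 */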

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
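
/*
 * How the carry trick above works (illustrative): after
 * "subcc %c1, %c2, %g0" the carry flag is set exactly when c1 <u c2.
 * "addc %g0, 0, %ret" then computes 0 + 0 + C, materializing LTU as
 * 0 or 1, while "subc %g0, -1, %ret" computes 0 - (-1) - C = 1 - C,
 * which is GEU.
 */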

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
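
/*
 * Example (illustrative): a double-word add is emitted as
 * "addcc %al, %bl, %rl ; addc %ah, %bh, %rh", with the CC form
 * generating the carry that the second instruction consumes.  The tmp
 * selection above only guards against the low result clobbering an
 * input of the high-part operation.
 */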

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /* Otherwise adjust BH as if there is carry into T2 ... */
        if (bhconst) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        uintptr_t desti = (uintptr_t)dest;
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                         desti & ~0xfff, in_prologue);
        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
    }
}

static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

#ifdef CONFIG_SOFTMMU
static tcg_insn_unit *qemu_ld_trampoline[16];
static tcg_insn_unit *qemu_st_trampoline[16];

static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
     */
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
        break;
    case MO_16:
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        break;
    case MO_32:
        if (SPARC64) {
            /* srl r, %g0, r: a shift count of zero, but the 32-bit SRL
               always zero-extends its result to 64 bits.  */
            tcg_out_arith(s, r, r, 0, SHIFT_SRL);
        }
        break;
    case MO_64:
        break;
    }
}

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[16] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEQ]  = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEQ]  = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[16] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEQ]  = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEQ]  = helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = s->code_ptr;

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = s->code_ptr;

        if (SPARC64) {
            emit_extend(s, TCG_REG_O2, i);
            ra = TCG_REG_O4;
        } else {
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                emit_extend(s, ra, i);
                ra += 1;
            }
            /* Skip the oi argument.  */
            ra += 1;
        }

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    if (USE_REG_TB) {
        QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    }

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
#endif
}
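
/*
 * The generated prologue/epilogue is thus roughly (illustrative):
 *
 *     save    %sp, -frame_size, %sp
 *     jmpl    %i1 + 0, %g0         ! enter the TB; env arrives in %i0
 *      nop                         ! delay slot
 * code_gen_epilogue:
 *     return  %i7 + 8
 *      mov    0, %o0               ! delay slot: return 0 = no TB linked
 */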

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDR contains the guest address, possibly requiring zero-extension.

   MEM_INDEX and OPC are the memory context and operation to perform.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */
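
/*
 * For reference, the fast path emitted above is roughly (illustrative):
 *
 *     ldx    [%env + mask_off], %o0    ! CPUTLBDescFast.mask
 *     ldx    [%env + table_off], %o1   ! CPUTLBDescFast.table
 *     srl    %addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, %o2
 *     and    %o2, %o0, %o2             ! index, pre-scaled to entry size
 *     add    %o2, %o1, %o2             ! CPUTLBEntry address
 *     ld[x]  [%o2 + which], %o0        ! comparator (addr_read/addr_write)
 *     ldx    [%o2 + addend], %o1
 *     and    %addr, compare_mask, %o2  ! page part of the guest address
 *     subcc  %o0, %o2, %g0             ! sets %[ix]cc for the branch
 */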

static const int qemu_ld_opc[16] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEQ]  = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEQ]  = LDX_LE,
};

static const int qemu_st_opc[16] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEQ]  = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEQ]  = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus.  */
    if (SPARC64) {
        /* We let the helper sign-extend SB and SW, but leave SL for here.  */
        if (is_64 && (memop & MO_SSIZE) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
        } else {
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        }
    } else {
        if ((memop & MO_SIZE) == MO_64) {
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
        } else if (is_64) {
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension.  */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi)
{
    MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
            break;
        } else if (USE_REG_TB) {
            intptr_t tb_diff = a0 - (uintptr_t)s->code_gen_ptr;
            if (check_fit_ptr(tb_diff, 13)) {
                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
                /* Note that TCG_REG_TB has been unwound to O1.  */
                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
                break;
            }
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            if (USE_REG_TB) {
                /* make sure the patch is 8-byte aligned.  */
                if ((intptr_t)s->code_ptr & 4) {
                    tcg_out_nop(s);
                }
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out_sethi(s, TCG_REG_T1, 0);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
            } else {
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out32(s, CALL);
                tcg_out_nop(s);
            }
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_TB,
                           (uintptr_t)(s->tb_jmp_target_addr + a0));
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
            tcg_out_nop(s);
        }
        set_jmp_reset_offset(s, a0);

        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB.  */
        if (USE_REG_TB) {
            c = -tcg_current_code_size(s);
            if (check_fit_i32(c, 13)) {
                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
            } else {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
                              TCG_REG_T1, ARITH_ADD);
            }
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        if (USE_REG_TB) {
            /* delay slot: install the new TB address into TCG_REG_TB */
            tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR);
        } else {
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef R_r = { .args_ct_str = { "R", "r" } };
    static const TCGTargetOpDef r_R = { .args_ct_str = { "r", "R" } };
    static const TCGTargetOpDef R_R = { .args_ct_str = { "R", "R" } };
    static const TCGTargetOpDef r_A = { .args_ct_str = { "r", "A" } };
    static const TCGTargetOpDef R_A = { .args_ct_str = { "R", "A" } };
    static const TCGTargetOpDef rZ_r = { .args_ct_str = { "rZ", "r" } };
    static const TCGTargetOpDef RZ_r = { .args_ct_str = { "RZ", "r" } };
    static const TCGTargetOpDef sZ_A = { .args_ct_str = { "sZ", "A" } };
    static const TCGTargetOpDef SZ_A = { .args_ct_str = { "SZ", "A" } };
    static const TCGTargetOpDef rZ_rJ = { .args_ct_str = { "rZ", "rJ" } };
    static const TCGTargetOpDef RZ_RJ = { .args_ct_str = { "RZ", "RJ" } };
    static const TCGTargetOpDef R_R_R = { .args_ct_str = { "R", "R", "R" } };
    static const TCGTargetOpDef r_rZ_rJ
        = { .args_ct_str = { "r", "rZ", "rJ" } };
    static const TCGTargetOpDef R_RZ_RJ
        = { .args_ct_str = { "R", "RZ", "RJ" } };
    static const TCGTargetOpDef r_r_rZ_rJ
        = { .args_ct_str = { "r", "r", "rZ", "rJ" } };
    static const TCGTargetOpDef movc_32
        = { .args_ct_str = { "r", "rZ", "rJ", "rI", "0" } };
    static const TCGTargetOpDef movc_64
        = { .args_ct_str = { "R", "RZ", "RJ", "RI", "0" } };
    static const TCGTargetOpDef add2_32
        = { .args_ct_str = { "r", "r", "rZ", "rZ", "rJ", "rJ" } };
    static const TCGTargetOpDef add2_64
        = { .args_ct_str = { "R", "R", "RZ", "RZ", "RJ", "RI" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
        return &r_r;

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
        return &rZ_r;

    case INDEX_op_add_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_setcond_i32:
        return &r_rZ_rJ;

    case INDEX_op_brcond_i32:
        return &rZ_rJ;
    case INDEX_op_movcond_i32:
        return &movc_32;
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return &add2_32;
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return &r_r_rZ_rJ;

    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return &R_r;

    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return &RZ_r;

    case INDEX_op_add_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i64:
        return &R_RZ_RJ;

    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
        return &R_R;

    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return &r_R;

    case INDEX_op_brcond_i64:
        return &RZ_RJ;
    case INDEX_op_movcond_i64:
        return &movc_64;
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return &add2_64;
    case INDEX_op_muluh_i64:
        return &R_R_R;

    case INDEX_op_qemu_ld_i32:
        return &r_A;
    case INDEX_op_qemu_ld_i64:
        return &R_A;
    case INDEX_op_qemu_st_i32:
        return &sZ_A;
    case INDEX_op_qemu_st_i64:
        return &SZ_A;

    default:
        return NULL;
    }
}

static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time.  */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_64;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
}

#if SPARC64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
#if SPARC64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}

void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
                              uintptr_t addr)
{
    intptr_t tb_disp = addr - tc_ptr;
    intptr_t br_disp = addr - jmp_addr;
    tcg_insn_unit i1, i2;

    /* We can reach the entire address space for ILP32.
       For LP64, the code_gen_buffer can't be larger than 2GB.  */
    tcg_debug_assert(tb_disp == (int32_t)tb_disp);
    tcg_debug_assert(br_disp == (int32_t)br_disp);

    if (!USE_REG_TB) {
        atomic_set((uint32_t *)jmp_addr, deposit32(CALL, 0, 30, br_disp >> 2));
        flush_icache_range(jmp_addr, jmp_addr + 4);
        return;
    }

    /* This does not exercise the range of the branch, but we do
       still need to be able to load the new value of TCG_REG_TB.
       But this does still happen quite often.  */
    if (check_fit_ptr(tb_disp, 13)) {
        /* ba,pt %icc, addr */
        i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
              | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
        i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
              | INSN_IMM13(tb_disp));
    } else if (tb_disp >= 0) {
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13(tb_disp & 0x3ff));
    } else {
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
    }

    atomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1));
    flush_icache_range(jmp_addr, jmp_addr + 8);
}