1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include "tcg-be-null.h"
26
#ifdef CONFIG_DEBUG_TCG
/* Printable names for the 32 SPARC integer registers, indexed by
   register number, used only for debug dumps of generated code.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif
63
/* SPARC64 is 1 when compiling for a 64-bit SPARC host, 0 otherwise.
   __arch64__ is predefined by the compiler for 64-bit SPARC targets.  */
#ifdef __arch64__
# define SPARC64 1
#else
# define SPARC64 0
#endif

/* Mask of registers usable for full 64-bit values.  On a 32-bit host
   only the low 16 registers are listed; on sparc64 all 32 qualify.
   NOTE(review): on sparc64 this covers regs 0-31, on sparc32 regs 0-15.  */
#if SPARC64
# define ALL_64 0xffffffffu
#else
# define ALL_64 0xffffu
#endif

/* Scratch registers reserved for internal use by the code generator.
   T2 aliases %o7 (the call return-address register), so sequences that
   use T2 must not span a call.  */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_O7

/* For user-only emulation, the register holding guest_base.  */
#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif
89
/* Register allocation preference order.  Call-saved locals and ins
   come first so values survive calls; call-clobbered globals and outs
   are used last.  %g1/%o7 (T1/T2) and %g6/%g7/%i6/%i7 are excluded.  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
119
/* The six SPARC ABI integer argument registers, in argument order.  */
static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
128
/* Registers in which call results are returned to us.  */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};
135
/* Field encoders for the fixed 32-bit SPARC instruction formats.  */
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

/* Immediate operand encoders; bit 13 set selects the immediate form.  */
#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
/* Branch displacements are word offsets; OFF16 splits its high 2 bits
   into bits 20-21 per the BPr instruction format.  */
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)

/* Integer condition codes for Bicc/BPcc/MOVcc.  */
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

/* Register conditions for BPr/MOVr (compare a register against zero).  */
#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

/* cc-field selectors for MOVcc: 32-bit icc vs 64-bit xcc.  */
#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

/* BPcc modifier bits: cc selector, predict-taken, annul.  */
#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

/* Format-3 arithmetic/logical opcodes.  */
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

/* VIS3 opcodes (op3 0x36 = IMPDEP1, selected by opf).  */
#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

/* 32-bit shifts; the 64-bit X forms add bit 12.  */
#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

/* Control-transfer and misc opcodes.  */
#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
/* Loads and stores; the ...A forms take an explicit ASI.  */
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

/* Little-endian memory ops, built from the ASI forms.  */
#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

/* Runtime flag: may the backend emit VIS3 instructions (addxc etc.)?
   When not overridden by a macro, provide the variable definition.  */
#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif
271
static inline int check_fit_i64(int64_t val, unsigned int bits)
{
    /* True iff VAL is representable as a BITS-bit signed immediate,
       i.e. sign-extending its low BITS bits reproduces the value.  */
    int64_t sext = sextract64(val, 0, bits);
    return sext == val;
}
276
static inline int check_fit_i32(int32_t val, unsigned int bits)
{
    /* 32-bit variant of check_fit_i64: does VAL fit in BITS signed bits?  */
    int32_t sext = sextract32(val, 0, bits);
    return sext == val;
}
281
282#define check_fit_tl check_fit_i64
283#if SPARC64
284# define check_fit_ptr check_fit_i64
285#else
286# define check_fit_ptr check_fit_i32
287#endif
288
/*
 * Patch a branch instruction at CODE_PTR so its displacement field
 * targets VALUE.  Only the pc-relative WDISP16/WDISP19 relocations
 * used by this backend are supported; anything else aborts.
 */
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn;

    tcg_debug_assert(addend == 0);
    /* Convert the absolute target into a byte displacement from the
       instruction being patched.  */
    value = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr);

    switch (type) {
    case R_SPARC_WDISP16:
        /* The field holds a word offset, hence the >> 2 range check.  */
        if (!check_fit_ptr(value >> 2, 16)) {
            tcg_abort();
        }
        /* Clear the old displacement bits, then insert the new ones.  */
        insn = *code_ptr;
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(value);
        *code_ptr = insn;
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(value >> 2, 19)) {
            tcg_abort();
        }
        insn = *code_ptr;
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(value);
        *code_ptr = insn;
        break;
    default:
        tcg_abort();
    }
}
320
321
/*
 * Parse one target-specific operand-constraint letter, filling in CT
 * and advancing *PCT_STR past the consumed character.
 * Returns 0 on success, -1 for an unknown letter.
 */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':   /* any 32-bit-capable register */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'R':   /* any register usable for 64-bit values (ALL_64) */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, ALL_64);
        break;
    case 'A':   /* qemu_ld/st address; width depends on the guest */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0,
                         TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff);
        /* Fall through into reserve_helpers: exclude %o0-%o2, which
           the slow-path helper calls clobber.  */
    reserve_helpers:
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 's':   /* qemu_st 32-bit data register, minus helper regs */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        goto reserve_helpers;
    case 'S':   /* qemu_st 64-bit data register, minus helper regs */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, ALL_64);
        goto reserve_helpers;
    case 'I':   /* signed 11-bit immediate (movcc) */
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':   /* signed 13-bit immediate (arith) */
        ct->ct |= TCG_CT_CONST_S13;
        break;
    case 'Z':   /* the constant zero (use %g0) */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
369
370
371static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
372 const TCGArgConstraint *arg_ct)
373{
374 int ct = arg_ct->ct;
375
376 if (ct & TCG_CT_CONST) {
377 return 1;
378 }
379
380 if (type == TCG_TYPE_I32) {
381 val = (int32_t)val;
382 }
383
384 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
385 return 1;
386 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
387 return 1;
388 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
389 return 1;
390 } else {
391 return 0;
392 }
393}
394
395static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
396 TCGReg rs2, int op)
397{
398 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
399}
400
401static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
402 int32_t offset, int op)
403{
404 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
405}
406
/* Emit rd = rs1 OP val2, where VAL2 is a register number or, when
   VAL2CONST is set, a 13-bit signed immediate.  */
static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}
413
414static inline void tcg_out_mov(TCGContext *s, TCGType type,
415 TCGReg ret, TCGReg arg)
416{
417 if (ret != arg) {
418 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
419 }
420}
421
/* Emit "sethi %hi(arg), ret": loads the top 22 bits of ARG into RET
   and clears the low 10 bits.  */
static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}
426
/* Load a constant that fits in 13 signed bits: "or %g0, arg, ret".  */
static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}
431
/*
 * Load an arbitrary constant ARG into register RET, choosing the
 * shortest of several instruction sequences.  May clobber TCG_REG_T2
 * in the general 64-bit case.
 */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_target_long hi, lo = (int32_t)arg;

    /* For 32-bit values, only the low 32 bits matter.  */
    if (type == TCG_TYPE_I32) {
        arg = lo;
    }

    /* A 13-bit constant sign-extended to 64 bits: one "or" insn.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 32-bit constant zero-extended to 64 bits: sethi plus an
       optional "or" for the low 10 bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
        return;
    }

    /* A 32-bit constant sign-extended to 64 bits: sethi of the
       complement, then xor to flip all bits including the high half.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A full 64-bit constant, built from two 32-bit halves.  */
    if (check_fit_i32(lo, 13)) {
        /* The low half fits a 13-bit immediate: compensate the high
           half for the sign of LO, shift, then add LO directly.  */
        hi = (arg - lo) >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        /* General case: build both halves and combine via T2.  */
        hi = arg >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
478
/* Emit a load/store with a register+register address: [a1 + a2].  */
static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                                   TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}
484
485static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
486 intptr_t offset, int op)
487{
488 if (check_fit_ptr(offset, 13)) {
489 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
490 INSN_IMM13(offset));
491 } else {
492 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
493 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
494 }
495}
496
/* Load RET from [arg1 + arg2]; 32-bit loads zero-extend via LDUW.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}
502
/* Store ARG to [arg1 + arg2], choosing STW or STX by type.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}
508
509static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
510 TCGReg base, intptr_t ofs)
511{
512 if (val == 0) {
513 tcg_out_st(s, type, TCG_REG_G0, base, ofs);
514 return true;
515 }
516 return false;
517}
518
/* Load the pointer stored at absolute address ARG into RET: build the
   high bits with movi, then fold the low 10 bits into the load offset.  */
static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
{
    tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}
524
/* Write RS into the %y register (used as high input by udiv/sdiv).  */
static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}
529
/* Read the %y register into RD.  */
static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}
534
/*
 * Emit a 32-bit division rd = rs1 / val2 (UNS selects udiv vs sdiv).
 * The V8 divide uses %y as the upper 32 bits of the dividend, so it
 * must be seeded first: zero for unsigned, the sign-extension of RS1
 * for signed (computed into T1 via an arithmetic shift by 31).
 */
static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load the Y register with the sign or zero extension of RS1.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
549
/* Emit a nop, encoded as "sethi 0, %g0".  */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}
554
/* Map TCG comparison conditions to SPARC integer condition codes.  */
static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

/* Map TCG conditions to register-vs-zero conditions (BPr/MOVr).
   Unsigned conditions have no entry; callers must avoid them.  */
static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};
576
/* Emit a BPcc branch with a raw, pre-encoded 19-bit displacement.  */
static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}
581
/* Emit a BPcc branch to label L.  If L is not yet resolved, record a
   WDISP19 relocation so patch_reloc fills the displacement later.  */
static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        /* Make sure to preserve destinations during retranslation.  */
        off19 = *s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}
595
/* Compare C1 with C2 (register or immediate) by emitting a subcc
   whose result is discarded into %g0; only the flags are kept.  */
static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}
600
/* 32-bit conditional branch: compare, branch on icc, fill the delay
   slot with a nop.  */
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}
608
/* Emit a MOVcc: if condition COND holds in the flags selected by CC
   (icc or xcc), move V1 (register or 11-bit immediate) into RET.
   The condition code occupies the rs1 field in this format.  */
static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}
616
/* 32-bit conditional move: compare C1 with C2, then MOVcc on icc.  */
static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}
624
/* 64-bit conditional branch.  Signed comparisons against zero use the
   single-insn BPr (branch on register) form; everything else compares
   and branches on xcc.  The delay slot is filled with a nop.  */
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            /* Make sure to preserve destinations during retranslation.  */
            off16 = *s->code_ptr & INSN_OFF16(-1);
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}
647
/* Emit a MOVr: if register C1 satisfies COND relative to zero, move
   V1 (register or 10-bit immediate) into RET.  */
static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}
655
/* 64-bit conditional move.  Signed comparisons against zero can use
   the single MOVr insn (when any immediate fits its 10-bit field);
   otherwise compare and use MOVcc on xcc.  */
static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}
671
/*
 * Set RET to 1 if (C1 COND C2) holds, else 0, for 32-bit operands.
 * Where possible each condition is rewritten into LTU or GEU so the
 * result can be read straight from the carry flag with addc/subc;
 * the fallback is compare + movi 0 + movcc 1.
 */
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        /* General case: compare, then select 0 or 1 via movcc.  */
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    /* Here cond is LTU or GEU; materialize the carry into RET.  */
    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
724
/*
 * Set RET to 1 if (C1 COND C2) holds, else 0, for 64-bit operands.
 * With VIS3, NE-vs-zero and LTU reduce to compare + addxc (add with
 * xcc carry).  Signed comparisons against zero use MOVR; otherwise
 * fall back to compare + movi 0 + movcc 1 on xcc.
 */
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            /* Rewrite "c1 != 0" as "0 <u c1" and fall through to LTU.  */
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */

        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}
756
/*
 * Emit a double-word (2 x 32-bit) add/sub: (rh:rl) = (ah:al) OPL/OPH
 * (bh:bl).  OPL sets the carry (e.g. addcc) and OPH consumes it
 * (e.g. addc).  The low result goes through a temporary so it cannot
 * clobber AH or BH before the high half is computed.
 */
static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
772
/*
 * Emit a double-word (2 x 64-bit) add/sub: (rh:rl) = (ah:al) +/- (bh:bl).
 * The low half uses addcc/subcc to set xcc.carry.  The high half then
 * consumes the carry: directly via VIS3 addxc when available, or by
 * computing both possible high results and selecting with MOVcc on the
 * carry condition otherwise.
 */
static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /* Otherwise adjust BH as if there is carry into T2 ... */
        if (bhconst) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
820
/* Emit a call to DEST without filling the delay slot.  Uses a direct
   CALL when the pc-relative displacement fits in 32 bits; otherwise
   builds the target (rounded down to 4K) in T1 and jmpl's with the
   low 12 bits as offset, saving the return address in %o7.  */
static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        uintptr_t desti = (uintptr_t)dest;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, desti & ~0xfff);
        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
    }
}
833
/* Emit a call to DEST with a nop in the delay slot.  */
static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
{
    tcg_out_call_nodelay(s, dest);
    tcg_out_nop(s);
}
839
/* Emit a memory barrier; the low bits of A0 select the membar mask.  */
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}
845
#ifdef CONFIG_SOFTMMU
/* Entry points of the per-memop trampolines emitted below, indexed by
   (MO_BSWAP | MO_SSIZE) for loads and (MO_BSWAP | MO_SIZE) for stores.  */
static tcg_insn_unit *qemu_ld_trampoline[16];
static tcg_insn_unit *qemu_st_trampoline[16];

/*
 * Emit one small trampoline per softmmu load/store helper.  Each
 * trampoline shuffles arguments into ABI position (merging split
 * 64-bit values on 32-bit hosts), stashes the original return address,
 * installs env in %o0, and tail-calls the C helper — letting the fast
 * path at the qemu_ld/st site call the trampoline with a single insn.
 */
static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[16] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEQ]  = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEQ]  = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[16] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEQ]  = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEQ]  = helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = s->code_ptr;

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_ld_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = s->code_ptr;

        if (SPARC64) {
            ra = TCG_REG_O4;
        } else {
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            /* Skip the oi argument.  */
            ra += 1;
        }

        /* Set the retaddr operand.  If the address consumed all of the
           output registers, spill the return address to the stack and
           use %g1 instead.  */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_st_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
#endif
953
954
/* Generate the global prologue: set up a register window and stack
   frame, optionally load guest_base, then jump to the TB whose address
   arrives in %i1.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the stack bias.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* Frame: call-arg save area plus temp buffer, rounded up to the
       required stack alignment; the SAVE both shifts the register
       window and allocates the frame in one insn.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Jump to the TB address (second incoming argument, now %i1).  */
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required.  We issue ret + restore directly in the TB.  */

#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
#endif
}
991
#if defined(CONFIG_SOFTMMU)
/*
 * Emit the softmmu TLB lookup for guest address ADDR in mmu context
 * MEM_INDEX.  WHICH selects the CPUTLBEntry comparator field
 * (addr_read or addr_write) to load.
 *
 * On return:
 *   %o0 holds the masked guest address (page bits plus low alignment
 *       bits, per the alignment required by OPC),
 *   %o1 holds the TLB entry's addend,
 *   %o2 holds the comparator value,
 * and the condition codes reflect "cmp %o0, %o2" — equal means hit.
 * Returns the register holding the address to use for the access
 * (a zero-extended copy in %o0 for 32-bit guests on sparc64).
 */
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               TCGMemOp opc, int which)
{
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    int tlb_ofs;

    /* Shift the page number down.  */
    tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.
       An access must be at least as aligned as its size.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
                 TARGET_PAGE_MASK | ((1 << a_bits) - 1));

    /* Mask the tlb index.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);

    /* Mask page, part 2.  */
    tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);

    /* Shift the tlb index into place.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.
       If the offset won't fit in 13 bits, fold its high part into r1.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_ptr(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        if (tlb_ofs & ~0x3ff) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, tlb_ofs & ~0x3ff);
            tcg_out_arith(s, r1, r1, TCG_REG_T1, ARITH_ADD);
        }
        tlb_ofs &= 0x3ff;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 sets the condition codes.  */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif
1064
/* Host load opcodes indexed by (MO_BSWAP | MO_SSIZE).  The LE entries
   use the ASI forms with ASI_PRIMARY_LITTLE.  */
static const int qemu_ld_opc[16] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEQ]  = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEQ]  = LDX_LE,
};

/* Host store opcodes indexed by (MO_BSWAP | MO_SIZE).  */
static const int qemu_st_opc[16] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEQ]  = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEQ]  = STX_LE,
};
1093
/*
 * Emit a guest load of the memop encoded in OI from ADDR into DATA.
 * With softmmu: TLB lookup, a one-insn fast path executed in the
 * annulled delay slot of the hit branch, and a slow path that calls
 * the matching trampoline.  Without softmmu: a single [addr + base]
 * load relative to guest_base.
 */
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi, bool is_64)
{
    TCGMemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot: the load itself, using the addend left in %o1 */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addr);

    /* For a signed 32-bit load, use the unsigned helper and perform
       the sign extension ourselves below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func);
    /* delay slot: pass the memop index as the oi argument */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus.  */
    if (SPARC64) {
        /* We let the helper sign-extend SB and SW, but leave SL for here.  */
        if (is_64 && (memop & MO_SSIZE) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
        } else {
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        }
    } else {
        if ((memop & MO_SIZE) == MO_64) {
            /* Reassemble the 64-bit result from the O0:O1 pair.  */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
        } else if (is_64) {
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension.  */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
        }
    }

    /* Patch the hit branch to land here, past the slow path.  */
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        /* Zero-extend the 32-bit guest address.  */
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
#endif
}
1175
/*
 * Emit a guest store of DATA to ADDR for the memop encoded in OI.
 * Mirrors tcg_out_qemu_ld: one-insn fast path in the annulled delay
 * slot of the TLB-hit branch, trampoline call on miss; a plain
 * guest_base-relative store without softmmu.
 */
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi)
{
    TCGMemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot: the store itself, using the addend left in %o1 */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func);
    /* delay slot: pass the memop index as the oi argument */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    /* Patch the hit branch to land here, past the slow path.  */
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        /* Zero-extend the 32-bit guest address.  */
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
#endif
}
1230
/* Emit host code for one TCG opcode.  Dispatches on OPC to the emitters
   defined earlier in this file; ARGS holds the operands and CONST_ARGS
   flags which of them are constants (per the constraint letters declared
   in sparc_op_defs below).  */
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
            /* The return value fits in a simm13: materialize it in the
               delay slot of the RETURN.  */
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        } else {
            /* Build the high bits in %i0 before the RETURN; the delay-slot
               OR executes after the register-window restore, where that
               value is visible as %o0 (SPARC window rotation), so the low
               bits are OR-ed into %o0 there.  */
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
            /* Make sure to preserve links during retranslation: reuse the
               low 30 bits already present at the patch site.  */
            tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1)));
        } else {
            /* indirect jump method: load the target from the jump table */
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (uintptr_t)(s->tb_jmp_target_addr + a0));
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
        }
        tcg_out_nop(s);   /* delay slot */
        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);   /* delay slot */
        break;

/* Expand one case label for each of the _i32 and _i64 variants of x.  */
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        /* Sign-extend low 32 bits via a 32-bit arithmetic shift by 0.  */
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        /* Zero-extend low 32 bits via a 32-bit logical shift by 0.  */
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        /* Common tail: a0 = a1 <op c> a2, a2 possibly an immediate.  */
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        /* Common tail for unary ops: a0 = %g0 <op c> a1 (neg/not).  */
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
1490
/* Per-opcode operand constraints, registered with the core allocator via
   tcg_add_target_add_op_defs() in tcg_target_init().  The letters are
   decoded by this target's constraint parser (not visible in this chunk);
   by this backend's convention: "r"/"R" select the 32-bit / ALL_64
   register classes, "Z" additionally accepts constant zero (%g0),
   "J"/"I" accept small signed immediates, "A" is the guest-address class
   for qemu_ld/st, and "s"/"S" the matching store-data classes, "0" ties
   an output to input 0 -- confirm against the parser before relying on
   these descriptions.  */
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rJ" } },

    /* 64-bit operations use the "R" class, which on 32-bit hosts is
       restricted to the registers that can hold a 64-bit value.  */
    { INDEX_op_ld8u_i64, { "R", "r" } },
    { INDEX_op_ld8s_i64, { "R", "r" } },
    { INDEX_op_ld16u_i64, { "R", "r" } },
    { INDEX_op_ld16s_i64, { "R", "r" } },
    { INDEX_op_ld32u_i64, { "R", "r" } },
    { INDEX_op_ld32s_i64, { "R", "r" } },
    { INDEX_op_ld_i64, { "R", "r" } },
    { INDEX_op_st8_i64, { "RZ", "r" } },
    { INDEX_op_st16_i64, { "RZ", "r" } },
    { INDEX_op_st32_i64, { "RZ", "r" } },
    { INDEX_op_st_i64, { "RZ", "r" } },

    { INDEX_op_add_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_mul_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_div_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_divu_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sub_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_and_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_andc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_or_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_orc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_xor_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_shl_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_shr_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sar_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_neg_i64, { "R", "RJ" } },
    { INDEX_op_not_i64, { "R", "RJ" } },

    { INDEX_op_ext32s_i64, { "R", "R" } },
    { INDEX_op_ext32u_i64, { "R", "R" } },
    { INDEX_op_ext_i32_i64, { "R", "r" } },
    { INDEX_op_extu_i32_i64, { "R", "r" } },
    { INDEX_op_extrl_i64_i32, { "r", "R" } },
    { INDEX_op_extrh_i64_i32, { "r", "R" } },

    { INDEX_op_brcond_i64, { "RZ", "RJ" } },
    { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },

    { INDEX_op_add2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
    { INDEX_op_sub2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
    { INDEX_op_muluh_i64, { "R", "RZ", "RZ" } },

    { INDEX_op_qemu_ld_i32, { "r", "A" } },
    { INDEX_op_qemu_ld_i64, { "R", "A" } },
    { INDEX_op_qemu_st_i32, { "sZ", "A" } },
    { INDEX_op_qemu_st_i64, { "SZ", "A" } },

    { INDEX_op_mb, { } },
    { -1 },   /* terminator */
};
1585
1586static void tcg_target_init(TCGContext *s)
1587{
1588
1589
1590#ifndef use_vis3_instructions
1591 {
1592 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1593 use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1594 }
1595#endif
1596
1597 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
1598 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64);
1599
1600 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
1601 (1 << TCG_REG_G1) |
1602 (1 << TCG_REG_G2) |
1603 (1 << TCG_REG_G3) |
1604 (1 << TCG_REG_G4) |
1605 (1 << TCG_REG_G5) |
1606 (1 << TCG_REG_G6) |
1607 (1 << TCG_REG_G7) |
1608 (1 << TCG_REG_O0) |
1609 (1 << TCG_REG_O1) |
1610 (1 << TCG_REG_O2) |
1611 (1 << TCG_REG_O3) |
1612 (1 << TCG_REG_O4) |
1613 (1 << TCG_REG_O5) |
1614 (1 << TCG_REG_O7));
1615
1616 tcg_regset_clear(s->reserved_regs);
1617 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
1618 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6);
1619 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7);
1620 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
1621 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
1622 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
1623 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1);
1624 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2);
1625
1626 tcg_add_target_add_op_defs(sparc_op_defs);
1627}
1628
1629#if SPARC64
1630# define ELF_HOST_MACHINE EM_SPARCV9
1631#else
1632# define ELF_HOST_MACHINE EM_SPARC32PLUS
1633# define ELF_HOST_FLAGS EF_SPARC_32PLUS
1634#endif
1635
/* Layout of the unwind (.debug_frame-style) data handed to the JIT debug
   interface: a common CIE/FDE header followed by the CFA-definition,
   window-save and return-address opcodes initialized below.  On sparc64
   the def_cfa opcode needs two extra ULEB128 bytes for the CFA offset.  */
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];   /* CFA definition opcode(s) */
    uint8_t fde_win_save;                   /* register-window save opcode */
    uint8_t fde_ret_save[3];                /* return-address rule opcode */
} DebugFrame;
1642
/* Static DWARF CFI describing the prologue emitted by this backend, so
   that debuggers can unwind through generated code.  */
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* %o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
#if SPARC64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)   /* ULEB128-encoded 2047 bias */
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
1665
/* Register the generated-code buffer with the JIT debug interface,
   attaching the static unwind description above.  */
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
1670
1671void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1672{
1673 uint32_t *ptr = (uint32_t *)jmp_addr;
1674 uintptr_t disp = addr - jmp_addr;
1675
1676
1677
1678 tcg_debug_assert(disp == (int32_t)disp);
1679
1680 atomic_set(ptr, deposit32(CALL, 0, 30, disp >> 2));
1681 flush_icache_range(jmp_addr, jmp_addr + 4);
1682}
1683