/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {

static const int tcg_target_reg_alloc_order[] = {

static const int tcg_target_call_iarg_regs[6] = {

static const int tcg_target_call_oarg_regs[2] = {
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
    if (value != (uint32_t)value)
    *(uint32_t *)code_ptr = value;
/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        ct->ct |= TCG_CT_CONST_S11;
        ct->ct |= TCG_CT_CONST_S13;
#define ABS(x) ((x) < 0 ? -(x) : (x))
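/*
 * Note: the S11/S13 tests below accept a constant whose magnitude fits in
 * 10 or 12 bits plus the sign, i.e. roughly the SPARC 11-bit and 13-bit
 * signed immediate ranges (the ABS-based check is slightly conservative
 * for the most negative values).
 */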
/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
    if (ct & TCG_CT_CONST)
    else if ((ct & TCG_CT_CONST_S11) && ABS(val) == (ABS(val) & 0x3ff))
    else if ((ct & TCG_CT_CONST_S13) && ABS(val) == (ABS(val) & 0xfff))
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)

#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)

#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
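/*
 * The macros above assemble raw SPARC instruction words (format 3 places
 * op in bits 31:30, rd in 29:25, op3 in 24:19, rs1 in 18:14 and rs2 in
 * 4:0).  For example, "add %g1, %g2, %g3" can be built as
 *   INSN_OP(2) | INSN_OP3(0x00) | INSN_RD(3) | INSN_RS1(1) | INSN_RS2(2)
 * which yields the word 0x86004002.
 */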
#define BA (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))
#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
#define WRY (INSN_OP(2) | INSN_OP3(0x30))
#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI (INSN_OP(0) | INSN_OP2(0x4))
#define CALL INSN_OP(1)
#define LDUB (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW (INSN_OP(3) | INSN_OP3(0x08))
#define LDX (INSN_OP(3) | INSN_OP3(0x0b))
#define STB (INSN_OP(3) | INSN_OP3(0x05))
#define STH (INSN_OP(3) | INSN_OP3(0x06))
#define STW (INSN_OP(3) | INSN_OP3(0x04))
#define STX (INSN_OP(3) | INSN_OP3(0x0e))
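/* Register-to-register move, synthesized as "or arg, %g0, ret".  */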
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(arg) |
              INSN_RS2(TCG_REG_G0));
}
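/*
 * Materialize a constant.  Small values (those that survive the 0xfff
 * mask) are emitted as a single "or" with %g0; other 32-bit values use
 * the classic sethi (upper 22 bits) + or (low 10 bits) pair.  Constants
 * wider than 32 bits are not handled and are only reported to stderr.
 */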
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, tcg_target_long arg)
{
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    if (arg != (arg & 0xffffffff))
        fprintf(stderr, "unimplemented %s with constant %ld\n", __func__, arg);
#endif
    if (arg == (arg & 0xfff))
        tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(TCG_REG_G0) |
                  INSN_IMM13(arg));
    else {
        tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
        if (arg & 0x3ff)
            tcg_out32(s, ARITH_OR | INSN_RD(ret) | INSN_RS1(ret) |
                      INSN_IMM13(arg & 0x3ff));
    }
}
static inline void tcg_out_ld_raw(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | (((uint32_t)arg & 0xfffffc00) >> 10));
    tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) |
              INSN_IMM13(arg & 0x3ff));
}
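/*
 * Load a pointer-sized value from an absolute address.  On a pure 64-bit
 * host this uses LDX (addresses above 32 bits are not handled); otherwise
 * it falls back to the 32-bit tcg_out_ld_raw above.
 */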
static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    if (arg != (arg & 0xffffffff))
        fprintf(stderr, "unimplemented %s with offset %ld\n", __func__, arg);
    if (arg != (arg & 0xfff))
        tcg_out32(s, SETHI | INSN_RD(ret) | (((uint32_t)arg & 0xfffffc00) >> 10));
    tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(ret) |
              INSN_IMM13(arg & 0x3ff));
#else
    tcg_out_ld_raw(s, ret, arg);
#endif
}
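/*
 * Emit a single load/store of the given opcode with a register base and
 * an immediate offset; only small non-negative offsets that pass the
 * 13-bit immediate field check are handled so far.
 */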
static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset, int op)
{
    if (offset == (offset & 0xfff))
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    else
        fprintf(stderr, "unimplemented %s with offset %d\n", __func__, offset);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    if (type == TCG_TYPE_I32)
        tcg_out_ldst(s, ret, arg1, arg2, LDUW);
    else
        tcg_out_ldst(s, ret, arg1, arg2, LDX);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    if (type == TCG_TYPE_I32)
        tcg_out_ldst(s, arg, arg1, arg2, STW);
    else
        tcg_out_ldst(s, arg, arg1, arg2, STX);
}
static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_RS2(rs2));
}
static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1, int offset,
                                  int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_IMM13(offset));
}
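/*
 * Write the %y register, which supplies the upper 32 bits of the dividend
 * for the 32-bit UDIV/SDIV instructions; only the values 0 and -1 (zero
 * or sign extension of the dividend) are needed here.
 */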
static inline void tcg_out_sety(TCGContext *s, tcg_target_long val)
{
    if (val == 0 || val == -1)
        tcg_out32(s, WRY | INSN_IMM13(val));
    else
        fprintf(stderr, "unimplemented sety %ld\n", (long)val);
}
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val == (val & 0xfff))
        tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
    else
        fprintf(stderr, "unimplemented addi %ld\n", (long)val);
}
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, SETHI | INSN_RD(TCG_REG_G0) | 0);
}
/* Generate global QEMU prologue and epilogue code */
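/*
 * The prologue allocates a register window and a minimal stack frame with
 * SAVE, then jumps to the translated-code pointer passed as the first C
 * argument (in %o0 on entry, visible as %i0 after the SAVE), with a nop in
 * the branch delay slot.  The matching epilogue is emitted inline by the
 * exit_tb opcode below.
 */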
void tcg_target_qemu_prologue(TCGContext *s)
{
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-TCG_TARGET_STACK_MINFRAME));
    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I0) |
              INSN_RS2(TCG_REG_G0));
    tcg_out_nop(s);
}
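/*
 * Emit host code for one TCG opcode.  exit_tb places its argument in %i0
 * and returns through %i7 while restoring the register window; goto_tb
 * either plants a patchable 22-bit branch when the displacement allows it
 * or jumps through a pointer loaded from the tb_next array.  Most memory
 * access (qemu_ld/st) and branch opcodes are still unimplemented stubs.
 */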
static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
                              const int *const_args)
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                  INSN_RS2(TCG_REG_G0));
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            if (ABS(args[0] - (unsigned long)s->code_ptr) ==
                (ABS(args[0] - (unsigned long)s->code_ptr) & 0x1fffff)) {
                          INSN_OFF22(args[0] - (unsigned long)s->code_ptr));
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, args[0]);
                tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
                          INSN_RS2(TCG_REG_G0));
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
                      INSN_RS2(TCG_REG_G0));
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                               - (tcg_target_ulong)s->code_ptr) >> 2)
        tcg_out_ld_ptr(s, TCG_REG_O7, (tcg_target_long)(s->tb_next + args[0]));
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_O7) |
                  INSN_RS2(TCG_REG_G0));
        fprintf(stderr, "unimplemented jmp\n");
        fprintf(stderr, "unimplemented br\n");
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);

#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
#define OP_32_64(x) \
        glue(glue(case INDEX_op_, x), _i32:) \
        glue(glue(case INDEX_op_, x), _i64:)
#define OP_32_64(x) \
        glue(glue(case INDEX_op_, x), _i32:)
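/*
 * OP_32_64 expands to the case labels for both the _i32 and _i64 variants
 * of an opcode on a pure 64-bit host, and to the _i32 label only
 * otherwise, so one block of code below serves both operand sizes.
 */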
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
    case INDEX_op_ld_i32:
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
    case INDEX_op_st_i32:
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_div2_i32:
#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
    case INDEX_op_divu2_i32:
#if defined(__sparc_v9__) || defined(__sparc_v8plus__)
    case INDEX_op_brcond_i32:
        fprintf(stderr, "unimplemented brcond\n");
    case INDEX_op_qemu_ld8u:
        fprintf(stderr, "unimplemented qld\n");
    case INDEX_op_qemu_ld8s:
        fprintf(stderr, "unimplemented qld\n");
    case INDEX_op_qemu_ld16u:
        fprintf(stderr, "unimplemented qld\n");
    case INDEX_op_qemu_ld16s:
        fprintf(stderr, "unimplemented qld\n");
    case INDEX_op_qemu_ld32u:
        fprintf(stderr, "unimplemented qld\n");
    case INDEX_op_qemu_ld32s:
        fprintf(stderr, "unimplemented qld\n");
    case INDEX_op_qemu_st8:
        fprintf(stderr, "unimplemented qst\n");
    case INDEX_op_qemu_st16:
        fprintf(stderr, "unimplemented qst\n");
    case INDEX_op_qemu_st32:
        fprintf(stderr, "unimplemented qst\n");

#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i64:
    case INDEX_op_brcond_i64:
        fprintf(stderr, "unimplemented brcond\n");
    case INDEX_op_qemu_ld64:
        fprintf(stderr, "unimplemented qld\n");
    case INDEX_op_qemu_st64:
        fprintf(stderr, "unimplemented qst\n");
        tcg_out_arithi(s, args[0], args[1], args[2], c);
        tcg_out_arith(s, args[0], args[1], args[2], c);
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
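/*
 * Operand constraints for each opcode: "r" is any register, "ri"/"rJ"
 * allow a register or an immediate matching the constant constraints
 * parsed above, "L" is the qemu_ld/st constraint, and "0"/"1" tie an
 * output to the same register as that numbered input.
 */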
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "rJ" } },
    { INDEX_op_mul_i32, { "r", "r", "rJ" } },
    { INDEX_op_div2_i32, { "r", "r", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "r", "r", "0", "1", "r" } },
    { INDEX_op_sub_i32, { "r", "r", "rJ" } },
    { INDEX_op_and_i32, { "r", "r", "rJ" } },
    { INDEX_op_or_i32, { "r", "r", "rJ" } },
    { INDEX_op_xor_i32, { "r", "r", "rJ" } },

    { INDEX_op_shl_i32, { "r", "r", "rJ" } },
    { INDEX_op_shr_i32, { "r", "r", "rJ" } },
    { INDEX_op_sar_i32, { "r", "r", "rJ" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },

#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rJ" } },
    { INDEX_op_mul_i64, { "r", "r", "rJ" } },
    { INDEX_op_div2_i64, { "r", "r", "0", "1", "r" } },
    { INDEX_op_divu2_i64, { "r", "r", "0", "1", "r" } },
    { INDEX_op_sub_i64, { "r", "r", "rJ" } },
    { INDEX_op_and_i64, { "r", "r", "rJ" } },
    { INDEX_op_or_i64, { "r", "r", "rJ" } },
    { INDEX_op_xor_i64, { "r", "r", "rJ" } },

    { INDEX_op_shl_i64, { "r", "r", "rJ" } },
    { INDEX_op_shr_i64, { "r", "r", "rJ" } },
    { INDEX_op_sar_i64, { "r", "r", "rJ" } },
    { INDEX_op_brcond_i64, { "r", "ri" } },
#endif
    { -1 },
};
void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if defined(__sparc_v9__) && !defined(__sparc_v8plus__)
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
    tcg_add_target_add_op_defs(sparc_op_defs);
}