| /* |
| * Tiny Code Generator for QEMU |
| * |
| * Copyright (c) 2009, 2011 Stefan Weil |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a copy |
| * of this software and associated documentation files (the "Software"), to deal |
| * in the Software without restriction, including without limitation the rights |
| * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
| * copies of the Software, and to permit persons to whom the Software is |
| * furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in |
| * all copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
| * THE SOFTWARE. |
| */ |
| |
| #include "../tcg-pool.c.inc" |
| |
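| /* |
| * Map each opcode to its register constraint set.  The C_Ox_Iy() macros |
| * name a set with x output and y input operands; every operand below uses |
| * the plain "r" constraint, i.e. any general register. |
| */ |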
| static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) |
| { |
| switch (op) { |
| case INDEX_op_goto_ptr: |
| return C_O0_I1(r); |
| |
| case INDEX_op_ld8u_i32: |
| case INDEX_op_ld8s_i32: |
| case INDEX_op_ld16u_i32: |
| case INDEX_op_ld16s_i32: |
| case INDEX_op_ld_i32: |
| case INDEX_op_ld8u_i64: |
| case INDEX_op_ld8s_i64: |
| case INDEX_op_ld16u_i64: |
| case INDEX_op_ld16s_i64: |
| case INDEX_op_ld32u_i64: |
| case INDEX_op_ld32s_i64: |
| case INDEX_op_ld_i64: |
| case INDEX_op_not_i32: |
| case INDEX_op_not_i64: |
| case INDEX_op_neg_i32: |
| case INDEX_op_neg_i64: |
| case INDEX_op_ext8s_i32: |
| case INDEX_op_ext8s_i64: |
| case INDEX_op_ext16s_i32: |
| case INDEX_op_ext16s_i64: |
| case INDEX_op_ext8u_i32: |
| case INDEX_op_ext8u_i64: |
| case INDEX_op_ext16u_i32: |
| case INDEX_op_ext16u_i64: |
| case INDEX_op_ext32s_i64: |
| case INDEX_op_ext32u_i64: |
| case INDEX_op_ext_i32_i64: |
| case INDEX_op_extu_i32_i64: |
| case INDEX_op_bswap16_i32: |
| case INDEX_op_bswap16_i64: |
| case INDEX_op_bswap32_i32: |
| case INDEX_op_bswap32_i64: |
| case INDEX_op_bswap64_i64: |
| case INDEX_op_extract_i32: |
| case INDEX_op_extract_i64: |
| case INDEX_op_sextract_i32: |
| case INDEX_op_sextract_i64: |
| case INDEX_op_ctpop_i32: |
| case INDEX_op_ctpop_i64: |
| return C_O1_I1(r, r); |
| |
| case INDEX_op_st8_i32: |
| case INDEX_op_st16_i32: |
| case INDEX_op_st_i32: |
| case INDEX_op_st8_i64: |
| case INDEX_op_st16_i64: |
| case INDEX_op_st32_i64: |
| case INDEX_op_st_i64: |
| return C_O0_I2(r, r); |
| |
| case INDEX_op_div_i32: |
| case INDEX_op_div_i64: |
| case INDEX_op_divu_i32: |
| case INDEX_op_divu_i64: |
| case INDEX_op_rem_i32: |
| case INDEX_op_rem_i64: |
| case INDEX_op_remu_i32: |
| case INDEX_op_remu_i64: |
| case INDEX_op_add_i32: |
| case INDEX_op_add_i64: |
| case INDEX_op_sub_i32: |
| case INDEX_op_sub_i64: |
| case INDEX_op_mul_i32: |
| case INDEX_op_mul_i64: |
| case INDEX_op_and_i32: |
| case INDEX_op_and_i64: |
| case INDEX_op_andc_i32: |
| case INDEX_op_andc_i64: |
| case INDEX_op_eqv_i32: |
| case INDEX_op_eqv_i64: |
| case INDEX_op_nand_i32: |
| case INDEX_op_nand_i64: |
| case INDEX_op_nor_i32: |
| case INDEX_op_nor_i64: |
| case INDEX_op_or_i32: |
| case INDEX_op_or_i64: |
| case INDEX_op_orc_i32: |
| case INDEX_op_orc_i64: |
| case INDEX_op_xor_i32: |
| case INDEX_op_xor_i64: |
| case INDEX_op_shl_i32: |
| case INDEX_op_shl_i64: |
| case INDEX_op_shr_i32: |
| case INDEX_op_shr_i64: |
| case INDEX_op_sar_i32: |
| case INDEX_op_sar_i64: |
| case INDEX_op_rotl_i32: |
| case INDEX_op_rotl_i64: |
| case INDEX_op_rotr_i32: |
| case INDEX_op_rotr_i64: |
| case INDEX_op_setcond_i32: |
| case INDEX_op_setcond_i64: |
| case INDEX_op_deposit_i32: |
| case INDEX_op_deposit_i64: |
| case INDEX_op_clz_i32: |
| case INDEX_op_clz_i64: |
| case INDEX_op_ctz_i32: |
| case INDEX_op_ctz_i64: |
| return C_O1_I2(r, r, r); |
| |
| case INDEX_op_brcond_i32: |
| case INDEX_op_brcond_i64: |
| return C_O0_I2(r, r); |
| |
| case INDEX_op_add2_i32: |
| case INDEX_op_add2_i64: |
| case INDEX_op_sub2_i32: |
| case INDEX_op_sub2_i64: |
| return C_O2_I4(r, r, r, r, r, r); |
| |
| #if TCG_TARGET_REG_BITS == 32 |
| case INDEX_op_brcond2_i32: |
| return C_O0_I4(r, r, r, r); |
| #endif |
| |
| case INDEX_op_mulu2_i32: |
| case INDEX_op_mulu2_i64: |
| case INDEX_op_muls2_i32: |
| case INDEX_op_muls2_i64: |
| return C_O2_I2(r, r, r, r); |
| |
| case INDEX_op_movcond_i32: |
| case INDEX_op_movcond_i64: |
| case INDEX_op_setcond2_i32: |
| return C_O1_I4(r, r, r, r, r); |
| |
| case INDEX_op_qemu_ld_a32_i32: |
| return C_O1_I1(r, r); |
| case INDEX_op_qemu_ld_a64_i32: |
| return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r); |
| case INDEX_op_qemu_ld_a32_i64: |
| return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r); |
| case INDEX_op_qemu_ld_a64_i64: |
| return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r); |
| case INDEX_op_qemu_st_a32_i32: |
| return C_O0_I2(r, r); |
| case INDEX_op_qemu_st_a64_i32: |
| return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); |
| case INDEX_op_qemu_st_a32_i64: |
| return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r); |
| case INDEX_op_qemu_st_a64_i64: |
| return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r); |
| |
| default: |
| g_assert_not_reached(); |
| } |
| } |
| |
| static const int tcg_target_reg_alloc_order[] = { |
| TCG_REG_R4, |
| TCG_REG_R5, |
| TCG_REG_R6, |
| TCG_REG_R7, |
| TCG_REG_R8, |
| TCG_REG_R9, |
| TCG_REG_R10, |
| TCG_REG_R11, |
| TCG_REG_R12, |
| TCG_REG_R13, |
| TCG_REG_R14, |
| TCG_REG_R15, |
| /* Either 2 (64-bit host) or 4 (32-bit host) of these are call clobbered, so use them last. */ |
| TCG_REG_R3, |
| TCG_REG_R2, |
| TCG_REG_R1, |
| TCG_REG_R0, |
| }; |
| |
| /* No call arguments via registers. All will be stored on the "stack". */ |
| static const int tcg_target_call_iarg_regs[] = { }; |
| |
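| /* |
| * Helper return values are delivered in consecutive registers starting at |
| * R0.  Up to 128 bits are supported, i.e. two registers on a 64-bit host |
| * or four on a 32-bit host; see also the call-clobber mask set up in |
| * tcg_target_init(). |
| */ |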
| static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) |
| { |
| tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); |
| tcg_debug_assert(slot >= 0 && slot < 128 / TCG_TARGET_REG_BITS); |
| return TCG_REG_R0 + slot; |
| } |
| |
| #ifdef CONFIG_DEBUG_TCG |
| static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { |
| "r00", |
| "r01", |
| "r02", |
| "r03", |
| "r04", |
| "r05", |
| "r06", |
| "r07", |
| "r08", |
| "r09", |
| "r10", |
| "r11", |
| "r12", |
| "r13", |
| "r14", |
| "r15", |
| }; |
| #endif |
| |
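| /* |
| * Relocations are 20-bit signed displacements, measured from the end of |
| * the 32-bit instruction word (code_ptr + 1) and patched into bits |
| * [12, 32) of that word.  If the displacement does not fit, return false |
| * and let common code handle the overflow. |
| */ |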
| static bool patch_reloc(tcg_insn_unit *code_ptr, int type, |
| intptr_t value, intptr_t addend) |
| { |
| intptr_t diff = value - (intptr_t)(code_ptr + 1); |
| |
| tcg_debug_assert(addend == 0); |
| tcg_debug_assert(type == 20); |
| |
| if (diff == sextract32(diff, 0, type)) { |
| tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff)); |
| return true; |
| } |
| return false; |
| } |
| |
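| /* |
| * Accesses relative to the interpreter stack pointer must stay within the |
| * area reserved by tcg_set_frame() in tcg_target_init(): the static |
| * call-argument space followed by the temporary frame. |
| */ |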
| static void stack_bounds_check(TCGReg base, intptr_t offset) |
| { |
| if (base == TCG_REG_CALL_STACK) { |
| tcg_debug_assert(offset >= 0); |
| tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE + |
| TCG_STATIC_FRAME_SIZE)); |
| } |
| } |
| |
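| /* |
| * Instruction emitters.  Each instruction is a single 32-bit word with |
| * the TCG opcode in bits [0, 8).  The function name suffix spells out the |
| * remaining operands in encoding order: |
| *   r - 4-bit register number |
| *   i - 20-bit signed immediate (bits [12, 32)) |
| *   s - 16-bit signed offset (bits [16, 32)) |
| *   m - 16-bit memory operation index (bits [16, 32)) |
| *   b - 6-bit bitfield position or length |
| *   c - 4-bit comparison condition |
| *   l - label, resolved via a 20-bit relocation |
| *   p - code pointer, encoded as a 20-bit displacement |
| *   v - no operands |
| */ |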
| static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0) |
| { |
| tcg_insn_unit insn = 0; |
| |
| tcg_out_reloc(s, s->code_ptr, 20, l0, 0); |
| insn = deposit32(insn, 0, 8, op); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0) |
| { |
| tcg_insn_unit insn = 0; |
| intptr_t diff; |
| |
| /* Special case for exit_tb: map null -> 0. */ |
| if (p0 == NULL) { |
| diff = 0; |
| } else { |
| diff = p0 - (void *)(s->code_ptr + 1); |
| tcg_debug_assert(diff != 0); |
| if (diff != sextract32(diff, 0, 20)) { |
| tcg_raise_tb_overflow(s); |
| } |
| } |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 12, 20, diff); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0) |
| { |
| tcg_insn_unit insn = 0; |
| |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_v(TCGContext *s, TCGOpcode op) |
| { |
| tcg_out32(s, (uint8_t)op); |
| } |
| |
| static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1) |
| { |
| tcg_insn_unit insn = 0; |
| |
| tcg_debug_assert(i1 == sextract32(i1, 0, 20)); |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 20, i1); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1) |
| { |
| tcg_insn_unit insn = 0; |
| |
| tcg_out_reloc(s, s->code_ptr, 20, l1, 0); |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1) |
| { |
| tcg_insn_unit insn = 0; |
| |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op, |
| TCGReg r0, TCGReg r1, TCGArg m2) |
| { |
| tcg_insn_unit insn = 0; |
| |
| tcg_debug_assert(m2 == extract32(m2, 0, 16)); |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| insn = deposit32(insn, 16, 16, m2); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op, |
| TCGReg r0, TCGReg r1, TCGReg r2) |
| { |
| tcg_insn_unit insn = 0; |
| |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| insn = deposit32(insn, 16, 4, r2); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op, |
| TCGReg r0, TCGReg r1, intptr_t i2) |
| { |
| tcg_insn_unit insn = 0; |
| |
| tcg_debug_assert(i2 == sextract32(i2, 0, 16)); |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| insn = deposit32(insn, 16, 16, i2); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0, |
| TCGReg r1, uint8_t b2, uint8_t b3) |
| { |
| tcg_insn_unit insn = 0; |
| |
| tcg_debug_assert(b2 == extract32(b2, 0, 6)); |
| tcg_debug_assert(b3 == extract32(b3, 0, 6)); |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| insn = deposit32(insn, 16, 6, b2); |
| insn = deposit32(insn, 22, 6, b3); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op, |
| TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3) |
| { |
| tcg_insn_unit insn = 0; |
| |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| insn = deposit32(insn, 16, 4, r2); |
| insn = deposit32(insn, 20, 4, c3); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0, |
| TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4) |
| { |
| tcg_insn_unit insn = 0; |
| |
| tcg_debug_assert(b3 == extract32(b3, 0, 6)); |
| tcg_debug_assert(b4 == extract32(b4, 0, 6)); |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| insn = deposit32(insn, 16, 4, r2); |
| insn = deposit32(insn, 20, 6, b3); |
| insn = deposit32(insn, 26, 6, b4); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rrrrr(TCGContext *s, TCGOpcode op, TCGReg r0, |
| TCGReg r1, TCGReg r2, TCGReg r3, TCGReg r4) |
| { |
| tcg_insn_unit insn = 0; |
| |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| insn = deposit32(insn, 16, 4, r2); |
| insn = deposit32(insn, 20, 4, r3); |
| insn = deposit32(insn, 24, 4, r4); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op, |
| TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3) |
| { |
| tcg_insn_unit insn = 0; |
| |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| insn = deposit32(insn, 16, 4, r2); |
| insn = deposit32(insn, 20, 4, r3); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op, |
| TCGReg r0, TCGReg r1, TCGReg r2, |
| TCGReg r3, TCGReg r4, TCGCond c5) |
| { |
| tcg_insn_unit insn = 0; |
| |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| insn = deposit32(insn, 16, 4, r2); |
| insn = deposit32(insn, 20, 4, r3); |
| insn = deposit32(insn, 24, 4, r4); |
| insn = deposit32(insn, 28, 4, c5); |
| tcg_out32(s, insn); |
| } |
| |
| static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op, |
| TCGReg r0, TCGReg r1, TCGReg r2, |
| TCGReg r3, TCGReg r4, TCGReg r5) |
| { |
| tcg_insn_unit insn = 0; |
| |
| insn = deposit32(insn, 0, 8, op); |
| insn = deposit32(insn, 8, 4, r0); |
| insn = deposit32(insn, 12, 4, r1); |
| insn = deposit32(insn, 16, 4, r2); |
| insn = deposit32(insn, 20, 4, r3); |
| insn = deposit32(insn, 24, 4, r4); |
| insn = deposit32(insn, 28, 4, r5); |
| tcg_out32(s, insn); |
| } |
| |
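| /* |
| * Emit a load or store.  Offsets that fit the signed 16-bit field are |
| * encoded directly; larger offsets are first added to the base register |
| * in TCG_REG_TMP. |
| */ |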
| static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val, |
| TCGReg base, intptr_t offset) |
| { |
| stack_bounds_check(base, offset); |
| if (offset != sextract32(offset, 0, 16)) { |
| tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset); |
| tcg_out_op_rrr(s, (TCG_TARGET_REG_BITS == 32 |
| ? INDEX_op_add_i32 : INDEX_op_add_i64), |
| TCG_REG_TMP, TCG_REG_TMP, base); |
| base = TCG_REG_TMP; |
| offset = 0; |
| } |
| tcg_out_op_rrs(s, op, val, base, offset); |
| } |
| |
| static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base, |
| intptr_t offset) |
| { |
| switch (type) { |
| case TCG_TYPE_I32: |
| tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset); |
| break; |
| #if TCG_TARGET_REG_BITS == 64 |
| case TCG_TYPE_I64: |
| tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset); |
| break; |
| #endif |
| default: |
| g_assert_not_reached(); |
| } |
| } |
| |
| static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) |
| { |
| switch (type) { |
| case TCG_TYPE_I32: |
| tcg_out_op_rr(s, INDEX_op_mov_i32, ret, arg); |
| break; |
| #if TCG_TARGET_REG_BITS == 64 |
| case TCG_TYPE_I64: |
| tcg_out_op_rr(s, INDEX_op_mov_i64, ret, arg); |
| break; |
| #endif |
| default: |
| g_assert_not_reached(); |
| } |
| return true; |
| } |
| |
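| /* |
| * Load a constant.  Values representable in 20 signed bits are encoded |
| * inline with tci_movi; larger values are placed in the constant pool and |
| * loaded with tci_movl via a 20-bit relocation. |
| */ |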
| static void tcg_out_movi(TCGContext *s, TCGType type, |
| TCGReg ret, tcg_target_long arg) |
| { |
| switch (type) { |
| case TCG_TYPE_I32: |
| #if TCG_TARGET_REG_BITS == 64 |
| arg = (int32_t)arg; |
| /* fall through */ |
| case TCG_TYPE_I64: |
| #endif |
| break; |
| default: |
| g_assert_not_reached(); |
| } |
| |
| if (arg == sextract32(arg, 0, 20)) { |
| tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg); |
| } else { |
| tcg_insn_unit insn = 0; |
| |
| new_pool_label(s, arg, 20, s->code_ptr, 0); |
| insn = deposit32(insn, 0, 8, INDEX_op_tci_movl); |
| insn = deposit32(insn, 8, 4, ret); |
| tcg_out32(s, insn); |
| } |
| } |
| |
| static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) |
| { |
| switch (type) { |
| case TCG_TYPE_I32: |
| tcg_debug_assert(TCG_TARGET_HAS_ext8s_i32); |
| tcg_out_op_rr(s, INDEX_op_ext8s_i32, rd, rs); |
| break; |
| #if TCG_TARGET_REG_BITS == 64 |
| case TCG_TYPE_I64: |
| tcg_debug_assert(TCG_TARGET_HAS_ext8s_i64); |
| tcg_out_op_rr(s, INDEX_op_ext8s_i64, rd, rs); |
| break; |
| #endif |
| default: |
| g_assert_not_reached(); |
| } |
| } |
| |
| static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs) |
| { |
| if (TCG_TARGET_REG_BITS == 64) { |
| tcg_debug_assert(TCG_TARGET_HAS_ext8u_i64); |
| tcg_out_op_rr(s, INDEX_op_ext8u_i64, rd, rs); |
| } else { |
| tcg_debug_assert(TCG_TARGET_HAS_ext8u_i32); |
| tcg_out_op_rr(s, INDEX_op_ext8u_i32, rd, rs); |
| } |
| } |
| |
| static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) |
| { |
| switch (type) { |
| case TCG_TYPE_I32: |
| tcg_debug_assert(TCG_TARGET_HAS_ext16s_i32); |
| tcg_out_op_rr(s, INDEX_op_ext16s_i32, rd, rs); |
| break; |
| #if TCG_TARGET_REG_BITS == 64 |
| case TCG_TYPE_I64: |
| tcg_debug_assert(TCG_TARGET_HAS_ext16s_i64); |
| tcg_out_op_rr(s, INDEX_op_ext16s_i64, rd, rs); |
| break; |
| #endif |
| default: |
| g_assert_not_reached(); |
| } |
| } |
| |
| static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs) |
| { |
| if (TCG_TARGET_REG_BITS == 64) { |
| tcg_debug_assert(TCG_TARGET_HAS_ext16u_i64); |
| tcg_out_op_rr(s, INDEX_op_ext16u_i64, rd, rs); |
| } else { |
| tcg_debug_assert(TCG_TARGET_HAS_ext16u_i32); |
| tcg_out_op_rr(s, INDEX_op_ext16u_i32, rd, rs); |
| } |
| } |
| |
| static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs) |
| { |
| tcg_debug_assert(TCG_TARGET_REG_BITS == 64); |
| tcg_debug_assert(TCG_TARGET_HAS_ext32s_i64); |
| tcg_out_op_rr(s, INDEX_op_ext32s_i64, rd, rs); |
| } |
| |
| static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs) |
| { |
| tcg_debug_assert(TCG_TARGET_REG_BITS == 64); |
| tcg_debug_assert(TCG_TARGET_HAS_ext32u_i64); |
| tcg_out_op_rr(s, INDEX_op_ext32u_i64, rd, rs); |
| } |
| |
| static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) |
| { |
| tcg_out_ext32s(s, rd, rs); |
| } |
| |
| static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs) |
| { |
| tcg_out_ext32u(s, rd, rs); |
| } |
| |
| static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs) |
| { |
| tcg_debug_assert(TCG_TARGET_REG_BITS == 64); |
| tcg_out_mov(s, TCG_TYPE_I32, rd, rs); |
| } |
| |
| static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) |
| { |
| return false; |
| } |
| |
| static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, |
| tcg_target_long imm) |
| { |
| /* This function is only used for passing structs by reference. */ |
| g_assert_not_reached(); |
| } |
| |
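| /* |
| * Emit a helper call.  The function pointer and its ffi_cif descriptor |
| * are stored as a pair in the constant pool; the 4-bit register field |
| * encodes the return value size as 0 for void, else ctz32(size) - 1, |
| * i.e. 1, 2 or 3 for 32-, 64- or 128-bit returns. |
| */ |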
| static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func, |
| const TCGHelperInfo *info) |
| { |
| ffi_cif *cif = info->cif; |
| tcg_insn_unit insn = 0; |
| uint8_t which; |
| |
| if (cif->rtype == &ffi_type_void) { |
| which = 0; |
| } else { |
| tcg_debug_assert(cif->rtype->size == 4 || |
| cif->rtype->size == 8 || |
| cif->rtype->size == 16); |
| which = ctz32(cif->rtype->size) - 1; |
| } |
| new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif); |
| insn = deposit32(insn, 0, 8, INDEX_op_call); |
| insn = deposit32(insn, 8, 4, which); |
| tcg_out32(s, insn); |
| } |
| |
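| /* |
| * Expand to case labels for both the 32-bit and 64-bit variants of an |
| * opcode on 64-bit hosts, and only for the 32-bit variant on 32-bit |
| * hosts, where CASE_64() expands to nothing. |
| */ |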
| #if TCG_TARGET_REG_BITS == 64 |
| # define CASE_32_64(x) \ |
| case glue(glue(INDEX_op_, x), _i64): \ |
| case glue(glue(INDEX_op_, x), _i32): |
| # define CASE_64(x) \ |
| case glue(glue(INDEX_op_, x), _i64): |
| #else |
| # define CASE_32_64(x) \ |
| case glue(glue(INDEX_op_, x), _i32): |
| # define CASE_64(x) |
| #endif |
| |
| static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg) |
| { |
| tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg); |
| } |
| |
| static void tcg_out_goto_tb(TCGContext *s, int which) |
| { |
| /* Indirect jump method: the branch target is loaded at run time from the per-TB jump target slot. */ |
| tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which)); |
| set_jmp_reset_offset(s, which); |
| } |
| |
| void tb_target_set_jmp_target(const TranslationBlock *tb, int n, |
| uintptr_t jmp_rx, uintptr_t jmp_rw) |
| { |
| /* Always indirect, nothing to do */ |
| } |
| |
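| /* |
| * Emit one bytecode instruction for the given TCG op; the operands in |
| * args[] have already been assigned registers according to the |
| * constraints returned by tcg_target_op_def(). |
| */ |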
| static void tcg_out_op(TCGContext *s, TCGOpcode opc, |
| const TCGArg args[TCG_MAX_OP_ARGS], |
| const int const_args[TCG_MAX_OP_ARGS]) |
| { |
| TCGOpcode exts; |
| |
| switch (opc) { |
| case INDEX_op_goto_ptr: |
| tcg_out_op_r(s, opc, args[0]); |
| break; |
| |
| case INDEX_op_br: |
| tcg_out_op_l(s, opc, arg_label(args[0])); |
| break; |
| |
| CASE_32_64(setcond) |
| tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]); |
| break; |
| |
| CASE_32_64(movcond) |
| case INDEX_op_setcond2_i32: |
| tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2], |
| args[3], args[4], args[5]); |
| break; |
| |
| CASE_32_64(ld8u) |
| CASE_32_64(ld8s) |
| CASE_32_64(ld16u) |
| CASE_32_64(ld16s) |
| case INDEX_op_ld_i32: |
| CASE_64(ld32u) |
| CASE_64(ld32s) |
| CASE_64(ld) |
| CASE_32_64(st8) |
| CASE_32_64(st16) |
| case INDEX_op_st_i32: |
| CASE_64(st32) |
| CASE_64(st) |
| tcg_out_ldst(s, opc, args[0], args[1], args[2]); |
| break; |
| |
| CASE_32_64(add) |
| CASE_32_64(sub) |
| CASE_32_64(mul) |
| CASE_32_64(and) |
| CASE_32_64(or) |
| CASE_32_64(xor) |
| CASE_32_64(andc) /* Optional (TCG_TARGET_HAS_andc_*). */ |
| CASE_32_64(orc) /* Optional (TCG_TARGET_HAS_orc_*). */ |
| CASE_32_64(eqv) /* Optional (TCG_TARGET_HAS_eqv_*). */ |
| CASE_32_64(nand) /* Optional (TCG_TARGET_HAS_nand_*). */ |
| CASE_32_64(nor) /* Optional (TCG_TARGET_HAS_nor_*). */ |
| CASE_32_64(shl) |
| CASE_32_64(shr) |
| CASE_32_64(sar) |
| CASE_32_64(rotl) /* Optional (TCG_TARGET_HAS_rot_*). */ |
| CASE_32_64(rotr) /* Optional (TCG_TARGET_HAS_rot_*). */ |
| CASE_32_64(div) /* Optional (TCG_TARGET_HAS_div_*). */ |
| CASE_32_64(divu) /* Optional (TCG_TARGET_HAS_div_*). */ |
| CASE_32_64(rem) /* Optional (TCG_TARGET_HAS_div_*). */ |
| CASE_32_64(remu) /* Optional (TCG_TARGET_HAS_div_*). */ |
| CASE_32_64(clz) /* Optional (TCG_TARGET_HAS_clz_*). */ |
| CASE_32_64(ctz) /* Optional (TCG_TARGET_HAS_ctz_*). */ |
| tcg_out_op_rrr(s, opc, args[0], args[1], args[2]); |
| break; |
| |
| CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */ |
| { |
| TCGArg pos = args[3], len = args[4]; |
| TCGArg max = opc == INDEX_op_deposit_i32 ? 32 : 64; |
| |
| tcg_debug_assert(pos < max); |
| tcg_debug_assert(pos + len <= max); |
| |
| tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], pos, len); |
| } |
| break; |
| |
| CASE_32_64(extract) /* Optional (TCG_TARGET_HAS_extract_*). */ |
| CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */ |
| { |
| TCGArg pos = args[2], len = args[3]; |
| TCGArg max = tcg_op_defs[opc].flags & TCG_OPF_64BIT ? 64 : 32; |
| |
| tcg_debug_assert(pos < max); |
| tcg_debug_assert(pos + len <= max); |
| |
| tcg_out_op_rrbb(s, opc, args[0], args[1], pos, len); |
| } |
| break; |
| |
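| /* |
| * There is no direct brcond bytecode: compute the condition into |
| * TCG_REG_TMP with setcond, then branch on that register. |
| */ |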
| CASE_32_64(brcond) |
| tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32 |
| ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64), |
| TCG_REG_TMP, args[0], args[1], args[2]); |
| tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3])); |
| break; |
| |
| CASE_32_64(neg) /* Optional (TCG_TARGET_HAS_neg_*). */ |
| CASE_32_64(not) /* Optional (TCG_TARGET_HAS_not_*). */ |
| CASE_32_64(ctpop) /* Optional (TCG_TARGET_HAS_ctpop_*). */ |
| case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */ |
| case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */ |
| tcg_out_op_rr(s, opc, args[0], args[1]); |
| break; |
| |
| case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */ |
| exts = INDEX_op_ext16s_i32; |
| goto do_bswap; |
| case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */ |
| exts = INDEX_op_ext16s_i64; |
| goto do_bswap; |
| case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */ |
| exts = INDEX_op_ext32s_i64; |
| do_bswap: |
| /* The base tci bswap ops zero-extend the result and ignore high input bits. */ |
| tcg_out_op_rr(s, opc, args[0], args[1]); |
| if (args[2] & TCG_BSWAP_OS) { |
| tcg_out_op_rr(s, exts, args[0], args[0]); |
| } |
| break; |
| |
| CASE_32_64(add2) |
| CASE_32_64(sub2) |
| tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2], |
| args[3], args[4], args[5]); |
| break; |
| |
| #if TCG_TARGET_REG_BITS == 32 |
| case INDEX_op_brcond2_i32: |
| tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP, |
| args[0], args[1], args[2], args[3], args[4]); |
| tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5])); |
| break; |
| #endif |
| |
| CASE_32_64(mulu2) |
| CASE_32_64(muls2) |
| tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]); |
| break; |
| |
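| /* |
| * With a 32-bit host, 64-bit data and 64-bit guest addresses occupy |
| * register pairs.  When the operands plus the MemOpIdx no longer fit in |
| * the instruction word, the MemOpIdx is first loaded into TCG_REG_TMP and |
| * passed as an extra register operand. |
| */ |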
| case INDEX_op_qemu_ld_a32_i32: |
| case INDEX_op_qemu_st_a32_i32: |
| tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); |
| break; |
| case INDEX_op_qemu_ld_a64_i32: |
| case INDEX_op_qemu_st_a64_i32: |
| case INDEX_op_qemu_ld_a32_i64: |
| case INDEX_op_qemu_st_a32_i64: |
| if (TCG_TARGET_REG_BITS == 64) { |
| tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); |
| } else { |
| tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[3]); |
| tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], TCG_REG_TMP); |
| } |
| break; |
| case INDEX_op_qemu_ld_a64_i64: |
| case INDEX_op_qemu_st_a64_i64: |
| if (TCG_TARGET_REG_BITS == 64) { |
| tcg_out_op_rrm(s, opc, args[0], args[1], args[2]); |
| } else { |
| tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]); |
| tcg_out_op_rrrrr(s, opc, args[0], args[1], |
| args[2], args[3], TCG_REG_TMP); |
| } |
| break; |
| |
| case INDEX_op_mb: |
| tcg_out_op_v(s, opc); |
| break; |
| |
| case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ |
| case INDEX_op_mov_i64: |
| case INDEX_op_call: /* Always emitted via tcg_out_call. */ |
| case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ |
| case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ |
| case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ |
| case INDEX_op_ext8s_i64: |
| case INDEX_op_ext8u_i32: |
| case INDEX_op_ext8u_i64: |
| case INDEX_op_ext16s_i32: |
| case INDEX_op_ext16s_i64: |
| case INDEX_op_ext16u_i32: |
| case INDEX_op_ext16u_i64: |
| case INDEX_op_ext32s_i64: |
| case INDEX_op_ext32u_i64: |
| case INDEX_op_ext_i32_i64: |
| case INDEX_op_extu_i32_i64: |
| case INDEX_op_extrl_i64_i32: |
| default: |
| g_assert_not_reached(); |
| } |
| } |
| |
| static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base, |
| intptr_t offset) |
| { |
| switch (type) { |
| case TCG_TYPE_I32: |
| tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset); |
| break; |
| #if TCG_TARGET_REG_BITS == 64 |
| case TCG_TYPE_I64: |
| tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset); |
| break; |
| #endif |
| default: |
| g_assert_not_reached(); |
| } |
| } |
| |
| static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, |
| TCGReg base, intptr_t ofs) |
| { |
| return false; |
| } |
| |
| /* Test if a constant matches the constraint. */ |
| static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) |
| { |
| return ct & TCG_CT_CONST; |
| } |
| |
| static void tcg_out_nop_fill(tcg_insn_unit *p, int count) |
| { |
| memset(p, 0, sizeof(*p) * count); |
| } |
| |
| static void tcg_target_init(TCGContext *s) |
| { |
| /* The opcode field of each instruction word is a uint8_t, so all TCG opcodes must fit in it. */ |
| tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX); |
| |
| /* Registers available for 32-bit operations. */ |
| tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1; |
| /* Registers available for 64-bit operations. */ |
| tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1; |
| /* |
| * The interpreter "registers" are in the local stack frame and |
| * cannot be clobbered by the called helper functions. However, |
| * the interpreter assumes a 128-bit return value and assigns to |
| * the return value registers. |
| */ |
| tcg_target_call_clobber_regs = |
| MAKE_64BIT_MASK(TCG_REG_R0, 128 / TCG_TARGET_REG_BITS); |
| |
| s->reserved_regs = 0; |
| tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); |
| tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); |
| |
| /* The call arguments come first, followed by the temp storage. */ |
| tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE, |
| TCG_STATIC_FRAME_SIZE); |
| } |
| |
| /* Generate global QEMU prologue and epilogue code. */ |
| static inline void tcg_target_qemu_prologue(TCGContext *s) |
| { |
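| /* The bytecode interpreter performs TB entry and exit itself; nothing to emit. */ |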
| } |
| |
| static void tcg_out_tb_start(TCGContext *s) |
| { |
| /* nothing to do */ |
| } |
| |
| bool tcg_target_has_memory_bswap(MemOp memop) |
| { |
| return true; |
| } |